/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2014 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
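/* For example, tg3_flag(tp, ENABLE_APE) expands via ## token pasting to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), so every flag access
 * goes through a type-checked test_bit()/set_bit()/clear_bit() on the
 * flag bitmap.
 */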

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
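/* TG3_TX_RING_SIZE is a power of two, so the '& (TG3_TX_RING_SIZE - 1)'
 * above is the masked equivalent of '% TG3_TX_RING_SIZE' mentioned in the
 * comment before these defines; e.g. NEXT_TX(511) wraps back to 0.
 */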
163
164 #define TG3_DMA_BYTE_ENAB 64
165
166 #define TG3_RX_STD_DMA_SZ 1536
167 #define TG3_RX_JMB_DMA_SZ 9046
168
169 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
170
171 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181 * that are at least dword aligned when used in PCIX mode. The driver
182 * works around this bug by double copying the packet. This workaround
183 * is built into the normal double copy length check for efficiency.
184 *
185 * However, the double copy is only necessary on those architectures
186 * where unaligned memory accesses are inefficient. For those architectures
187 * where unaligned memory accesses incur little penalty, we can reintegrate
188 * the 5701 in the normal rx path. Doing so saves a device structure
189 * dereference by hardcoding the double copy threshold in place.
190 */
191 #define TG3_RX_COPY_THRESHOLD 256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
194 #else
195 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
196 #endif
197
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
202 #endif
203
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K 2048
207 #define TG3_TX_BD_DMA_MAX_4K 4096
208
209 #define TG3_RAW_IP_ALIGN 2
210
211 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
212 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
213
214 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
215 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
216
217 #define FIRMWARE_TG3 "tigon/tg3.bin"
218 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
219 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
220 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
221
222 static char version[] =
223 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
224
225 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
226 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
227 MODULE_LICENSE("GPL");
228 MODULE_VERSION(DRV_MODULE_VERSION);
229 MODULE_FIRMWARE(FIRMWARE_TG3);
230 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
231 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
232
233 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
234 module_param(tg3_debug, int, 0);
235 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
236
237 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
238 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
239
240 static const struct pci_device_id tg3_pci_tbl[] = {
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 TG3_DRV_DATA_FLAG_5705_10_100},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
263 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
264 TG3_DRV_DATA_FLAG_5705_10_100},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
267 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
268 TG3_DRV_DATA_FLAG_5705_10_100},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
275 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
281 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
289 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
290 PCI_VENDOR_ID_LENOVO,
291 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
292 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
295 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
311 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
312 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
313 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
316 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
318 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
319 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
323 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
330 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
332 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
333 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
335 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
340 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
341 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
342 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
343 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
344 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
345 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
346 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
347 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
348 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
349 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
350 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
351 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
352 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
353 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
354 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
355 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
356 {}
357 };

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
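
/* Illustrative use (assumed, not a call site in this excerpt):
 * tw32_wait_f(GRC_LOCAL_CTRL, val, TG3_GRC_LCLCTL_PWRSW_DELAY) posts the
 * write, then via _tw32_flush() enforces the delay both before and after
 * the flushing read, so the full wait is honored on either write method.
 */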

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
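		/* else: fall through */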
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
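		/* else: fall through */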
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

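	/* A nonzero return means the event was still pending when the poll
	 * budget (timeout_us / 10 iterations of udelay(10)) ran out.
	 */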
	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
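/* With udelay(10) per iteration in the loops below, 5000 polls bound each
 * MDIO transaction to roughly 50 ms before it fails with -EBUSY.
 */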

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;
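
	/* The loop below polls in 8 usec steps, so '>> 3' converts the
	 * remaining usec budget into a poll count and '+ 1' rounds up.
	 */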
	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
1804
1805 static int tg3_poll_fw(struct tg3 *tp)
1806 {
1807 int i;
1808 u32 val;
1809
1810 if (tg3_flag(tp, NO_FWARE_REPORTED))
1811 return 0;
1812
1813 if (tg3_flag(tp, IS_SSB_CORE)) {
1814 /* We don't use firmware. */
1815 return 0;
1816 }
1817
1818 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1819 /* Wait up to 20ms for init done. */
1820 for (i = 0; i < 200; i++) {
1821 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1822 return 0;
1823 if (pci_channel_offline(tp->pdev))
1824 return -ENODEV;
1825
1826 udelay(100);
1827 }
1828 return -ENODEV;
1829 }
1830
1831 /* Wait for firmware initialization to complete. */
1832 for (i = 0; i < 100000; i++) {
1833 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1834 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1835 break;
1836 if (pci_channel_offline(tp->pdev)) {
1837 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1838 tg3_flag_set(tp, NO_FWARE_REPORTED);
1839 netdev_info(tp->dev, "No firmware running\n");
1840 }
1841
1842 break;
1843 }
1844
1845 udelay(10);
1846 }
1847
1848 /* Chip might not be fitted with firmware. Some Sun onboard
1849 * parts are configured like that. So don't signal the timeout
1850 * of the above loop as an error, but do report the lack of
1851 * running firmware once.
1852 */
1853 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1854 tg3_flag_set(tp, NO_FWARE_REPORTED);
1855
1856 netdev_info(tp->dev, "No firmware running\n");
1857 }
1858
1859 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1860 /* The 57765 A0 needs a little more
1861 * time to do some important work.
1862 */
1863 mdelay(10);
1864 }
1865
1866 return 0;
1867 }
1868
1869 static void tg3_link_report(struct tg3 *tp)
1870 {
1871 if (!netif_carrier_ok(tp->dev)) {
1872 netif_info(tp, link, tp->dev, "Link is down\n");
1873 tg3_ump_link_report(tp);
1874 } else if (netif_msg_link(tp)) {
1875 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1876 (tp->link_config.active_speed == SPEED_1000 ?
1877 1000 :
1878 (tp->link_config.active_speed == SPEED_100 ?
1879 100 : 10)),
1880 (tp->link_config.active_duplex == DUPLEX_FULL ?
1881 "full" : "half"));
1882
1883 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1884 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1885 "on" : "off",
1886 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1887 "on" : "off");
1888
1889 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1890 netdev_info(tp->dev, "EEE is %s\n",
1891 tp->setlpicnt ? "enabled" : "disabled");
1892
1893 tg3_ump_link_report(tp);
1894 }
1895
1896 tp->link_up = netif_carrier_ok(tp->dev);
1897 }
1898
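/* Translate an MII pause advertisement back into FLOW_CTRL bits:
* PAUSE_CAP alone means symmetric pause (both RX and TX), PAUSE_CAP
* plus PAUSE_ASYM means receive-only, and PAUSE_ASYM alone means
* transmit-only.
*/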
1899 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1900 {
1901 u32 flowctrl = 0;
1902
1903 if (adv & ADVERTISE_PAUSE_CAP) {
1904 flowctrl |= FLOW_CTRL_RX;
1905 if (!(adv & ADVERTISE_PAUSE_ASYM))
1906 flowctrl |= FLOW_CTRL_TX;
1907 } else if (adv & ADVERTISE_PAUSE_ASYM)
1908 flowctrl |= FLOW_CTRL_TX;
1909
1910 return flowctrl;
1911 }
1912
1913 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1914 {
1915 u16 miireg;
1916
1917 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1918 miireg = ADVERTISE_1000XPAUSE;
1919 else if (flow_ctrl & FLOW_CTRL_TX)
1920 miireg = ADVERTISE_1000XPSE_ASYM;
1921 else if (flow_ctrl & FLOW_CTRL_RX)
1922 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1923 else
1924 miireg = 0;
1925
1926 return miireg;
1927 }
1928
1929 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1930 {
1931 u32 flowctrl = 0;
1932
1933 if (adv & ADVERTISE_1000XPAUSE) {
1934 flowctrl |= FLOW_CTRL_RX;
1935 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1936 flowctrl |= FLOW_CTRL_TX;
1937 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1938 flowctrl |= FLOW_CTRL_TX;
1939
1940 return flowctrl;
1941 }
1942
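/* Resolve local and link-partner 1000BASE-X pause advertisements
* following the usual IEEE 802.3 priority rules: symmetric pause wins
* if both ends advertise it; otherwise asymmetric pause is granted
* toward whichever side also advertised symmetric pause.
*/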
1943 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1944 {
1945 u8 cap = 0;
1946
1947 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1948 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1949 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1950 if (lcladv & ADVERTISE_1000XPAUSE)
1951 cap = FLOW_CTRL_RX;
1952 if (rmtadv & ADVERTISE_1000XPAUSE)
1953 cap = FLOW_CTRL_TX;
1954 }
1955
1956 return cap;
1957 }
1958
1959 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1960 {
1961 u8 autoneg;
1962 u8 flowctrl = 0;
1963 u32 old_rx_mode = tp->rx_mode;
1964 u32 old_tx_mode = tp->tx_mode;
1965
1966 if (tg3_flag(tp, USE_PHYLIB))
1967 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1968 else
1969 autoneg = tp->link_config.autoneg;
1970
1971 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1972 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1973 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1974 else
1975 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1976 } else
1977 flowctrl = tp->link_config.flowctrl;
1978
1979 tp->link_config.active_flowctrl = flowctrl;
1980
1981 if (flowctrl & FLOW_CTRL_RX)
1982 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1983 else
1984 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1985
1986 if (old_rx_mode != tp->rx_mode)
1987 tw32_f(MAC_RX_MODE, tp->rx_mode);
1988
1989 if (flowctrl & FLOW_CTRL_TX)
1990 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1991 else
1992 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1993
1994 if (old_tx_mode != tp->tx_mode)
1995 tw32_f(MAC_TX_MODE, tp->tx_mode);
1996 }
1997
1998 static void tg3_adjust_link(struct net_device *dev)
1999 {
2000 u8 oldflowctrl, linkmesg = 0;
2001 u32 mac_mode, lcl_adv, rmt_adv;
2002 struct tg3 *tp = netdev_priv(dev);
2003 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2004
2005 spin_lock_bh(&tp->lock);
2006
2007 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2008 MAC_MODE_HALF_DUPLEX);
2009
2010 oldflowctrl = tp->link_config.active_flowctrl;
2011
2012 if (phydev->link) {
2013 lcl_adv = 0;
2014 rmt_adv = 0;
2015
2016 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2017 mac_mode |= MAC_MODE_PORT_MODE_MII;
2018 else if (phydev->speed == SPEED_1000 ||
2019 tg3_asic_rev(tp) != ASIC_REV_5785)
2020 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2021 else
2022 mac_mode |= MAC_MODE_PORT_MODE_MII;
2023
2024 if (phydev->duplex == DUPLEX_HALF)
2025 mac_mode |= MAC_MODE_HALF_DUPLEX;
2026 else {
2027 lcl_adv = mii_advertise_flowctrl(
2028 tp->link_config.flowctrl);
2029
2030 if (phydev->pause)
2031 rmt_adv = LPA_PAUSE_CAP;
2032 if (phydev->asym_pause)
2033 rmt_adv |= LPA_PAUSE_ASYM;
2034 }
2035
2036 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2037 } else
2038 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2039
2040 if (mac_mode != tp->mac_mode) {
2041 tp->mac_mode = mac_mode;
2042 tw32_f(MAC_MODE, tp->mac_mode);
2043 udelay(40);
2044 }
2045
2046 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2047 if (phydev->speed == SPEED_10)
2048 tw32(MAC_MI_STAT,
2049 MAC_MI_STAT_10MBPS_MODE |
2050 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2051 else
2052 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2053 }
2054
2055 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2056 tw32(MAC_TX_LENGTHS,
2057 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2058 (6 << TX_LENGTHS_IPG_SHIFT) |
2059 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2060 else
2061 tw32(MAC_TX_LENGTHS,
2062 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2063 (6 << TX_LENGTHS_IPG_SHIFT) |
2064 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2065
2066 if (phydev->link != tp->old_link ||
2067 phydev->speed != tp->link_config.active_speed ||
2068 phydev->duplex != tp->link_config.active_duplex ||
2069 oldflowctrl != tp->link_config.active_flowctrl)
2070 linkmesg = 1;
2071
2072 tp->old_link = phydev->link;
2073 tp->link_config.active_speed = phydev->speed;
2074 tp->link_config.active_duplex = phydev->duplex;
2075
2076 spin_unlock_bh(&tp->lock);
2077
2078 if (linkmesg)
2079 tg3_link_report(tp);
2080 }
2081
2082 static int tg3_phy_init(struct tg3 *tp)
2083 {
2084 struct phy_device *phydev;
2085
2086 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2087 return 0;
2088
2089 /* Bring the PHY back to a known state. */
2090 tg3_bmcr_reset(tp);
2091
2092 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2093
2094 /* Attach the MAC to the PHY. */
2095 phydev = phy_connect(tp->dev, phydev_name(phydev),
2096 tg3_adjust_link, phydev->interface);
2097 if (IS_ERR(phydev)) {
2098 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2099 return PTR_ERR(phydev);
2100 }
2101
2102 /* Mask with MAC supported features. */
2103 switch (phydev->interface) {
2104 case PHY_INTERFACE_MODE_GMII:
2105 case PHY_INTERFACE_MODE_RGMII:
2106 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2107 phydev->supported &= (PHY_GBIT_FEATURES |
2108 SUPPORTED_Pause |
2109 SUPPORTED_Asym_Pause);
2110 break;
2111 }
2112 /* fallthru */
2113 case PHY_INTERFACE_MODE_MII:
2114 phydev->supported &= (PHY_BASIC_FEATURES |
2115 SUPPORTED_Pause |
2116 SUPPORTED_Asym_Pause);
2117 break;
2118 default:
2119 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2120 return -EINVAL;
2121 }
2122
2123 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2124
2125 phydev->advertising = phydev->supported;
2126
2127 phy_attached_info(phydev);
2128
2129 return 0;
2130 }
2131
2132 static void tg3_phy_start(struct tg3 *tp)
2133 {
2134 struct phy_device *phydev;
2135
2136 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2137 return;
2138
2139 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2140
2141 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2142 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2143 phydev->speed = tp->link_config.speed;
2144 phydev->duplex = tp->link_config.duplex;
2145 phydev->autoneg = tp->link_config.autoneg;
2146 phydev->advertising = tp->link_config.advertising;
2147 }
2148
2149 phy_start(phydev);
2150
2151 phy_start_aneg(phydev);
2152 }
2153
2154 static void tg3_phy_stop(struct tg3 *tp)
2155 {
2156 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2157 return;
2158
2159 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2160 }
2161
2162 static void tg3_phy_fini(struct tg3 *tp)
2163 {
2164 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2165 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2166 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2167 }
2168 }
2169
2170 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2171 {
2172 int err;
2173 u32 val;
2174
2175 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2176 return 0;
2177
2178 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2179 /* Cannot do read-modify-write on 5401 */
2180 err = tg3_phy_auxctl_write(tp,
2181 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2182 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2183 0x4c20);
2184 goto done;
2185 }
2186
2187 err = tg3_phy_auxctl_read(tp,
2188 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2189 if (err)
2190 return err;
2191
2192 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2193 err = tg3_phy_auxctl_write(tp,
2194 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2195
2196 done:
2197 return err;
2198 }
2199
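/* FET-style PHYs hide extra registers behind a shadow bank: setting
* MII_TG3_FET_SHADOW_EN in MII_TG3_FET_TEST exposes the shadow
* registers, and the original test-register value is restored when
* the access is done. The same pattern is used by the automdix and
* power-down paths below.
*/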
2200 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2201 {
2202 u32 phytest;
2203
2204 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2205 u32 phy;
2206
2207 tg3_writephy(tp, MII_TG3_FET_TEST,
2208 phytest | MII_TG3_FET_SHADOW_EN);
2209 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2210 if (enable)
2211 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2212 else
2213 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2214 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2215 }
2216 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2217 }
2218 }
2219
2220 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2221 {
2222 u32 reg;
2223
2224 if (!tg3_flag(tp, 5705_PLUS) ||
2225 (tg3_flag(tp, 5717_PLUS) &&
2226 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2227 return;
2228
2229 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2230 tg3_phy_fet_toggle_apd(tp, enable);
2231 return;
2232 }
2233
2234 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2235 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2236 MII_TG3_MISC_SHDW_SCR5_SDTL |
2237 MII_TG3_MISC_SHDW_SCR5_C125OE;
2238 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2239 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2240
2241 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2242
2243
2244 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2245 if (enable)
2246 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2247
2248 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2249 }
2250
2251 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2252 {
2253 u32 phy;
2254
2255 if (!tg3_flag(tp, 5705_PLUS) ||
2256 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2257 return;
2258
2259 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2260 u32 ephy;
2261
2262 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2263 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2264
2265 tg3_writephy(tp, MII_TG3_FET_TEST,
2266 ephy | MII_TG3_FET_SHADOW_EN);
2267 if (!tg3_readphy(tp, reg, &phy)) {
2268 if (enable)
2269 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2270 else
2271 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2272 tg3_writephy(tp, reg, phy);
2273 }
2274 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2275 }
2276 } else {
2277 int ret;
2278
2279 ret = tg3_phy_auxctl_read(tp,
2280 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2281 if (!ret) {
2282 if (enable)
2283 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2284 else
2285 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2286 tg3_phy_auxctl_write(tp,
2287 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2288 }
2289 }
2290 }
2291
2292 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2293 {
2294 int ret;
2295 u32 val;
2296
2297 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2298 return;
2299
2300 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2301 if (!ret)
2302 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2303 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2304 }
2305
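/* Program factory calibration data from the chip's OTP word into the
* PHY DSP. Each field (AGC target, HPF filter/override, LPF disable,
* VDAC, 10BT amplitude, resistor offsets) is extracted with its mask
* and shift, then written through the auxctl SMDSP window, which must
* be opened before and closed after the writes.
*/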
2306 static void tg3_phy_apply_otp(struct tg3 *tp)
2307 {
2308 u32 otp, phy;
2309
2310 if (!tp->phy_otp)
2311 return;
2312
2313 otp = tp->phy_otp;
2314
2315 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2316 return;
2317
2318 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2319 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2320 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2321
2322 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2323 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2324 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2325
2326 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2327 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2328 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2329
2330 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2331 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2332
2333 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2334 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2335
2336 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2337 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2338 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2339
2340 tg3_phy_toggle_auxctl_smdsp(tp, false);
2341 }
2342
2343 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2344 {
2345 u32 val;
2346 struct ethtool_eee *dest = &tp->eee;
2347
2348 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2349 return;
2350
2351 if (eee)
2352 dest = eee;
2353
2354 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2355 return;
2356
2357 /* Pull eee_active */
2358 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2359 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2360 dest->eee_active = 1;
2361 else
2362 dest->eee_active = 0;
2363
2364 /* Pull lp advertised settings */
2365 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2366 return;
2367 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2368
2369 /* Pull advertised and eee_enabled settings */
2370 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2371 return;
2372 dest->eee_enabled = !!val;
2373 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2374
2375 /* Pull tx_lpi_enabled */
2376 val = tr32(TG3_CPMU_EEE_MODE);
2377 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2378
2379 /* Pull lpi timer value */
2380 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2381 }
2382
2383 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2384 {
2385 u32 val;
2386
2387 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2388 return;
2389
2390 tp->setlpicnt = 0;
2391
2392 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2393 current_link_up &&
2394 tp->link_config.active_duplex == DUPLEX_FULL &&
2395 (tp->link_config.active_speed == SPEED_100 ||
2396 tp->link_config.active_speed == SPEED_1000)) {
2397 u32 eeectl;
2398
2399 if (tp->link_config.active_speed == SPEED_1000)
2400 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2401 else
2402 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2403
2404 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2405
2406 tg3_eee_pull_config(tp, NULL);
2407 if (tp->eee.eee_active)
2408 tp->setlpicnt = 2;
2409 }
2410
2411 if (!tp->setlpicnt) {
2412 if (current_link_up &&
2413 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2414 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2415 tg3_phy_toggle_auxctl_smdsp(tp, false);
2416 }
2417
2418 val = tr32(TG3_CPMU_EEE_MODE);
2419 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2420 }
2421 }
2422
2423 static void tg3_phy_eee_enable(struct tg3 *tp)
2424 {
2425 u32 val;
2426
2427 if (tp->link_config.active_speed == SPEED_1000 &&
2428 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2429 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2430 tg3_flag(tp, 57765_CLASS)) &&
2431 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2432 val = MII_TG3_DSP_TAP26_ALNOKO |
2433 MII_TG3_DSP_TAP26_RMRXSTO;
2434 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2435 tg3_phy_toggle_auxctl_smdsp(tp, false);
2436 }
2437
2438 val = tr32(TG3_CPMU_EEE_MODE);
2439 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2440 }
2441
2442 static int tg3_wait_macro_done(struct tg3 *tp)
2443 {
2444 int limit = 100;
2445
2446 while (limit--) {
2447 u32 tmp32;
2448
2449 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2450 if ((tmp32 & 0x1000) == 0)
2451 break;
2452 }
2453 }
2454 if (limit < 0)
2455 return -EBUSY;
2456
2457 return 0;
2458 }
2459
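/* Write a known test pattern into each of the four DSP TAP channels
* (channel n is addressed at n * 0x2000 in the DSP address space) and
* read it back. A mismatch or macro timeout flags *resetp so that the
* caller can reset the PHY and try again.
*/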
2460 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2461 {
2462 static const u32 test_pat[4][6] = {
2463 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2464 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2465 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2466 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2467 };
2468 int chan;
2469
2470 for (chan = 0; chan < 4; chan++) {
2471 int i;
2472
2473 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2474 (chan * 0x2000) | 0x0200);
2475 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2476
2477 for (i = 0; i < 6; i++)
2478 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2479 test_pat[chan][i]);
2480
2481 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2482 if (tg3_wait_macro_done(tp)) {
2483 *resetp = 1;
2484 return -EBUSY;
2485 }
2486
2487 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2488 (chan * 0x2000) | 0x0200);
2489 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2490 if (tg3_wait_macro_done(tp)) {
2491 *resetp = 1;
2492 return -EBUSY;
2493 }
2494
2495 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2496 if (tg3_wait_macro_done(tp)) {
2497 *resetp = 1;
2498 return -EBUSY;
2499 }
2500
2501 for (i = 0; i < 6; i += 2) {
2502 u32 low, high;
2503
2504 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2505 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2506 tg3_wait_macro_done(tp)) {
2507 *resetp = 1;
2508 return -EBUSY;
2509 }
2510 low &= 0x7fff;
2511 high &= 0x000f;
2512 if (low != test_pat[chan][i] ||
2513 high != test_pat[chan][i+1]) {
2514 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2515 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2516 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2517
2518 return -EBUSY;
2519 }
2520 }
2521 }
2522
2523 return 0;
2524 }
2525
2526 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2527 {
2528 int chan;
2529
2530 for (chan = 0; chan < 4; chan++) {
2531 int i;
2532
2533 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2534 (chan * 0x2000) | 0x0200);
2535 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2536 for (i = 0; i < 6; i++)
2537 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2538 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2539 if (tg3_wait_macro_done(tp))
2540 return -EBUSY;
2541 }
2542
2543 return 0;
2544 }
2545
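/* Per-channel DSP workaround for 5703/5704/5705 PHYs. The transmitter
* and interrupt are disabled, the PHY is forced to 1000 Mbps
* full-duplex master, PHY control access is blocked (DSP reg 0x8005 =
* 0x0800), and the channel test patterns are verified, retrying with
* a fresh PHY reset up to ten times. The original MII_CTRL1000 and
* MII_TG3_EXT_CTRL settings are restored afterwards.
*/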
2546 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2547 {
2548 u32 reg32, phy9_orig;
2549 int retries, do_phy_reset, err;
2550
2551 retries = 10;
2552 do_phy_reset = 1;
2553 do {
2554 if (do_phy_reset) {
2555 err = tg3_bmcr_reset(tp);
2556 if (err)
2557 return err;
2558 do_phy_reset = 0;
2559 }
2560
2561 /* Disable transmitter and interrupt. */
2562 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2563 continue;
2564
2565 reg32 |= 0x3000;
2566 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2567
2568 /* Set full-duplex, 1000 Mbps. */
2569 tg3_writephy(tp, MII_BMCR,
2570 BMCR_FULLDPLX | BMCR_SPEED1000);
2571
2572 /* Set to master mode. */
2573 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2574 continue;
2575
2576 tg3_writephy(tp, MII_CTRL1000,
2577 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2578
2579 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2580 if (err)
2581 return err;
2582
2583 /* Block the PHY control access. */
2584 tg3_phydsp_write(tp, 0x8005, 0x0800);
2585
2586 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2587 if (!err)
2588 break;
2589 } while (--retries);
2590
2591 err = tg3_phy_reset_chanpat(tp);
2592 if (err)
2593 return err;
2594
2595 tg3_phydsp_write(tp, 0x8005, 0x0000);
2596
2597 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2598 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2599
2600 tg3_phy_toggle_auxctl_smdsp(tp, false);
2601
2602 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2603
2604 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2605 if (err)
2606 return err;
2607
2608 reg32 &= ~0x3000;
2609 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2610
2611 return 0;
2612 }
2613
2614 static void tg3_carrier_off(struct tg3 *tp)
2615 {
2616 netif_carrier_off(tp->dev);
2617 tp->link_up = false;
2618 }
2619
2620 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2621 {
2622 if (tg3_flag(tp, ENABLE_ASF))
2623 netdev_warn(tp->dev,
2624 "Management side-band traffic will be interrupted during phy settings change\n");
2625 }
2626
2627 /* Reset the tigon3 PHY unconditionally and reapply all
2628 * chip-specific workarounds.
2629 */
2630 static int tg3_phy_reset(struct tg3 *tp)
2631 {
2632 u32 val, cpmuctrl;
2633 int err;
2634
2635 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2636 val = tr32(GRC_MISC_CFG);
2637 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2638 udelay(40);
2639 }
2640 err = tg3_readphy(tp, MII_BMSR, &val);
2641 err |= tg3_readphy(tp, MII_BMSR, &val);
2642 if (err != 0)
2643 return -EBUSY;
2644
2645 if (netif_running(tp->dev) && tp->link_up) {
2646 netif_carrier_off(tp->dev);
2647 tg3_link_report(tp);
2648 }
2649
2650 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2651 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2652 tg3_asic_rev(tp) == ASIC_REV_5705) {
2653 err = tg3_phy_reset_5703_4_5(tp);
2654 if (err)
2655 return err;
2656 goto out;
2657 }
2658
2659 cpmuctrl = 0;
2660 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2661 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2662 cpmuctrl = tr32(TG3_CPMU_CTRL);
2663 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2664 tw32(TG3_CPMU_CTRL,
2665 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2666 }
2667
2668 err = tg3_bmcr_reset(tp);
2669 if (err)
2670 return err;
2671
2672 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2673 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2674 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2675
2676 tw32(TG3_CPMU_CTRL, cpmuctrl);
2677 }
2678
2679 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2680 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2681 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2682 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2683 CPMU_LSPD_1000MB_MACCLK_12_5) {
2684 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2685 udelay(40);
2686 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2687 }
2688 }
2689
2690 if (tg3_flag(tp, 5717_PLUS) &&
2691 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2692 return 0;
2693
2694 tg3_phy_apply_otp(tp);
2695
2696 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2697 tg3_phy_toggle_apd(tp, true);
2698 else
2699 tg3_phy_toggle_apd(tp, false);
2700
2701 out:
2702 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2703 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2704 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2705 tg3_phydsp_write(tp, 0x000a, 0x0323);
2706 tg3_phy_toggle_auxctl_smdsp(tp, false);
2707 }
2708
2709 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2710 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2711 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2712 }
2713
2714 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2715 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2716 tg3_phydsp_write(tp, 0x000a, 0x310b);
2717 tg3_phydsp_write(tp, 0x201f, 0x9506);
2718 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2719 tg3_phy_toggle_auxctl_smdsp(tp, false);
2720 }
2721 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2722 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2723 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2724 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2725 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2726 tg3_writephy(tp, MII_TG3_TEST1,
2727 MII_TG3_TEST1_TRIM_EN | 0x4);
2728 } else
2729 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2730
2731 tg3_phy_toggle_auxctl_smdsp(tp, false);
2732 }
2733 }
2734
2735 /* Set the extended packet length bit (bit 14) on all chips
2736 * that support jumbo frames. */
2737 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2738 /* Cannot do read-modify-write on 5401 */
2739 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2740 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2741 /* Set bit 14 with read-modify-write to preserve other bits */
2742 err = tg3_phy_auxctl_read(tp,
2743 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2744 if (!err)
2745 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2746 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2747 }
2748
2749 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2750 * jumbo frames transmission.
2751 */
2752 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2753 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2754 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2755 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2756 }
2757
2758 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2759 /* adjust output voltage */
2760 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2761 }
2762
2763 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2764 tg3_phydsp_write(tp, 0xffb, 0x4000);
2765
2766 tg3_phy_toggle_automdix(tp, true);
2767 tg3_phy_set_wirespeed(tp);
2768 return 0;
2769 }
2770
2771 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2772 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2773 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2774 TG3_GPIO_MSG_NEED_VAUX)
2775 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2776 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2777 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2778 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2779 (TG3_GPIO_MSG_DRVR_PRES << 12))
2780
2781 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2782 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2783 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2784 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2785 (TG3_GPIO_MSG_NEED_VAUX << 12))
2786
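/* The shared status word carries one 4-bit field per PCI function
* (hence the << 0/4/8/12 masks above), each holding that function's
* DRVR_PRES and NEED_VAUX bits. On 5717/5719 the word lives in an APE
* scratchpad register; other chips keep it in TG3_CPMU_DRV_STATUS.
* The helper below updates this function's field and returns the
* status of all functions.
*/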
2787 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2788 {
2789 u32 status, shift;
2790
2791 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2792 tg3_asic_rev(tp) == ASIC_REV_5719)
2793 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2794 else
2795 status = tr32(TG3_CPMU_DRV_STATUS);
2796
2797 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2798 status &= ~(TG3_GPIO_MSG_MASK << shift);
2799 status |= (newstat << shift);
2800
2801 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2802 tg3_asic_rev(tp) == ASIC_REV_5719)
2803 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2804 else
2805 tw32(TG3_CPMU_DRV_STATUS, status);
2806
2807 return status >> TG3_APE_GPIO_MSG_SHIFT;
2808 }
2809
2810 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2811 {
2812 if (!tg3_flag(tp, IS_NIC))
2813 return 0;
2814
2815 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2816 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2817 tg3_asic_rev(tp) == ASIC_REV_5720) {
2818 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2819 return -EIO;
2820
2821 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2822
2823 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2824 TG3_GRC_LCLCTL_PWRSW_DELAY);
2825
2826 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2827 } else {
2828 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2829 TG3_GRC_LCLCTL_PWRSW_DELAY);
2830 }
2831
2832 return 0;
2833 }
2834
2835 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2836 {
2837 u32 grc_local_ctrl;
2838
2839 if (!tg3_flag(tp, IS_NIC) ||
2840 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2841 tg3_asic_rev(tp) == ASIC_REV_5701)
2842 return;
2843
2844 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2845
2846 tw32_wait_f(GRC_LOCAL_CTRL,
2847 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2848 TG3_GRC_LCLCTL_PWRSW_DELAY);
2849
2850 tw32_wait_f(GRC_LOCAL_CTRL,
2851 grc_local_ctrl,
2852 TG3_GRC_LCLCTL_PWRSW_DELAY);
2853
2854 tw32_wait_f(GRC_LOCAL_CTRL,
2855 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2856 TG3_GRC_LCLCTL_PWRSW_DELAY);
2857 }
2858
2859 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2860 {
2861 if (!tg3_flag(tp, IS_NIC))
2862 return;
2863
2864 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2865 tg3_asic_rev(tp) == ASIC_REV_5701) {
2866 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2867 (GRC_LCLCTRL_GPIO_OE0 |
2868 GRC_LCLCTRL_GPIO_OE1 |
2869 GRC_LCLCTRL_GPIO_OE2 |
2870 GRC_LCLCTRL_GPIO_OUTPUT0 |
2871 GRC_LCLCTRL_GPIO_OUTPUT1),
2872 TG3_GRC_LCLCTL_PWRSW_DELAY);
2873 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2874 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2875 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2876 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2877 GRC_LCLCTRL_GPIO_OE1 |
2878 GRC_LCLCTRL_GPIO_OE2 |
2879 GRC_LCLCTRL_GPIO_OUTPUT0 |
2880 GRC_LCLCTRL_GPIO_OUTPUT1 |
2881 tp->grc_local_ctrl;
2882 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2883 TG3_GRC_LCLCTL_PWRSW_DELAY);
2884
2885 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2886 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2887 TG3_GRC_LCLCTL_PWRSW_DELAY);
2888
2889 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2890 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2891 TG3_GRC_LCLCTL_PWRSW_DELAY);
2892 } else {
2893 u32 no_gpio2;
2894 u32 grc_local_ctrl = 0;
2895
2896 /* Workaround to prevent overdrawing current. */
2897 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2898 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2899 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2900 grc_local_ctrl,
2901 TG3_GRC_LCLCTL_PWRSW_DELAY);
2902 }
2903
2904 /* On 5753 and variants, GPIO2 cannot be used. */
2905 no_gpio2 = tp->nic_sram_data_cfg &
2906 NIC_SRAM_DATA_CFG_NO_GPIO2;
2907
2908 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2909 GRC_LCLCTRL_GPIO_OE1 |
2910 GRC_LCLCTRL_GPIO_OE2 |
2911 GRC_LCLCTRL_GPIO_OUTPUT1 |
2912 GRC_LCLCTRL_GPIO_OUTPUT2;
2913 if (no_gpio2) {
2914 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2915 GRC_LCLCTRL_GPIO_OUTPUT2);
2916 }
2917 tw32_wait_f(GRC_LOCAL_CTRL,
2918 tp->grc_local_ctrl | grc_local_ctrl,
2919 TG3_GRC_LCLCTL_PWRSW_DELAY);
2920
2921 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2922
2923 tw32_wait_f(GRC_LOCAL_CTRL,
2924 tp->grc_local_ctrl | grc_local_ctrl,
2925 TG3_GRC_LCLCTL_PWRSW_DELAY);
2926
2927 if (!no_gpio2) {
2928 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2929 tw32_wait_f(GRC_LOCAL_CTRL,
2930 tp->grc_local_ctrl | grc_local_ctrl,
2931 TG3_GRC_LCLCTL_PWRSW_DELAY);
2932 }
2933 }
2934 }
2935
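/* Power-source voting for multi-function 5717-class devices: this
* function publishes whether it needs Vaux (ASF, APE or WoL active).
* If any other function's driver is still present, that driver keeps
* control of the power source; otherwise switch to Vaux if any
* function asked for it, or let the device ride Vmain down.
*/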
2936 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2937 {
2938 u32 msg = 0;
2939
2940 /* Serialize power state transitions */
2941 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2942 return;
2943
2944 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2945 msg = TG3_GPIO_MSG_NEED_VAUX;
2946
2947 msg = tg3_set_function_status(tp, msg);
2948
2949 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2950 goto done;
2951
2952 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2953 tg3_pwrsrc_switch_to_vaux(tp);
2954 else
2955 tg3_pwrsrc_die_with_vmain(tp);
2956
2957 done:
2958 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2959 }
2960
2961 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2962 {
2963 bool need_vaux = false;
2964
2965 /* The GPIOs do something completely different on 57765. */
2966 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2967 return;
2968
2969 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2970 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2971 tg3_asic_rev(tp) == ASIC_REV_5720) {
2972 tg3_frob_aux_power_5717(tp, include_wol ?
2973 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2974 return;
2975 }
2976
2977 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2978 struct net_device *dev_peer;
2979
2980 dev_peer = pci_get_drvdata(tp->pdev_peer);
2981
2982 /* remove_one() may have been run on the peer. */
2983 if (dev_peer) {
2984 struct tg3 *tp_peer = netdev_priv(dev_peer);
2985
2986 if (tg3_flag(tp_peer, INIT_COMPLETE))
2987 return;
2988
2989 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2990 tg3_flag(tp_peer, ENABLE_ASF))
2991 need_vaux = true;
2992 }
2993 }
2994
2995 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2996 tg3_flag(tp, ENABLE_ASF))
2997 need_vaux = true;
2998
2999 if (need_vaux)
3000 tg3_pwrsrc_switch_to_vaux(tp);
3001 else
3002 tg3_pwrsrc_die_with_vmain(tp);
3003 }
3004
3005 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3006 {
3007 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3008 return 1;
3009 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3010 if (speed != SPEED_10)
3011 return 1;
3012 } else if (speed == SPEED_10)
3013 return 1;
3014
3015 return 0;
3016 }
3017
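/* Chips and PCI-function combinations on which powering down the PHY
* (BMCR_PDOWN) is known to misbehave; tg3_power_down_phy() checks
* this and leaves the PHY powered on the affected parts.
*/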
3018 static bool tg3_phy_power_bug(struct tg3 *tp)
3019 {
3020 switch (tg3_asic_rev(tp)) {
3021 case ASIC_REV_5700:
3022 case ASIC_REV_5704:
3023 return true;
3024 case ASIC_REV_5780:
3025 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3026 return true;
3027 return false;
3028 case ASIC_REV_5717:
3029 if (!tp->pci_fn)
3030 return true;
3031 return false;
3032 case ASIC_REV_5719:
3033 case ASIC_REV_5720:
3034 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3035 !tp->pci_fn)
3036 return true;
3037 return false;
3038 }
3039
3040 return false;
3041 }
3042
3043 static bool tg3_phy_led_bug(struct tg3 *tp)
3044 {
3045 switch (tg3_asic_rev(tp)) {
3046 case ASIC_REV_5719:
3047 case ASIC_REV_5720:
3048 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3049 !tp->pci_fn)
3050 return true;
3051 return false;
3052 }
3053
3054 return false;
3055 }
3056
3057 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3058 {
3059 u32 val;
3060
3061 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3062 return;
3063
3064 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3065 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3066 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3067 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3068
3069 sg_dig_ctrl |=
3070 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3071 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3072 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3073 }
3074 return;
3075 }
3076
3077 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3078 tg3_bmcr_reset(tp);
3079 val = tr32(GRC_MISC_CFG);
3080 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3081 udelay(40);
3082 return;
3083 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3084 u32 phytest;
3085 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3086 u32 phy;
3087
3088 tg3_writephy(tp, MII_ADVERTISE, 0);
3089 tg3_writephy(tp, MII_BMCR,
3090 BMCR_ANENABLE | BMCR_ANRESTART);
3091
3092 tg3_writephy(tp, MII_TG3_FET_TEST,
3093 phytest | MII_TG3_FET_SHADOW_EN);
3094 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3095 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3096 tg3_writephy(tp,
3097 MII_TG3_FET_SHDW_AUXMODE4,
3098 phy);
3099 }
3100 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3101 }
3102 return;
3103 } else if (do_low_power) {
3104 if (!tg3_phy_led_bug(tp))
3105 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3106 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3107
3108 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3109 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3110 MII_TG3_AUXCTL_PCTL_VREG_11V;
3111 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3112 }
3113
3114 /* On some chips the PHY must not be powered down; see
3115 * tg3_phy_power_bug() for the affected chips and functions.
3116 */
3117 if (tg3_phy_power_bug(tp))
3118 return;
3119
3120 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3121 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3122 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3123 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3124 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3125 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3126 }
3127
3128 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3129 }
3130
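/* NVRAM access is arbitrated between the driver and the on-chip
* bootcode/firmware through the hardware software-arbitration (SWARB)
* register: the driver requests REQ_SET1 and polls for GNT1 for up to
* 8000 * 20 usec (160 ms). The lock nests via tp->nvram_lock_cnt.
*/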
3131 /* tp->lock is held. */
3132 static int tg3_nvram_lock(struct tg3 *tp)
3133 {
3134 if (tg3_flag(tp, NVRAM)) {
3135 int i;
3136
3137 if (tp->nvram_lock_cnt == 0) {
3138 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3139 for (i = 0; i < 8000; i++) {
3140 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3141 break;
3142 udelay(20);
3143 }
3144 if (i == 8000) {
3145 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3146 return -ENODEV;
3147 }
3148 }
3149 tp->nvram_lock_cnt++;
3150 }
3151 return 0;
3152 }
3153
3154 /* tp->lock is held. */
3155 static void tg3_nvram_unlock(struct tg3 *tp)
3156 {
3157 if (tg3_flag(tp, NVRAM)) {
3158 if (tp->nvram_lock_cnt > 0)
3159 tp->nvram_lock_cnt--;
3160 if (tp->nvram_lock_cnt == 0)
3161 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3162 }
3163 }
3164
3165 /* tp->lock is held. */
3166 static void tg3_enable_nvram_access(struct tg3 *tp)
3167 {
3168 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3169 u32 nvaccess = tr32(NVRAM_ACCESS);
3170
3171 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3172 }
3173 }
3174
3175 /* tp->lock is held. */
3176 static void tg3_disable_nvram_access(struct tg3 *tp)
3177 {
3178 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3179 u32 nvaccess = tr32(NVRAM_ACCESS);
3180
3181 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3182 }
3183 }
3184
3185 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3186 u32 offset, u32 *val)
3187 {
3188 u32 tmp;
3189 int i;
3190
3191 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3192 return -EINVAL;
3193
3194 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3195 EEPROM_ADDR_DEVID_MASK |
3196 EEPROM_ADDR_READ);
3197 tw32(GRC_EEPROM_ADDR,
3198 tmp |
3199 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3200 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3201 EEPROM_ADDR_ADDR_MASK) |
3202 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3203
3204 for (i = 0; i < 1000; i++) {
3205 tmp = tr32(GRC_EEPROM_ADDR);
3206
3207 if (tmp & EEPROM_ADDR_COMPLETE)
3208 break;
3209 msleep(1);
3210 }
3211 if (!(tmp & EEPROM_ADDR_COMPLETE))
3212 return -EBUSY;
3213
3214 tmp = tr32(GRC_EEPROM_DATA);
3215
3216 /*
3217 * The data will always be opposite the native endian
3218 * format. Perform a blind byteswap to compensate.
3219 */
3220 *val = swab32(tmp);
3221
3222 return 0;
3223 }
3224
3225 #define NVRAM_CMD_TIMEOUT 5000
3226
3227 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3228 {
3229 int i;
3230
3231 tw32(NVRAM_CMD, nvram_cmd);
3232 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3233 usleep_range(10, 40);
3234 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3235 udelay(10);
3236 break;
3237 }
3238 }
3239
3240 if (i == NVRAM_CMD_TIMEOUT)
3241 return -EBUSY;
3242
3243 return 0;
3244 }
3245
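/* Atmel AT45DB0x1B-style DataFlash does not use a flat byte address:
* the page number sits above bit position ATMEL_AT45DB0X1B_PAGE_POS
* and the byte offset within the page sits below it. Since the page
* size is not a power of two, the two helpers below translate between
* a linear NVRAM offset and the device's page/offset form with a
* divide and modulo by tp->nvram_pagesize.
*/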
3246 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3247 {
3248 if (tg3_flag(tp, NVRAM) &&
3249 tg3_flag(tp, NVRAM_BUFFERED) &&
3250 tg3_flag(tp, FLASH) &&
3251 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3252 (tp->nvram_jedecnum == JEDEC_ATMEL))
3253
3254 addr = ((addr / tp->nvram_pagesize) <<
3255 ATMEL_AT45DB0X1B_PAGE_POS) +
3256 (addr % tp->nvram_pagesize);
3257
3258 return addr;
3259 }
3260
3261 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3262 {
3263 if (tg3_flag(tp, NVRAM) &&
3264 tg3_flag(tp, NVRAM_BUFFERED) &&
3265 tg3_flag(tp, FLASH) &&
3266 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3267 (tp->nvram_jedecnum == JEDEC_ATMEL))
3268
3269 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3270 tp->nvram_pagesize) +
3271 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3272
3273 return addr;
3274 }
3275
3276 /* NOTE: Data read in from NVRAM is byteswapped according to
3277 * the byteswapping settings for all other register accesses.
3278 * tg3 devices are BE devices, so on a BE machine, the data
3279 * returned will be exactly as it is seen in NVRAM. On a LE
3280 * machine, the 32-bit value will be byteswapped.
3281 */
3282 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3283 {
3284 int ret;
3285
3286 if (!tg3_flag(tp, NVRAM))
3287 return tg3_nvram_read_using_eeprom(tp, offset, val);
3288
3289 offset = tg3_nvram_phys_addr(tp, offset);
3290
3291 if (offset > NVRAM_ADDR_MSK)
3292 return -EINVAL;
3293
3294 ret = tg3_nvram_lock(tp);
3295 if (ret)
3296 return ret;
3297
3298 tg3_enable_nvram_access(tp);
3299
3300 tw32(NVRAM_ADDR, offset);
3301 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3302 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3303
3304 if (ret == 0)
3305 *val = tr32(NVRAM_RDDATA);
3306
3307 tg3_disable_nvram_access(tp);
3308
3309 tg3_nvram_unlock(tp);
3310
3311 return ret;
3312 }
3313
3314 /* Ensures NVRAM data is in bytestream format. */
3315 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3316 {
3317 u32 v;
3318 int res = tg3_nvram_read(tp, offset, &v);
3319 if (!res)
3320 *val = cpu_to_be32(v);
3321 return res;
3322 }
3323
3324 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3325 u32 offset, u32 len, u8 *buf)
3326 {
3327 int i, j, rc = 0;
3328 u32 val;
3329
3330 for (i = 0; i < len; i += 4) {
3331 u32 addr;
3332 __be32 data;
3333
3334 addr = offset + i;
3335
3336 memcpy(&data, buf + i, 4);
3337
3338 /*
3339 * The SEEPROM interface expects the data to always be opposite
3340 * the native endian format. We accomplish this by reversing
3341 * all the operations that would have been performed on the
3342 * data from a call to tg3_nvram_read_be32().
3343 */
3344 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3345
3346 val = tr32(GRC_EEPROM_ADDR);
3347 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3348
3349 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3350 EEPROM_ADDR_READ);
3351 tw32(GRC_EEPROM_ADDR, val |
3352 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3353 (addr & EEPROM_ADDR_ADDR_MASK) |
3354 EEPROM_ADDR_START |
3355 EEPROM_ADDR_WRITE);
3356
3357 for (j = 0; j < 1000; j++) {
3358 val = tr32(GRC_EEPROM_ADDR);
3359
3360 if (val & EEPROM_ADDR_COMPLETE)
3361 break;
3362 msleep(1);
3363 }
3364 if (!(val & EEPROM_ADDR_COMPLETE)) {
3365 rc = -EBUSY;
3366 break;
3367 }
3368 }
3369
3370 return rc;
3371 }
3372
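/* Unbuffered flash parts can only be programmed a full page at a
* time, so writes are done read-modify-write: read the enclosing page
* into a scratch buffer, merge in the new data, issue a write enable,
* erase the page, then program it back one word at a time with
* FIRST/LAST framing on the page boundaries.
*/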
3373 /* offset and length are dword aligned */
3374 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3375 u8 *buf)
3376 {
3377 int ret = 0;
3378 u32 pagesize = tp->nvram_pagesize;
3379 u32 pagemask = pagesize - 1;
3380 u32 nvram_cmd;
3381 u8 *tmp;
3382
3383 tmp = kmalloc(pagesize, GFP_KERNEL);
3384 if (tmp == NULL)
3385 return -ENOMEM;
3386
3387 while (len) {
3388 int j;
3389 u32 phy_addr, page_off, size;
3390
3391 phy_addr = offset & ~pagemask;
3392
3393 for (j = 0; j < pagesize; j += 4) {
3394 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3395 (__be32 *) (tmp + j));
3396 if (ret)
3397 break;
3398 }
3399 if (ret)
3400 break;
3401
3402 page_off = offset & pagemask;
3403 size = pagesize;
3404 if (len < size)
3405 size = len;
3406
3407 len -= size;
3408
3409 memcpy(tmp + page_off, buf, size);
3410
3411 offset = offset + (pagesize - page_off);
3412
3413 tg3_enable_nvram_access(tp);
3414
3415 /*
3416 * Before we can erase the flash page, we need
3417 * to issue a special "write enable" command.
3418 */
3419 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3420
3421 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3422 break;
3423
3424 /* Erase the target page */
3425 tw32(NVRAM_ADDR, phy_addr);
3426
3427 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3428 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3429
3430 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3431 break;
3432
3433 /* Issue another write enable to start the write. */
3434 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3435
3436 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3437 break;
3438
3439 for (j = 0; j < pagesize; j += 4) {
3440 __be32 data;
3441
3442 data = *((__be32 *) (tmp + j));
3443
3444 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3445
3446 tw32(NVRAM_ADDR, phy_addr + j);
3447
3448 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3449 NVRAM_CMD_WR;
3450
3451 if (j == 0)
3452 nvram_cmd |= NVRAM_CMD_FIRST;
3453 else if (j == (pagesize - 4))
3454 nvram_cmd |= NVRAM_CMD_LAST;
3455
3456 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3457 if (ret)
3458 break;
3459 }
3460 if (ret)
3461 break;
3462 }
3463
3464 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3465 tg3_nvram_exec_cmd(tp, nvram_cmd);
3466
3467 kfree(tmp);
3468
3469 return ret;
3470 }
3471
3472 /* offset and length are dword aligned */
3473 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3474 u8 *buf)
3475 {
3476 int i, ret = 0;
3477
3478 for (i = 0; i < len; i += 4, offset += 4) {
3479 u32 page_off, phy_addr, nvram_cmd;
3480 __be32 data;
3481
3482 memcpy(&data, buf + i, 4);
3483 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3484
3485 page_off = offset % tp->nvram_pagesize;
3486
3487 phy_addr = tg3_nvram_phys_addr(tp, offset);
3488
3489 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3490
3491 if (page_off == 0 || i == 0)
3492 nvram_cmd |= NVRAM_CMD_FIRST;
3493 if (page_off == (tp->nvram_pagesize - 4))
3494 nvram_cmd |= NVRAM_CMD_LAST;
3495
3496 if (i == (len - 4))
3497 nvram_cmd |= NVRAM_CMD_LAST;
3498
3499 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3500 !tg3_flag(tp, FLASH) ||
3501 !tg3_flag(tp, 57765_PLUS))
3502 tw32(NVRAM_ADDR, phy_addr);
3503
3504 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3505 !tg3_flag(tp, 5755_PLUS) &&
3506 (tp->nvram_jedecnum == JEDEC_ST) &&
3507 (nvram_cmd & NVRAM_CMD_FIRST)) {
3508 u32 cmd;
3509
3510 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3511 ret = tg3_nvram_exec_cmd(tp, cmd);
3512 if (ret)
3513 break;
3514 }
3515 if (!tg3_flag(tp, FLASH)) {
3516 /* We always do complete word writes to eeprom. */
3517 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3518 }
3519
3520 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3521 if (ret)
3522 break;
3523 }
3524 return ret;
3525 }
3526
3527 /* offset and length are dword aligned */
3528 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3529 {
3530 int ret;
3531
3532 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3533 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3534 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3535 udelay(40);
3536 }
3537
3538 if (!tg3_flag(tp, NVRAM)) {
3539 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3540 } else {
3541 u32 grc_mode;
3542
3543 ret = tg3_nvram_lock(tp);
3544 if (ret)
3545 return ret;
3546
3547 tg3_enable_nvram_access(tp);
3548 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3549 tw32(NVRAM_WRITE1, 0x406);
3550
3551 grc_mode = tr32(GRC_MODE);
3552 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3553
3554 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3555 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3556 buf);
3557 } else {
3558 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3559 buf);
3560 }
3561
3562 grc_mode = tr32(GRC_MODE);
3563 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3564
3565 tg3_disable_nvram_access(tp);
3566 tg3_nvram_unlock(tp);
3567 }
3568
3569 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3570 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3571 udelay(40);
3572 }
3573
3574 return ret;
3575 }
3576
3577 #define RX_CPU_SCRATCH_BASE 0x30000
3578 #define RX_CPU_SCRATCH_SIZE 0x04000
3579 #define TX_CPU_SCRATCH_BASE 0x34000
3580 #define TX_CPU_SCRATCH_SIZE 0x04000
3581
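/* The on-chip RX and TX MIPS cores are halted by setting
* CPU_MODE_HALT and polling until the mode register reflects it.
* Chips from the 5705 on have no TX core (see the BUG_ON in
* tg3_halt_cpu()), and the 5906 is halted through GRC_VCPU_EXT_CTRL
* instead.
*/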
3582 /* tp->lock is held. */
3583 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3584 {
3585 int i;
3586 const int iters = 10000;
3587
3588 for (i = 0; i < iters; i++) {
3589 tw32(cpu_base + CPU_STATE, 0xffffffff);
3590 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3591 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3592 break;
3593 if (pci_channel_offline(tp->pdev))
3594 return -EBUSY;
3595 }
3596
3597 return (i == iters) ? -EBUSY : 0;
3598 }
3599
3600 /* tp->lock is held. */
3601 static int tg3_rxcpu_pause(struct tg3 *tp)
3602 {
3603 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3604
3605 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3606 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3607 udelay(10);
3608
3609 return rc;
3610 }
3611
3612 /* tp->lock is held. */
3613 static int tg3_txcpu_pause(struct tg3 *tp)
3614 {
3615 return tg3_pause_cpu(tp, TX_CPU_BASE);
3616 }
3617
3618 /* tp->lock is held. */
3619 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3620 {
3621 tw32(cpu_base + CPU_STATE, 0xffffffff);
3622 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3623 }
3624
3625 /* tp->lock is held. */
3626 static void tg3_rxcpu_resume(struct tg3 *tp)
3627 {
3628 tg3_resume_cpu(tp, RX_CPU_BASE);
3629 }
3630
3631 /* tp->lock is held. */
3632 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3633 {
3634 int rc;
3635
3636 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3637
3638 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3639 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3640
3641 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3642 return 0;
3643 }
3644 if (cpu_base == RX_CPU_BASE) {
3645 rc = tg3_rxcpu_pause(tp);
3646 } else {
3647 /*
3648 * There is only an Rx CPU for the 5750 derivative in the
3649 * BCM4785.
3650 */
3651 if (tg3_flag(tp, IS_SSB_CORE))
3652 return 0;
3653
3654 rc = tg3_txcpu_pause(tp);
3655 }
3656
3657 if (rc) {
3658 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3659 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3660 return -ENODEV;
3661 }
3662
3663 /* Clear firmware's nvram arbitration. */
3664 if (tg3_flag(tp, NVRAM))
3665 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3666 return 0;
3667 }
3668
3669 static int tg3_fw_data_len(struct tg3 *tp,
3670 const struct tg3_firmware_hdr *fw_hdr)
3671 {
3672 int fw_len;
3673
3674 /* Non-fragmented firmware has one firmware header followed by a
3675 * contiguous chunk of data to be written. The length field in that
3676 * header is not the length of the data to be written but the
3677 * complete length of the bss. The data length is therefore derived
3678 * from tp->fw->size minus the header.
3679 *
3680 * Fragmented firmware has a main header followed by multiple
3681 * fragments. Each fragment looks like a non-fragmented image: a
3682 * firmware header followed by a contiguous chunk of data. In the
3683 * main header, the length field is unused and set to 0xffffffff.
3684 * In each fragment header, the length is the entire size of that
3685 * fragment, i.e. fragment data plus header. The data length is
3686 * therefore the header's length field minus TG3_FW_HDR_LEN.
3687 */
3688 if (tp->fw_len == 0xffffffff)
3689 fw_len = be32_to_cpu(fw_hdr->len);
3690 else
3691 fw_len = tp->fw->size;
3692
3693 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3694 }
3695
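/* For reference, the header parsed here is (as defined in tg3.h):
*
*	struct tg3_firmware_hdr {
*		__be32 version;		/- unused by fragments -/
*		__be32 base_addr;
*		__be32 len;
*	};
*
* with TG3_FW_HDR_LEN == sizeof(struct tg3_firmware_hdr).
*/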
3696 /* tp->lock is held. */
3697 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3698 u32 cpu_scratch_base, int cpu_scratch_size,
3699 const struct tg3_firmware_hdr *fw_hdr)
3700 {
3701 int err, i;
3702 void (*write_op)(struct tg3 *, u32, u32);
3703 int total_len = tp->fw->size;
3704
3705 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3706 netdev_err(tp->dev,
3707 "%s: Trying to load TX cpu firmware which is 5705\n",
3708 __func__);
3709 return -EINVAL;
3710 }
3711
3712 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3713 write_op = tg3_write_mem;
3714 else
3715 write_op = tg3_write_indirect_reg32;
3716
3717 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3718 /* It is possible that bootcode is still loading at this point.
3719 * Get the nvram lock first before halting the cpu.
3720 */
3721 int lock_err = tg3_nvram_lock(tp);
3722 err = tg3_halt_cpu(tp, cpu_base);
3723 if (!lock_err)
3724 tg3_nvram_unlock(tp);
3725 if (err)
3726 goto out;
3727
3728 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3729 write_op(tp, cpu_scratch_base + i, 0);
3730 tw32(cpu_base + CPU_STATE, 0xffffffff);
3731 tw32(cpu_base + CPU_MODE,
3732 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3733 } else {
3734 /* Subtract additional main header for fragmented firmware and
3735 * advance to the first fragment
3736 */
3737 total_len -= TG3_FW_HDR_LEN;
3738 fw_hdr++;
3739 }
3740
3741 do {
3742 u32 *fw_data = (u32 *)(fw_hdr + 1);
3743 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3744 write_op(tp, cpu_scratch_base +
3745 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3746 (i * sizeof(u32)),
3747 be32_to_cpu(fw_data[i]));
3748
3749 total_len -= be32_to_cpu(fw_hdr->len);
3750
3751 /* Advance to next fragment */
3752 fw_hdr = (struct tg3_firmware_hdr *)
3753 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3754 } while (total_len > 0);
3755
3756 err = 0;
3757
3758 out:
3759 return err;
3760 }
3761
3762 /* tp->lock is held. */
3763 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3764 {
3765 int i;
3766 const int iters = 5;
3767
3768 tw32(cpu_base + CPU_STATE, 0xffffffff);
3769 tw32_f(cpu_base + CPU_PC, pc);
3770
3771 for (i = 0; i < iters; i++) {
3772 if (tr32(cpu_base + CPU_PC) == pc)
3773 break;
3774 tw32(cpu_base + CPU_STATE, 0xffffffff);
3775 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3776 tw32_f(cpu_base + CPU_PC, pc);
3777 udelay(1000);
3778 }
3779
3780 return (i == iters) ? -EBUSY : 0;
3781 }
3782
3783 /* tp->lock is held. */
3784 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3785 {
3786 const struct tg3_firmware_hdr *fw_hdr;
3787 int err;
3788
3789 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3790
3791 /* Firmware blob starts with version numbers, followed by
3792 * start address and length. We are setting the complete length:
3793 * length = end_address_of_bss - start_address_of_text.
3794 * The remainder is the blob to be loaded contiguously
3795 * from the start address. */
3796
3797 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3798 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3799 fw_hdr);
3800 if (err)
3801 return err;
3802
3803 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3804 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3805 fw_hdr);
3806 if (err)
3807 return err;
3808
3809 /* Now startup only the RX cpu. */
3810 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3811 be32_to_cpu(fw_hdr->base_addr));
3812 if (err) {
3813 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3814 "should be %08x\n", __func__,
3815 tr32(RX_CPU_BASE + CPU_PC),
3816 be32_to_cpu(fw_hdr->base_addr));
3817 return -ENODEV;
3818 }
3819
3820 tg3_rxcpu_resume(tp);
3821
3822 return 0;
3823 }
3824
3825 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3826 {
3827 const int iters = 1000;
3828 int i;
3829 u32 val;
3830
3831 /* Wait for boot code to complete initialization and enter service
3832 * loop. It is then safe to download service patches.
3833 */
3834 for (i = 0; i < iters; i++) {
3835 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3836 break;
3837
3838 udelay(10);
3839 }
3840
3841 if (i == iters) {
3842 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3843 return -EBUSY;
3844 }
3845
3846 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3847 if (val & 0xff) {
3848 netdev_warn(tp->dev,
3849 "Other patches exist. Not downloading EEE patch\n");
3850 return -EEXIST;
3851 }
3852
3853 return 0;
3854 }
3855
3856 /* tp->lock is held. */
3857 static void tg3_load_57766_firmware(struct tg3 *tp)
3858 {
3859 struct tg3_firmware_hdr *fw_hdr;
3860
3861 if (!tg3_flag(tp, NO_NVRAM))
3862 return;
3863
3864 if (tg3_validate_rxcpu_state(tp))
3865 return;
3866
3867 if (!tp->fw)
3868 return;
3869
3870 /* This firmware blob has a different format from older firmware
3871 * releases, as described below. The main difference is that the
3872 * data is fragmented and written to non-contiguous locations.
3873 *
3874 * It begins with a firmware header identical to other firmware,
3875 * consisting of version, base addr and length. The length here is
3876 * unused and set to 0xffffffff.
3877 *
3878 * This is followed by a series of firmware fragments, each
3879 * individually identical to older firmware images: a firmware
3880 * header followed by the data for that fragment. The version field
3881 * of the individual fragment headers is unused.
3882 */
3883
3884 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3885 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3886 return;
3887
3888 if (tg3_rxcpu_pause(tp))
3889 return;
3890
3891 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3892 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3893
3894 tg3_rxcpu_resume(tp);
3895 }
3896
3897 /* tp->lock is held. */
3898 static int tg3_load_tso_firmware(struct tg3 *tp)
3899 {
3900 const struct tg3_firmware_hdr *fw_hdr;
3901 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3902 int err;
3903
3904 if (!tg3_flag(tp, FW_TSO))
3905 return 0;
3906
3907 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3908
3909 /* Firmware blob starts with version numbers, followed by
3910 * start address and length. We are setting the complete length:
3911 * length = end_address_of_bss - start_address_of_text.
3912 * The remainder is the blob to be loaded contiguously
3913 * from the start address. */
3914
3915 cpu_scratch_size = tp->fw_len;
3916
3917 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3918 cpu_base = RX_CPU_BASE;
3919 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3920 } else {
3921 cpu_base = TX_CPU_BASE;
3922 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3923 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3924 }
3925
3926 err = tg3_load_firmware_cpu(tp, cpu_base,
3927 cpu_scratch_base, cpu_scratch_size,
3928 fw_hdr);
3929 if (err)
3930 return err;
3931
3932 /* Now startup the cpu. */
3933 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3934 be32_to_cpu(fw_hdr->base_addr));
3935 if (err) {
3936 netdev_err(tp->dev,
3937 "%s fails to set CPU PC, is %08x should be %08x\n",
3938 __func__, tr32(cpu_base + CPU_PC),
3939 be32_to_cpu(fw_hdr->base_addr));
3940 return -ENODEV;
3941 }
3942
3943 tg3_resume_cpu(tp, cpu_base);
3944 return 0;
3945 }
3946
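/* MAC addresses are programmed as a high word (bytes 0-1) and a low
* word (bytes 2-5) in paired registers. Indices 0-3 go to the
* standard MAC_ADDR_* slots; higher indices use the extended
* perfect-match slots at MAC_EXTADDR_*.
*/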
3947 /* tp->lock is held. */
3948 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3949 {
3950 u32 addr_high, addr_low;
3951
3952 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3953 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3954 (mac_addr[4] << 8) | mac_addr[5]);
3955
3956 if (index < 4) {
3957 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3958 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3959 } else {
3960 index -= 4;
3961 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3962 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3963 }
3964 }
3965
3966 /* tp->lock is held. */
3967 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3968 {
3969 u32 addr_high;
3970 int i;
3971
3972 for (i = 0; i < 4; i++) {
3973 if (i == 1 && skip_mac_1)
3974 continue;
3975 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3976 }
3977
3978 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3979 tg3_asic_rev(tp) == ASIC_REV_5704) {
3980 for (i = 4; i < 16; i++)
3981 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3982 }
3983
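/* Seed the transmit backoff pseudo-random generator from the sum
 * of the MAC address octets, so that stations with different
 * addresses tend to pick different backoff slots after a collision.
 */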
3984 addr_high = (tp->dev->dev_addr[0] +
3985 tp->dev->dev_addr[1] +
3986 tp->dev->dev_addr[2] +
3987 tp->dev->dev_addr[3] +
3988 tp->dev->dev_addr[4] +
3989 tp->dev->dev_addr[5]) &
3990 TX_BACKOFF_SEED_MASK;
3991 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3992 }
3993
3994 static void tg3_enable_register_access(struct tg3 *tp)
3995 {
3996 /*
3997 * Make sure register accesses (indirect or otherwise) will function
3998 * correctly.
3999 */
4000 pci_write_config_dword(tp->pdev,
4001 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4002 }
4003
4004 static int tg3_power_up(struct tg3 *tp)
4005 {
4006 int err;
4007
4008 tg3_enable_register_access(tp);
4009
4010 err = pci_set_power_state(tp->pdev, PCI_D0);
4011 if (!err) {
4012 /* Switch out of Vaux if it is a NIC */
4013 tg3_pwrsrc_switch_to_vmain(tp);
4014 } else {
4015 netdev_err(tp->dev, "Transition to D0 failed\n");
4016 }
4017
4018 return err;
4019 }
4020
4021 static int tg3_setup_phy(struct tg3 *, bool);
4022
4023 static int tg3_power_down_prepare(struct tg3 *tp)
4024 {
4025 u32 misc_host_ctrl;
4026 bool device_should_wake, do_low_power;
4027
4028 tg3_enable_register_access(tp);
4029
4030 /* Restore the CLKREQ setting. */
4031 if (tg3_flag(tp, CLKREQ_BUG))
4032 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4033 PCI_EXP_LNKCTL_CLKREQ_EN);
4034
4035 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4036 tw32(TG3PCI_MISC_HOST_CTRL,
4037 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4038
4039 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4040 tg3_flag(tp, WOL_ENABLE);
4041
4042 if (tg3_flag(tp, USE_PHYLIB)) {
4043 do_low_power = false;
4044 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4045 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4046 struct phy_device *phydev;
4047 u32 phyid, advertising;
4048
4049 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4050
4051 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4052
4053 tp->link_config.speed = phydev->speed;
4054 tp->link_config.duplex = phydev->duplex;
4055 tp->link_config.autoneg = phydev->autoneg;
4056 tp->link_config.advertising = phydev->advertising;
4057
4058 advertising = ADVERTISED_TP |
4059 ADVERTISED_Pause |
4060 ADVERTISED_Autoneg |
4061 ADVERTISED_10baseT_Half;
4062
4063 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4064 if (tg3_flag(tp, WOL_SPEED_100MB))
4065 advertising |=
4066 ADVERTISED_100baseT_Half |
4067 ADVERTISED_100baseT_Full |
4068 ADVERTISED_10baseT_Full;
4069 else
4070 advertising |= ADVERTISED_10baseT_Full;
4071 }
4072
4073 phydev->advertising = advertising;
4074
4075 phy_start_aneg(phydev);
4076
4077 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4078 if (phyid != PHY_ID_BCMAC131) {
4079 phyid &= PHY_BCM_OUI_MASK;
4080 if (phyid == PHY_BCM_OUI_1 ||
4081 phyid == PHY_BCM_OUI_2 ||
4082 phyid == PHY_BCM_OUI_3)
4083 do_low_power = true;
4084 }
4085 }
4086 } else {
4087 do_low_power = true;
4088
4089 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4090 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4091
4092 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4093 tg3_setup_phy(tp, false);
4094 }
4095
4096 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4097 u32 val;
4098
4099 val = tr32(GRC_VCPU_EXT_CTRL);
4100 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4101 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4102 int i;
4103 u32 val;
4104
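/* With no ASF agent running, give the boot code up to ~200ms to
 * post its magic value in the firmware status mailbox before
 * continuing with the shutdown.
 */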
4105 for (i = 0; i < 200; i++) {
4106 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4107 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4108 break;
4109 msleep(1);
4110 }
4111 }
4112 if (tg3_flag(tp, WOL_CAP))
4113 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4114 WOL_DRV_STATE_SHUTDOWN |
4115 WOL_DRV_WOL |
4116 WOL_SET_MAGIC_PKT);
4117
4118 if (device_should_wake) {
4119 u32 mac_mode;
4120
4121 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4122 if (do_low_power &&
4123 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4124 tg3_phy_auxctl_write(tp,
4125 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4126 MII_TG3_AUXCTL_PCTL_WOL_EN |
4127 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4128 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4129 udelay(40);
4130 }
4131
4132 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4133 mac_mode = MAC_MODE_PORT_MODE_GMII;
4134 else if (tp->phy_flags &
4135 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4136 if (tp->link_config.active_speed == SPEED_1000)
4137 mac_mode = MAC_MODE_PORT_MODE_GMII;
4138 else
4139 mac_mode = MAC_MODE_PORT_MODE_MII;
4140 } else
4141 mac_mode = MAC_MODE_PORT_MODE_MII;
4142
4143 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4144 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4145 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4146 SPEED_100 : SPEED_10;
4147 if (tg3_5700_link_polarity(tp, speed))
4148 mac_mode |= MAC_MODE_LINK_POLARITY;
4149 else
4150 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4151 }
4152 } else {
4153 mac_mode = MAC_MODE_PORT_MODE_TBI;
4154 }
4155
4156 if (!tg3_flag(tp, 5750_PLUS))
4157 tw32(MAC_LED_CTRL, tp->led_ctrl);
4158
4159 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4160 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4161 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4162 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4163
4164 if (tg3_flag(tp, ENABLE_APE))
4165 mac_mode |= MAC_MODE_APE_TX_EN |
4166 MAC_MODE_APE_RX_EN |
4167 MAC_MODE_TDE_ENABLE;
4168
4169 tw32_f(MAC_MODE, mac_mode);
4170 udelay(100);
4171
4172 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4173 udelay(10);
4174 }
4175
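/* Slow the core clocks down while in the low-power state. How far
 * we can go is chip-specific: 5700/5701 without 100Mb WOL can
 * power the PLL down entirely, 5780-class, CPMU-equipped and 5906
 * parts handle clock switching themselves, and the rest step the
 * clocks down in stages below.
 */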
4176 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4177 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4178 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4179 u32 base_val;
4180
4181 base_val = tp->pci_clock_ctrl;
4182 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4183 CLOCK_CTRL_TXCLK_DISABLE);
4184
4185 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4186 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4187 } else if (tg3_flag(tp, 5780_CLASS) ||
4188 tg3_flag(tp, CPMU_PRESENT) ||
4189 tg3_asic_rev(tp) == ASIC_REV_5906) {
4190 /* do nothing */
4191 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4192 u32 newbits1, newbits2;
4193
4194 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4195 tg3_asic_rev(tp) == ASIC_REV_5701) {
4196 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4197 CLOCK_CTRL_TXCLK_DISABLE |
4198 CLOCK_CTRL_ALTCLK);
4199 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4200 } else if (tg3_flag(tp, 5705_PLUS)) {
4201 newbits1 = CLOCK_CTRL_625_CORE;
4202 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4203 } else {
4204 newbits1 = CLOCK_CTRL_ALTCLK;
4205 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4206 }
4207
4208 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4209 40);
4210
4211 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4212 40);
4213
4214 if (!tg3_flag(tp, 5705_PLUS)) {
4215 u32 newbits3;
4216
4217 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4218 tg3_asic_rev(tp) == ASIC_REV_5701) {
4219 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4220 CLOCK_CTRL_TXCLK_DISABLE |
4221 CLOCK_CTRL_44MHZ_CORE);
4222 } else {
4223 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4224 }
4225
4226 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4227 tp->pci_clock_ctrl | newbits3, 40);
4228 }
4229 }
4230
4231 if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
4232 tg3_power_down_phy(tp, do_low_power);
4233
4234 tg3_frob_aux_power(tp, true);
4235
4236 /* Workaround for unstable PLL clock */
4237 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4238 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4239 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4240 u32 val = tr32(0x7d00);
4241
4242 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4243 tw32(0x7d00, val);
4244 if (!tg3_flag(tp, ENABLE_ASF)) {
4245 int err;
4246
4247 err = tg3_nvram_lock(tp);
4248 tg3_halt_cpu(tp, RX_CPU_BASE);
4249 if (!err)
4250 tg3_nvram_unlock(tp);
4251 }
4252 }
4253
4254 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4255
4256 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4257
4258 return 0;
4259 }
4260
4261 static void tg3_power_down(struct tg3 *tp)
4262 {
4263 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4264 pci_set_power_state(tp->pdev, PCI_D3hot);
4265 }
4266
4267 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4268 {
4269 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4270 case MII_TG3_AUX_STAT_10HALF:
4271 *speed = SPEED_10;
4272 *duplex = DUPLEX_HALF;
4273 break;
4274
4275 case MII_TG3_AUX_STAT_10FULL:
4276 *speed = SPEED_10;
4277 *duplex = DUPLEX_FULL;
4278 break;
4279
4280 case MII_TG3_AUX_STAT_100HALF:
4281 *speed = SPEED_100;
4282 *duplex = DUPLEX_HALF;
4283 break;
4284
4285 case MII_TG3_AUX_STAT_100FULL:
4286 *speed = SPEED_100;
4287 *duplex = DUPLEX_FULL;
4288 break;
4289
4290 case MII_TG3_AUX_STAT_1000HALF:
4291 *speed = SPEED_1000;
4292 *duplex = DUPLEX_HALF;
4293 break;
4294
4295 case MII_TG3_AUX_STAT_1000FULL:
4296 *speed = SPEED_1000;
4297 *duplex = DUPLEX_FULL;
4298 break;
4299
4300 default:
4301 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4302 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4303 SPEED_10;
4304 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4305 DUPLEX_HALF;
4306 break;
4307 }
4308 *speed = SPEED_UNKNOWN;
4309 *duplex = DUPLEX_UNKNOWN;
4310 break;
4311 }
4312 }
4313
4314 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4315 {
4316 int err = 0;
4317 u32 val, new_adv;
4318
4319 new_adv = ADVERTISE_CSMA;
4320 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4321 new_adv |= mii_advertise_flowctrl(flowctrl);
4322
4323 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4324 if (err)
4325 goto done;
4326
4327 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4328 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4329
4330 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4331 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4332 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4333
4334 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4335 if (err)
4336 goto done;
4337 }
4338
4339 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4340 goto done;
4341
4342 tw32(TG3_CPMU_EEE_MODE,
4343 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4344
4345 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4346 if (!err) {
4347 u32 err2;
4348
4349 val = 0;
4350 /* Advertise 100-BaseTX EEE ability */
4351 if (advertise & ADVERTISED_100baseT_Full)
4352 val |= MDIO_AN_EEE_ADV_100TX;
4353 /* Advertise 1000-BaseT EEE ability */
4354 if (advertise & ADVERTISED_1000baseT_Full)
4355 val |= MDIO_AN_EEE_ADV_1000T;
4356
4357 if (!tp->eee.eee_enabled) {
4358 val = 0;
4359 tp->eee.advertised = 0;
4360 } else {
4361 tp->eee.advertised = advertise &
4362 (ADVERTISED_100baseT_Full |
4363 ADVERTISED_1000baseT_Full);
4364 }
4365
4366 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4367 if (err)
4368 val = 0;
4369
4370 switch (tg3_asic_rev(tp)) {
4371 case ASIC_REV_5717:
4372 case ASIC_REV_57765:
4373 case ASIC_REV_57766:
4374 case ASIC_REV_5719:
4375 /* If we advertised any eee advertisements above... */
4376 if (val)
4377 val = MII_TG3_DSP_TAP26_ALNOKO |
4378 MII_TG3_DSP_TAP26_RMRXSTO |
4379 MII_TG3_DSP_TAP26_OPCSINPT;
4380 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4381 /* Fall through */
4382 case ASIC_REV_5720:
4383 case ASIC_REV_5762:
4384 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4385 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4386 MII_TG3_DSP_CH34TP2_HIBW01);
4387 }
4388
4389 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4390 if (!err)
4391 err = err2;
4392 }
4393
4394 done:
4395 return err;
4396 }
4397
4398 static void tg3_phy_copper_begin(struct tg3 *tp)
4399 {
4400 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4401 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4402 u32 adv, fc;
4403
4404 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4405 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4406 adv = ADVERTISED_10baseT_Half |
4407 ADVERTISED_10baseT_Full;
4408 if (tg3_flag(tp, WOL_SPEED_100MB))
4409 adv |= ADVERTISED_100baseT_Half |
4410 ADVERTISED_100baseT_Full;
4411 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4412 if (!(tp->phy_flags &
4413 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4414 adv |= ADVERTISED_1000baseT_Half;
4415 adv |= ADVERTISED_1000baseT_Full;
4416 }
4417
4418 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4419 } else {
4420 adv = tp->link_config.advertising;
4421 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4422 adv &= ~(ADVERTISED_1000baseT_Half |
4423 ADVERTISED_1000baseT_Full);
4424
4425 fc = tp->link_config.flowctrl;
4426 }
4427
4428 tg3_phy_autoneg_cfg(tp, adv, fc);
4429
4430 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4431 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4432 /* Normally during power down we want to autonegotiate
4433 * the lowest possible speed for WOL. However, to avoid
4434 * link flap, we leave it untouched.
4435 */
4436 return;
4437 }
4438
4439 tg3_writephy(tp, MII_BMCR,
4440 BMCR_ANENABLE | BMCR_ANRESTART);
4441 } else {
4442 int i;
4443 u32 bmcr, orig_bmcr;
4444
4445 tp->link_config.active_speed = tp->link_config.speed;
4446 tp->link_config.active_duplex = tp->link_config.duplex;
4447
4448 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4449 /* With autoneg disabled, 5715 only links up when the
4450 * advertisement register has the configured speed
4451 * enabled.
4452 */
4453 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4454 }
4455
4456 bmcr = 0;
4457 switch (tp->link_config.speed) {
4458 default:
4459 case SPEED_10:
4460 break;
4461
4462 case SPEED_100:
4463 bmcr |= BMCR_SPEED100;
4464 break;
4465
4466 case SPEED_1000:
4467 bmcr |= BMCR_SPEED1000;
4468 break;
4469 }
4470
4471 if (tp->link_config.duplex == DUPLEX_FULL)
4472 bmcr |= BMCR_FULLDPLX;
4473
4474 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4475 (bmcr != orig_bmcr)) {
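/* Kick the PHY into loopback first so the link drops, and poll
 * BMSR (latched, hence the double read) until the link is
 * confirmed down before programming the forced speed/duplex.
 */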
4476 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4477 for (i = 0; i < 1500; i++) {
4478 u32 tmp;
4479
4480 udelay(10);
4481 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4482 tg3_readphy(tp, MII_BMSR, &tmp))
4483 continue;
4484 if (!(tmp & BMSR_LSTATUS)) {
4485 udelay(40);
4486 break;
4487 }
4488 }
4489 tg3_writephy(tp, MII_BMCR, bmcr);
4490 udelay(40);
4491 }
4492 }
4493 }
4494
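/* Reconstruct tp->link_config from the PHY's current register
 * state, so that a link that has already been negotiated (e.g. by
 * the boot firmware) can be adopted without renegotiating.
 */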
4495 static int tg3_phy_pull_config(struct tg3 *tp)
4496 {
4497 int err;
4498 u32 val;
4499
4500 err = tg3_readphy(tp, MII_BMCR, &val);
4501 if (err)
4502 goto done;
4503
4504 if (!(val & BMCR_ANENABLE)) {
4505 tp->link_config.autoneg = AUTONEG_DISABLE;
4506 tp->link_config.advertising = 0;
4507 tg3_flag_clear(tp, PAUSE_AUTONEG);
4508
4509 err = -EIO;
4510
4511 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4512 case 0:
4513 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4514 goto done;
4515
4516 tp->link_config.speed = SPEED_10;
4517 break;
4518 case BMCR_SPEED100:
4519 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4520 goto done;
4521
4522 tp->link_config.speed = SPEED_100;
4523 break;
4524 case BMCR_SPEED1000:
4525 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4526 tp->link_config.speed = SPEED_1000;
4527 break;
4528 }
4529 /* Fall through */
4530 default:
4531 goto done;
4532 }
4533
4534 if (val & BMCR_FULLDPLX)
4535 tp->link_config.duplex = DUPLEX_FULL;
4536 else
4537 tp->link_config.duplex = DUPLEX_HALF;
4538
4539 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4540
4541 err = 0;
4542 goto done;
4543 }
4544
4545 tp->link_config.autoneg = AUTONEG_ENABLE;
4546 tp->link_config.advertising = ADVERTISED_Autoneg;
4547 tg3_flag_set(tp, PAUSE_AUTONEG);
4548
4549 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4550 u32 adv;
4551
4552 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4553 if (err)
4554 goto done;
4555
4556 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4557 tp->link_config.advertising |= adv | ADVERTISED_TP;
4558
4559 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4560 } else {
4561 tp->link_config.advertising |= ADVERTISED_FIBRE;
4562 }
4563
4564 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4565 u32 adv;
4566
4567 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4568 err = tg3_readphy(tp, MII_CTRL1000, &val);
4569 if (err)
4570 goto done;
4571
4572 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4573 } else {
4574 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4575 if (err)
4576 goto done;
4577
4578 adv = tg3_decode_flowctrl_1000X(val);
4579 tp->link_config.flowctrl = adv;
4580
4581 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4582 adv = mii_adv_to_ethtool_adv_x(val);
4583 }
4584
4585 tp->link_config.advertising |= adv;
4586 }
4587
4588 done:
4589 return err;
4590 }
4591
4592 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4593 {
4594 int err;
4595
4596 /* Turn off tap power management. */
4597 /* Set Extended packet length bit */
4598 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4599
4600 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4601 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4602 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4603 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4604 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4605
4606 udelay(40);
4607
4608 return err;
4609 }
4610
4611 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4612 {
4613 struct ethtool_eee eee;
4614
4615 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4616 return true;
4617
4618 tg3_eee_pull_config(tp, &eee);
4619
4620 if (tp->eee.eee_enabled) {
4621 if (tp->eee.advertised != eee.advertised ||
4622 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4623 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4624 return false;
4625 } else {
4626 /* EEE is disabled but we're advertising */
4627 if (eee.advertised)
4628 return false;
4629 }
4630
4631 return true;
4632 }
4633
4634 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4635 {
4636 u32 advmsk, tgtadv, advertising;
4637
4638 advertising = tp->link_config.advertising;
4639 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4640
4641 advmsk = ADVERTISE_ALL;
4642 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4643 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4644 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4645 }
4646
4647 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4648 return false;
4649
4650 if ((*lcladv & advmsk) != tgtadv)
4651 return false;
4652
4653 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4654 u32 tg3_ctrl;
4655
4656 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4657
4658 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4659 return false;
4660
4661 if (tgtadv &&
4662 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4663 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4664 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4665 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4666 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4667 } else {
4668 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4669 }
4670
4671 if (tg3_ctrl != tgtadv)
4672 return false;
4673 }
4674
4675 return true;
4676 }
4677
4678 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4679 {
4680 u32 lpeth = 0;
4681
4682 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4683 u32 val;
4684
4685 if (tg3_readphy(tp, MII_STAT1000, &val))
4686 return false;
4687
4688 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4689 }
4690
4691 if (tg3_readphy(tp, MII_LPA, rmtadv))
4692 return false;
4693
4694 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4695 tp->link_config.rmt_adv = lpeth;
4696
4697 return true;
4698 }
4699
4700 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4701 {
4702 if (curr_link_up != tp->link_up) {
4703 if (curr_link_up) {
4704 netif_carrier_on(tp->dev);
4705 } else {
4706 netif_carrier_off(tp->dev);
4707 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4708 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4709 }
4710
4711 tg3_link_report(tp);
4712 return true;
4713 }
4714
4715 return false;
4716 }
4717
4718 static void tg3_clear_mac_status(struct tg3 *tp)
4719 {
4720 tw32(MAC_EVENT, 0);
4721
4722 tw32_f(MAC_STATUS,
4723 MAC_STATUS_SYNC_CHANGED |
4724 MAC_STATUS_CFG_CHANGED |
4725 MAC_STATUS_MI_COMPLETION |
4726 MAC_STATUS_LNKSTATE_CHANGED);
4727 udelay(40);
4728 }
4729
4730 static void tg3_setup_eee(struct tg3 *tp)
4731 {
4732 u32 val;
4733
4734 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4735 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4736 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4737 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4738
4739 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4740
4741 tw32_f(TG3_CPMU_EEE_CTRL,
4742 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4743
4744 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4745 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4746 TG3_CPMU_EEEMD_LPI_IN_RX |
4747 TG3_CPMU_EEEMD_EEE_ENABLE;
4748
4749 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4750 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4751
4752 if (tg3_flag(tp, ENABLE_APE))
4753 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4754
4755 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4756
4757 tw32_f(TG3_CPMU_EEE_DBTMR1,
4758 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4759 (tp->eee.tx_lpi_timer & 0xffff));
4760
4761 tw32_f(TG3_CPMU_EEE_DBTMR2,
4762 TG3_CPMU_DBTMR2_APE_TX_2047US |
4763 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4764 }
4765
4766 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4767 {
4768 bool current_link_up;
4769 u32 bmsr, val;
4770 u32 lcl_adv, rmt_adv;
4771 u16 current_speed;
4772 u8 current_duplex;
4773 int i, err;
4774
4775 tg3_clear_mac_status(tp);
4776
4777 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4778 tw32_f(MAC_MI_MODE,
4779 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4780 udelay(80);
4781 }
4782
4783 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4784
4785 /* Some third-party PHYs need to be reset on link going
4786 * down.
4787 */
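/* Note that BMSR latches link-down events, which is why it is read
 * twice here and below: the first read clears any stale latched
 * state and the second returns the live link status.
 */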
4788 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4789 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4790 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4791 tp->link_up) {
4792 tg3_readphy(tp, MII_BMSR, &bmsr);
4793 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4794 !(bmsr & BMSR_LSTATUS))
4795 force_reset = true;
4796 }
4797 if (force_reset)
4798 tg3_phy_reset(tp);
4799
4800 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4801 tg3_readphy(tp, MII_BMSR, &bmsr);
4802 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4803 !tg3_flag(tp, INIT_COMPLETE))
4804 bmsr = 0;
4805
4806 if (!(bmsr & BMSR_LSTATUS)) {
4807 err = tg3_init_5401phy_dsp(tp);
4808 if (err)
4809 return err;
4810
4811 tg3_readphy(tp, MII_BMSR, &bmsr);
4812 for (i = 0; i < 1000; i++) {
4813 udelay(10);
4814 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4815 (bmsr & BMSR_LSTATUS)) {
4816 udelay(40);
4817 break;
4818 }
4819 }
4820
4821 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4822 TG3_PHY_REV_BCM5401_B0 &&
4823 !(bmsr & BMSR_LSTATUS) &&
4824 tp->link_config.active_speed == SPEED_1000) {
4825 err = tg3_phy_reset(tp);
4826 if (!err)
4827 err = tg3_init_5401phy_dsp(tp);
4828 if (err)
4829 return err;
4830 }
4831 }
4832 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4833 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4834 /* 5701 {A0,B0} CRC bug workaround */
4835 tg3_writephy(tp, 0x15, 0x0a75);
4836 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4837 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4838 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4839 }
4840
4841 /* Clear pending interrupts... */
4842 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4843 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4844
4845 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4846 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4847 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4848 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4849
4850 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4851 tg3_asic_rev(tp) == ASIC_REV_5701) {
4852 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4853 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4854 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4855 else
4856 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4857 }
4858
4859 current_link_up = false;
4860 current_speed = SPEED_UNKNOWN;
4861 current_duplex = DUPLEX_UNKNOWN;
4862 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4863 tp->link_config.rmt_adv = 0;
4864
4865 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4866 err = tg3_phy_auxctl_read(tp,
4867 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4868 &val);
4869 if (!err && !(val & (1 << 10))) {
4870 tg3_phy_auxctl_write(tp,
4871 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4872 val | (1 << 10));
4873 goto relink;
4874 }
4875 }
4876
4877 bmsr = 0;
4878 for (i = 0; i < 100; i++) {
4879 tg3_readphy(tp, MII_BMSR, &bmsr);
4880 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4881 (bmsr & BMSR_LSTATUS))
4882 break;
4883 udelay(40);
4884 }
4885
4886 if (bmsr & BMSR_LSTATUS) {
4887 u32 aux_stat, bmcr;
4888
4889 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4890 for (i = 0; i < 2000; i++) {
4891 udelay(10);
4892 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4893 aux_stat)
4894 break;
4895 }
4896
4897 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4898 &current_speed,
4899 &current_duplex);
4900
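/* Wait for BMCR to settle; a read of 0 or 0x7fff is treated as a
 * junk value from a PHY that has not yet come out of reset.
 */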
4901 bmcr = 0;
4902 for (i = 0; i < 200; i++) {
4903 tg3_readphy(tp, MII_BMCR, &bmcr);
4904 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4905 continue;
4906 if (bmcr && bmcr != 0x7fff)
4907 break;
4908 udelay(10);
4909 }
4910
4911 lcl_adv = 0;
4912 rmt_adv = 0;
4913
4914 tp->link_config.active_speed = current_speed;
4915 tp->link_config.active_duplex = current_duplex;
4916
4917 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4918 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4919
4920 if ((bmcr & BMCR_ANENABLE) &&
4921 eee_config_ok &&
4922 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4923 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4924 current_link_up = true;
4925
4926 /* Changes to the EEE settings take effect only after a PHY
4927 * reset. If we have skipped a reset because Link Flap
4928 * Avoidance is enabled, do it now.
4929 */
4930 if (!eee_config_ok &&
4931 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4932 !force_reset) {
4933 tg3_setup_eee(tp);
4934 tg3_phy_reset(tp);
4935 }
4936 } else {
4937 if (!(bmcr & BMCR_ANENABLE) &&
4938 tp->link_config.speed == current_speed &&
4939 tp->link_config.duplex == current_duplex) {
4940 current_link_up = true;
4941 }
4942 }
4943
4944 if (current_link_up &&
4945 tp->link_config.active_duplex == DUPLEX_FULL) {
4946 u32 reg, bit;
4947
4948 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4949 reg = MII_TG3_FET_GEN_STAT;
4950 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4951 } else {
4952 reg = MII_TG3_EXT_STAT;
4953 bit = MII_TG3_EXT_STAT_MDIX;
4954 }
4955
4956 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4957 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4958
4959 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4960 }
4961 }
4962
4963 relink:
4964 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4965 tg3_phy_copper_begin(tp);
4966
4967 if (tg3_flag(tp, ROBOSWITCH)) {
4968 current_link_up = true;
4969 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4970 current_speed = SPEED_1000;
4971 current_duplex = DUPLEX_FULL;
4972 tp->link_config.active_speed = current_speed;
4973 tp->link_config.active_duplex = current_duplex;
4974 }
4975
4976 tg3_readphy(tp, MII_BMSR, &bmsr);
4977 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4978 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4979 current_link_up = true;
4980 }
4981
4982 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4983 if (current_link_up) {
4984 if (tp->link_config.active_speed == SPEED_100 ||
4985 tp->link_config.active_speed == SPEED_10)
4986 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4987 else
4988 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4989 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4990 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4991 else
4992 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4993
4994 /* In order for the 5750 core in BCM4785 chip to work properly
4995 * in RGMII mode, the Led Control Register must be set up.
4996 */
4997 if (tg3_flag(tp, RGMII_MODE)) {
4998 u32 led_ctrl = tr32(MAC_LED_CTRL);
4999 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5000
5001 if (tp->link_config.active_speed == SPEED_10)
5002 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5003 else if (tp->link_config.active_speed == SPEED_100)
5004 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5005 LED_CTRL_100MBPS_ON);
5006 else if (tp->link_config.active_speed == SPEED_1000)
5007 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5008 LED_CTRL_1000MBPS_ON);
5009
5010 tw32(MAC_LED_CTRL, led_ctrl);
5011 udelay(40);
5012 }
5013
5014 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5015 if (tp->link_config.active_duplex == DUPLEX_HALF)
5016 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5017
5018 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5019 if (current_link_up &&
5020 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5021 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5022 else
5023 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5024 }
5025
5026 /* ??? Without this setting Netgear GA302T PHY does not
5027 * ??? send/receive packets...
5028 */
5029 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5030 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5031 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5032 tw32_f(MAC_MI_MODE, tp->mi_mode);
5033 udelay(80);
5034 }
5035
5036 tw32_f(MAC_MODE, tp->mac_mode);
5037 udelay(40);
5038
5039 tg3_phy_eee_adjust(tp, current_link_up);
5040
5041 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5042 /* Polled via timer. */
5043 tw32_f(MAC_EVENT, 0);
5044 } else {
5045 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5046 }
5047 udelay(40);
5048
5049 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5050 current_link_up &&
5051 tp->link_config.active_speed == SPEED_1000 &&
5052 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5053 udelay(120);
5054 tw32_f(MAC_STATUS,
5055 (MAC_STATUS_SYNC_CHANGED |
5056 MAC_STATUS_CFG_CHANGED));
5057 udelay(40);
5058 tg3_write_mem(tp,
5059 NIC_SRAM_FIRMWARE_MBOX,
5060 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5061 }
5062
5063 /* Prevent send BD corruption. */
5064 if (tg3_flag(tp, CLKREQ_BUG)) {
5065 if (tp->link_config.active_speed == SPEED_100 ||
5066 tp->link_config.active_speed == SPEED_10)
5067 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5068 PCI_EXP_LNKCTL_CLKREQ_EN);
5069 else
5070 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5071 PCI_EXP_LNKCTL_CLKREQ_EN);
5072 }
5073
5074 tg3_test_and_report_link_chg(tp, current_link_up);
5075
5076 return 0;
5077 }
5078
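/* Software implementation of the 1000BASE-X (IEEE 802.3 clause 37)
 * auto-negotiation arbitration state machine, used when hardware
 * autoneg is unavailable. The states and MR_* flags mirror the
 * management-register variables defined by the standard.
 */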
5079 struct tg3_fiber_aneginfo {
5080 int state;
5081 #define ANEG_STATE_UNKNOWN 0
5082 #define ANEG_STATE_AN_ENABLE 1
5083 #define ANEG_STATE_RESTART_INIT 2
5084 #define ANEG_STATE_RESTART 3
5085 #define ANEG_STATE_DISABLE_LINK_OK 4
5086 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5087 #define ANEG_STATE_ABILITY_DETECT 6
5088 #define ANEG_STATE_ACK_DETECT_INIT 7
5089 #define ANEG_STATE_ACK_DETECT 8
5090 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5091 #define ANEG_STATE_COMPLETE_ACK 10
5092 #define ANEG_STATE_IDLE_DETECT_INIT 11
5093 #define ANEG_STATE_IDLE_DETECT 12
5094 #define ANEG_STATE_LINK_OK 13
5095 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5096 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5097
5098 u32 flags;
5099 #define MR_AN_ENABLE 0x00000001
5100 #define MR_RESTART_AN 0x00000002
5101 #define MR_AN_COMPLETE 0x00000004
5102 #define MR_PAGE_RX 0x00000008
5103 #define MR_NP_LOADED 0x00000010
5104 #define MR_TOGGLE_TX 0x00000020
5105 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5106 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5107 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5108 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5109 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5110 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5111 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5112 #define MR_TOGGLE_RX 0x00002000
5113 #define MR_NP_RX 0x00004000
5114
5115 #define MR_LINK_OK 0x80000000
5116
5117 unsigned long link_time, cur_time;
5118
5119 u32 ability_match_cfg;
5120 int ability_match_count;
5121
5122 char ability_match, idle_match, ack_match;
5123
5124 u32 txconfig, rxconfig;
5125 #define ANEG_CFG_NP 0x00000080
5126 #define ANEG_CFG_ACK 0x00000040
5127 #define ANEG_CFG_RF2 0x00000020
5128 #define ANEG_CFG_RF1 0x00000010
5129 #define ANEG_CFG_PS2 0x00000001
5130 #define ANEG_CFG_PS1 0x00008000
5131 #define ANEG_CFG_HD 0x00004000
5132 #define ANEG_CFG_FD 0x00002000
5133 #define ANEG_CFG_INVAL 0x00001f06
5134
5135 };
5136 #define ANEG_OK 0
5137 #define ANEG_DONE 1
5138 #define ANEG_TIMER_ENAB 2
5139 #define ANEG_FAILED -1
5140
5141 #define ANEG_STATE_SETTLE_TIME 10000
5142
5143 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5144 struct tg3_fiber_aneginfo *ap)
5145 {
5146 u16 flowctrl;
5147 unsigned long delta;
5148 u32 rx_cfg_reg;
5149 int ret;
5150
5151 if (ap->state == ANEG_STATE_UNKNOWN) {
5152 ap->rxconfig = 0;
5153 ap->link_time = 0;
5154 ap->cur_time = 0;
5155 ap->ability_match_cfg = 0;
5156 ap->ability_match_count = 0;
5157 ap->ability_match = 0;
5158 ap->idle_match = 0;
5159 ap->ack_match = 0;
5160 }
5161 ap->cur_time++;
5162
5163 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5164 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5165
5166 if (rx_cfg_reg != ap->ability_match_cfg) {
5167 ap->ability_match_cfg = rx_cfg_reg;
5168 ap->ability_match = 0;
5169 ap->ability_match_count = 0;
5170 } else {
5171 if (++ap->ability_match_count > 1) {
5172 ap->ability_match = 1;
5173 ap->ability_match_cfg = rx_cfg_reg;
5174 }
5175 }
5176 if (rx_cfg_reg & ANEG_CFG_ACK)
5177 ap->ack_match = 1;
5178 else
5179 ap->ack_match = 0;
5180
5181 ap->idle_match = 0;
5182 } else {
5183 ap->idle_match = 1;
5184 ap->ability_match_cfg = 0;
5185 ap->ability_match_count = 0;
5186 ap->ability_match = 0;
5187 ap->ack_match = 0;
5188
5189 rx_cfg_reg = 0;
5190 }
5191
5192 ap->rxconfig = rx_cfg_reg;
5193 ret = ANEG_OK;
5194
5195 switch (ap->state) {
5196 case ANEG_STATE_UNKNOWN:
5197 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5198 ap->state = ANEG_STATE_AN_ENABLE;
5199
5200 /* fallthru */
5201 case ANEG_STATE_AN_ENABLE:
5202 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5203 if (ap->flags & MR_AN_ENABLE) {
5204 ap->link_time = 0;
5205 ap->cur_time = 0;
5206 ap->ability_match_cfg = 0;
5207 ap->ability_match_count = 0;
5208 ap->ability_match = 0;
5209 ap->idle_match = 0;
5210 ap->ack_match = 0;
5211
5212 ap->state = ANEG_STATE_RESTART_INIT;
5213 } else {
5214 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5215 }
5216 break;
5217
5218 case ANEG_STATE_RESTART_INIT:
5219 ap->link_time = ap->cur_time;
5220 ap->flags &= ~(MR_NP_LOADED);
5221 ap->txconfig = 0;
5222 tw32(MAC_TX_AUTO_NEG, 0);
5223 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5224 tw32_f(MAC_MODE, tp->mac_mode);
5225 udelay(40);
5226
5227 ret = ANEG_TIMER_ENAB;
5228 ap->state = ANEG_STATE_RESTART;
5229
5230 /* fallthru */
5231 case ANEG_STATE_RESTART:
5232 delta = ap->cur_time - ap->link_time;
5233 if (delta > ANEG_STATE_SETTLE_TIME)
5234 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5235 else
5236 ret = ANEG_TIMER_ENAB;
5237 break;
5238
5239 case ANEG_STATE_DISABLE_LINK_OK:
5240 ret = ANEG_DONE;
5241 break;
5242
5243 case ANEG_STATE_ABILITY_DETECT_INIT:
5244 ap->flags &= ~(MR_TOGGLE_TX);
5245 ap->txconfig = ANEG_CFG_FD;
5246 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5247 if (flowctrl & ADVERTISE_1000XPAUSE)
5248 ap->txconfig |= ANEG_CFG_PS1;
5249 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5250 ap->txconfig |= ANEG_CFG_PS2;
5251 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5252 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5253 tw32_f(MAC_MODE, tp->mac_mode);
5254 udelay(40);
5255
5256 ap->state = ANEG_STATE_ABILITY_DETECT;
5257 break;
5258
5259 case ANEG_STATE_ABILITY_DETECT:
5260 if (ap->ability_match != 0 && ap->rxconfig != 0)
5261 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5262 break;
5263
5264 case ANEG_STATE_ACK_DETECT_INIT:
5265 ap->txconfig |= ANEG_CFG_ACK;
5266 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5267 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5268 tw32_f(MAC_MODE, tp->mac_mode);
5269 udelay(40);
5270
5271 ap->state = ANEG_STATE_ACK_DETECT;
5272
5273 /* fallthru */
5274 case ANEG_STATE_ACK_DETECT:
5275 if (ap->ack_match != 0) {
5276 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5277 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5278 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5279 } else {
5280 ap->state = ANEG_STATE_AN_ENABLE;
5281 }
5282 } else if (ap->ability_match != 0 &&
5283 ap->rxconfig == 0) {
5284 ap->state = ANEG_STATE_AN_ENABLE;
5285 }
5286 break;
5287
5288 case ANEG_STATE_COMPLETE_ACK_INIT:
5289 if (ap->rxconfig & ANEG_CFG_INVAL) {
5290 ret = ANEG_FAILED;
5291 break;
5292 }
5293 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5294 MR_LP_ADV_HALF_DUPLEX |
5295 MR_LP_ADV_SYM_PAUSE |
5296 MR_LP_ADV_ASYM_PAUSE |
5297 MR_LP_ADV_REMOTE_FAULT1 |
5298 MR_LP_ADV_REMOTE_FAULT2 |
5299 MR_LP_ADV_NEXT_PAGE |
5300 MR_TOGGLE_RX |
5301 MR_NP_RX);
5302 if (ap->rxconfig & ANEG_CFG_FD)
5303 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5304 if (ap->rxconfig & ANEG_CFG_HD)
5305 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5306 if (ap->rxconfig & ANEG_CFG_PS1)
5307 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5308 if (ap->rxconfig & ANEG_CFG_PS2)
5309 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5310 if (ap->rxconfig & ANEG_CFG_RF1)
5311 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5312 if (ap->rxconfig & ANEG_CFG_RF2)
5313 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5314 if (ap->rxconfig & ANEG_CFG_NP)
5315 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5316
5317 ap->link_time = ap->cur_time;
5318
5319 ap->flags ^= (MR_TOGGLE_TX);
5320 if (ap->rxconfig & 0x0008)
5321 ap->flags |= MR_TOGGLE_RX;
5322 if (ap->rxconfig & ANEG_CFG_NP)
5323 ap->flags |= MR_NP_RX;
5324 ap->flags |= MR_PAGE_RX;
5325
5326 ap->state = ANEG_STATE_COMPLETE_ACK;
5327 ret = ANEG_TIMER_ENAB;
5328 break;
5329
5330 case ANEG_STATE_COMPLETE_ACK:
5331 if (ap->ability_match != 0 &&
5332 ap->rxconfig == 0) {
5333 ap->state = ANEG_STATE_AN_ENABLE;
5334 break;
5335 }
5336 delta = ap->cur_time - ap->link_time;
5337 if (delta > ANEG_STATE_SETTLE_TIME) {
5338 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5339 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5340 } else {
5341 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5342 !(ap->flags & MR_NP_RX)) {
5343 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5344 } else {
5345 ret = ANEG_FAILED;
5346 }
5347 }
5348 }
5349 break;
5350
5351 case ANEG_STATE_IDLE_DETECT_INIT:
5352 ap->link_time = ap->cur_time;
5353 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5354 tw32_f(MAC_MODE, tp->mac_mode);
5355 udelay(40);
5356
5357 ap->state = ANEG_STATE_IDLE_DETECT;
5358 ret = ANEG_TIMER_ENAB;
5359 break;
5360
5361 case ANEG_STATE_IDLE_DETECT:
5362 if (ap->ability_match != 0 &&
5363 ap->rxconfig == 0) {
5364 ap->state = ANEG_STATE_AN_ENABLE;
5365 break;
5366 }
5367 delta = ap->cur_time - ap->link_time;
5368 if (delta > ANEG_STATE_SETTLE_TIME) {
5369 /* XXX another gem from the Broadcom driver :( */
5370 ap->state = ANEG_STATE_LINK_OK;
5371 }
5372 break;
5373
5374 case ANEG_STATE_LINK_OK:
5375 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5376 ret = ANEG_DONE;
5377 break;
5378
5379 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5380 /* ??? unimplemented */
5381 break;
5382
5383 case ANEG_STATE_NEXT_PAGE_WAIT:
5384 /* ??? unimplemented */
5385 break;
5386
5387 default:
5388 ret = ANEG_FAILED;
5389 break;
5390 }
5391
5392 return ret;
5393 }
5394
5395 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5396 {
5397 int res = 0;
5398 struct tg3_fiber_aneginfo aninfo;
5399 int status = ANEG_FAILED;
5400 unsigned int tick;
5401 u32 tmp;
5402
5403 tw32_f(MAC_TX_AUTO_NEG, 0);
5404
5405 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5406 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5407 udelay(40);
5408
5409 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5410 udelay(40);
5411
5412 memset(&aninfo, 0, sizeof(aninfo));
5413 aninfo.flags |= MR_AN_ENABLE;
5414 aninfo.state = ANEG_STATE_UNKNOWN;
5415 aninfo.cur_time = 0;
5416 tick = 0;
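/* Step the state machine roughly once per microsecond, giving
 * autoneg a budget of about 195ms to complete.
 */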
5417 while (++tick < 195000) {
5418 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5419 if (status == ANEG_DONE || status == ANEG_FAILED)
5420 break;
5421
5422 udelay(1);
5423 }
5424
5425 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5426 tw32_f(MAC_MODE, tp->mac_mode);
5427 udelay(40);
5428
5429 *txflags = aninfo.txconfig;
5430 *rxflags = aninfo.flags;
5431
5432 if (status == ANEG_DONE &&
5433 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5434 MR_LP_ADV_FULL_DUPLEX)))
5435 res = 1;
5436
5437 return res;
5438 }
5439
5440 static void tg3_init_bcm8002(struct tg3 *tp)
5441 {
5442 u32 mac_status = tr32(MAC_STATUS);
5443 int i;
5444
5445 /* Reset when initializing for the first time or when we have a link. */
5446 if (tg3_flag(tp, INIT_COMPLETE) &&
5447 !(mac_status & MAC_STATUS_PCS_SYNCED))
5448 return;
5449
5450 /* Set PLL lock range. */
5451 tg3_writephy(tp, 0x16, 0x8007);
5452
5453 /* SW reset */
5454 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5455
5456 /* Wait for reset to complete. */
5457 /* XXX schedule_timeout() ... */
5458 for (i = 0; i < 500; i++)
5459 udelay(10);
5460
5461 /* Config mode; select PMA/Ch 1 regs. */
5462 tg3_writephy(tp, 0x10, 0x8411);
5463
5464 /* Enable auto-lock and comdet, select txclk for tx. */
5465 tg3_writephy(tp, 0x11, 0x0a10);
5466
5467 tg3_writephy(tp, 0x18, 0x00a0);
5468 tg3_writephy(tp, 0x16, 0x41ff);
5469
5470 /* Assert and deassert POR. */
5471 tg3_writephy(tp, 0x13, 0x0400);
5472 udelay(40);
5473 tg3_writephy(tp, 0x13, 0x0000);
5474
5475 tg3_writephy(tp, 0x11, 0x0a50);
5476 udelay(40);
5477 tg3_writephy(tp, 0x11, 0x0a10);
5478
5479 /* Wait for signal to stabilize */
5480 /* XXX schedule_timeout() ... */
5481 for (i = 0; i < 15000; i++)
5482 udelay(10);
5483
5484 /* Deselect the channel register so we can read the PHYID
5485 * later.
5486 */
5487 tg3_writephy(tp, 0x10, 0x8011);
5488 }
5489
5490 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5491 {
5492 u16 flowctrl;
5493 bool current_link_up;
5494 u32 sg_dig_ctrl, sg_dig_status;
5495 u32 serdes_cfg, expected_sg_dig_ctrl;
5496 int workaround, port_a;
5497
5498 serdes_cfg = 0;
5499 expected_sg_dig_ctrl = 0;
5500 workaround = 0;
5501 port_a = 1;
5502 current_link_up = false;
5503
5504 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5505 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5506 workaround = 1;
5507 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5508 port_a = 0;
5509
5510 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5511 /* preserve bits 20-23 for voltage regulator */
5512 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5513 }
5514
5515 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5516
5517 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5518 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5519 if (workaround) {
5520 u32 val = serdes_cfg;
5521
5522 if (port_a)
5523 val |= 0xc010000;
5524 else
5525 val |= 0x4010000;
5526 tw32_f(MAC_SERDES_CFG, val);
5527 }
5528
5529 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5530 }
5531 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5532 tg3_setup_flow_control(tp, 0, 0);
5533 current_link_up = true;
5534 }
5535 goto out;
5536 }
5537
5538 /* Want auto-negotiation. */
5539 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5540
5541 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5542 if (flowctrl & ADVERTISE_1000XPAUSE)
5543 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5544 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5545 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5546
5547 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5548 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5549 tp->serdes_counter &&
5550 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5551 MAC_STATUS_RCVD_CFG)) ==
5552 MAC_STATUS_PCS_SYNCED)) {
5553 tp->serdes_counter--;
5554 current_link_up = true;
5555 goto out;
5556 }
5557 restart_autoneg:
5558 if (workaround)
5559 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5560 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5561 udelay(5);
5562 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5563
5564 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5565 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5566 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5567 MAC_STATUS_SIGNAL_DET)) {
5568 sg_dig_status = tr32(SG_DIG_STATUS);
5569 mac_status = tr32(MAC_STATUS);
5570
5571 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5572 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5573 u32 local_adv = 0, remote_adv = 0;
5574
5575 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5576 local_adv |= ADVERTISE_1000XPAUSE;
5577 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5578 local_adv |= ADVERTISE_1000XPSE_ASYM;
5579
5580 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5581 remote_adv |= LPA_1000XPAUSE;
5582 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5583 remote_adv |= LPA_1000XPAUSE_ASYM;
5584
5585 tp->link_config.rmt_adv =
5586 mii_adv_to_ethtool_adv_x(remote_adv);
5587
5588 tg3_setup_flow_control(tp, local_adv, remote_adv);
5589 current_link_up = true;
5590 tp->serdes_counter = 0;
5591 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5592 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5593 if (tp->serdes_counter)
5594 tp->serdes_counter--;
5595 else {
5596 if (workaround) {
5597 u32 val = serdes_cfg;
5598
5599 if (port_a)
5600 val |= 0xc010000;
5601 else
5602 val |= 0x4010000;
5603
5604 tw32_f(MAC_SERDES_CFG, val);
5605 }
5606
5607 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5608 udelay(40);
5609
5610 /* Link parallel detection - the link is up only
5611 * if we have PCS_SYNC and are not receiving
5612 * config code words. */
5613 mac_status = tr32(MAC_STATUS);
5614 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5615 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5616 tg3_setup_flow_control(tp, 0, 0);
5617 current_link_up = true;
5618 tp->phy_flags |=
5619 TG3_PHYFLG_PARALLEL_DETECT;
5620 tp->serdes_counter =
5621 SERDES_PARALLEL_DET_TIMEOUT;
5622 } else
5623 goto restart_autoneg;
5624 }
5625 }
5626 } else {
5627 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5628 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5629 }
5630
5631 out:
5632 return current_link_up;
5633 }
5634
5635 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5636 {
5637 bool current_link_up = false;
5638
5639 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5640 goto out;
5641
5642 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5643 u32 txflags, rxflags;
5644 int i;
5645
5646 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5647 u32 local_adv = 0, remote_adv = 0;
5648
5649 if (txflags & ANEG_CFG_PS1)
5650 local_adv |= ADVERTISE_1000XPAUSE;
5651 if (txflags & ANEG_CFG_PS2)
5652 local_adv |= ADVERTISE_1000XPSE_ASYM;
5653
5654 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5655 remote_adv |= LPA_1000XPAUSE;
5656 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5657 remote_adv |= LPA_1000XPAUSE_ASYM;
5658
5659 tp->link_config.rmt_adv =
5660 mii_adv_to_ethtool_adv_x(remote_adv);
5661
5662 tg3_setup_flow_control(tp, local_adv, remote_adv);
5663
5664 current_link_up = true;
5665 }
5666 for (i = 0; i < 30; i++) {
5667 udelay(20);
5668 tw32_f(MAC_STATUS,
5669 (MAC_STATUS_SYNC_CHANGED |
5670 MAC_STATUS_CFG_CHANGED));
5671 udelay(40);
5672 if ((tr32(MAC_STATUS) &
5673 (MAC_STATUS_SYNC_CHANGED |
5674 MAC_STATUS_CFG_CHANGED)) == 0)
5675 break;
5676 }
5677
5678 mac_status = tr32(MAC_STATUS);
5679 if (!current_link_up &&
5680 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5681 !(mac_status & MAC_STATUS_RCVD_CFG))
5682 current_link_up = true;
5683 } else {
5684 tg3_setup_flow_control(tp, 0, 0);
5685
5686 /* Forcing 1000FD link up. */
5687 current_link_up = true;
5688
5689 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5690 udelay(40);
5691
5692 tw32_f(MAC_MODE, tp->mac_mode);
5693 udelay(40);
5694 }
5695
5696 out:
5697 return current_link_up;
5698 }
5699
5700 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5701 {
5702 u32 orig_pause_cfg;
5703 u16 orig_active_speed;
5704 u8 orig_active_duplex;
5705 u32 mac_status;
5706 bool current_link_up;
5707 int i;
5708
5709 orig_pause_cfg = tp->link_config.active_flowctrl;
5710 orig_active_speed = tp->link_config.active_speed;
5711 orig_active_duplex = tp->link_config.active_duplex;
5712
5713 if (!tg3_flag(tp, HW_AUTONEG) &&
5714 tp->link_up &&
5715 tg3_flag(tp, INIT_COMPLETE)) {
5716 mac_status = tr32(MAC_STATUS);
5717 mac_status &= (MAC_STATUS_PCS_SYNCED |
5718 MAC_STATUS_SIGNAL_DET |
5719 MAC_STATUS_CFG_CHANGED |
5720 MAC_STATUS_RCVD_CFG);
5721 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5722 MAC_STATUS_SIGNAL_DET)) {
5723 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5724 MAC_STATUS_CFG_CHANGED));
5725 return 0;
5726 }
5727 }
5728
5729 tw32_f(MAC_TX_AUTO_NEG, 0);
5730
5731 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5732 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5733 tw32_f(MAC_MODE, tp->mac_mode);
5734 udelay(40);
5735
5736 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5737 tg3_init_bcm8002(tp);
5738
5739 /* Enable link change events even while serdes polling is in use. */
5740 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5741 udelay(40);
5742
5743 current_link_up = false;
5744 tp->link_config.rmt_adv = 0;
5745 mac_status = tr32(MAC_STATUS);
5746
5747 if (tg3_flag(tp, HW_AUTONEG))
5748 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5749 else
5750 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5751
5752 tp->napi[0].hw_status->status =
5753 (SD_STATUS_UPDATED |
5754 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5755
5756 for (i = 0; i < 100; i++) {
5757 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5758 MAC_STATUS_CFG_CHANGED));
5759 udelay(5);
5760 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5761 MAC_STATUS_CFG_CHANGED |
5762 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5763 break;
5764 }
5765
5766 mac_status = tr32(MAC_STATUS);
5767 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5768 current_link_up = false;
5769 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5770 tp->serdes_counter == 0) {
5771 tw32_f(MAC_MODE, (tp->mac_mode |
5772 MAC_MODE_SEND_CONFIGS));
5773 udelay(1);
5774 tw32_f(MAC_MODE, tp->mac_mode);
5775 }
5776 }
5777
5778 if (current_link_up) {
5779 tp->link_config.active_speed = SPEED_1000;
5780 tp->link_config.active_duplex = DUPLEX_FULL;
5781 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5782 LED_CTRL_LNKLED_OVERRIDE |
5783 LED_CTRL_1000MBPS_ON));
5784 } else {
5785 tp->link_config.active_speed = SPEED_UNKNOWN;
5786 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5787 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5788 LED_CTRL_LNKLED_OVERRIDE |
5789 LED_CTRL_TRAFFIC_OVERRIDE));
5790 }
5791
5792 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5793 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5794 if (orig_pause_cfg != now_pause_cfg ||
5795 orig_active_speed != tp->link_config.active_speed ||
5796 orig_active_duplex != tp->link_config.active_duplex)
5797 tg3_link_report(tp);
5798 }
5799
5800 return 0;
5801 }
5802
5803 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5804 {
5805 int err = 0;
5806 u32 bmsr, bmcr;
5807 u16 current_speed = SPEED_UNKNOWN;
5808 u8 current_duplex = DUPLEX_UNKNOWN;
5809 bool current_link_up = false;
5810 u32 local_adv, remote_adv, sgsr;
5811
5812 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5813 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5814 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5815 (sgsr & SERDES_TG3_SGMII_MODE)) {
5816
5817 if (force_reset)
5818 tg3_phy_reset(tp);
5819
5820 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5821
5822 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5823 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5824 } else {
5825 current_link_up = true;
5826 if (sgsr & SERDES_TG3_SPEED_1000) {
5827 current_speed = SPEED_1000;
5828 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5829 } else if (sgsr & SERDES_TG3_SPEED_100) {
5830 current_speed = SPEED_100;
5831 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5832 } else {
5833 current_speed = SPEED_10;
5834 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5835 }
5836
5837 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5838 current_duplex = DUPLEX_FULL;
5839 else
5840 current_duplex = DUPLEX_HALF;
5841 }
5842
5843 tw32_f(MAC_MODE, tp->mac_mode);
5844 udelay(40);
5845
5846 tg3_clear_mac_status(tp);
5847
5848 goto fiber_setup_done;
5849 }
5850
5851 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5852 tw32_f(MAC_MODE, tp->mac_mode);
5853 udelay(40);
5854
5855 tg3_clear_mac_status(tp);
5856
5857 if (force_reset)
5858 tg3_phy_reset(tp);
5859
5860 tp->link_config.rmt_adv = 0;
5861
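/* BMSR is read twice because link-down events are latched. On the
 * 5714 the SERDES BMSR link bit is not trusted; it is patched up
 * from the MAC's TX status register instead.
 */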
5862 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5863 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5864 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5865 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5866 bmsr |= BMSR_LSTATUS;
5867 else
5868 bmsr &= ~BMSR_LSTATUS;
5869 }
5870
5871 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5872
5873 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5874 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5875 /* do nothing, just check for link up at the end */
5876 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5877 u32 adv, newadv;
5878
5879 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5880 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5881 ADVERTISE_1000XPAUSE |
5882 ADVERTISE_1000XPSE_ASYM |
5883 ADVERTISE_SLCT);
5884
5885 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5886 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5887
5888 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5889 tg3_writephy(tp, MII_ADVERTISE, newadv);
5890 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5891 tg3_writephy(tp, MII_BMCR, bmcr);
5892
5893 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5894 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5895 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5896
5897 return err;
5898 }
5899 } else {
5900 u32 new_bmcr;
5901
5902 bmcr &= ~BMCR_SPEED1000;
5903 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5904
5905 if (tp->link_config.duplex == DUPLEX_FULL)
5906 new_bmcr |= BMCR_FULLDPLX;
5907
5908 if (new_bmcr != bmcr) {
5909 /* BMCR_SPEED1000 is a reserved bit that needs
5910 * to be set on write.
5911 */
5912 new_bmcr |= BMCR_SPEED1000;
5913
5914 /* Force a linkdown */
5915 if (tp->link_up) {
5916 u32 adv;
5917
5918 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5919 adv &= ~(ADVERTISE_1000XFULL |
5920 ADVERTISE_1000XHALF |
5921 ADVERTISE_SLCT);
5922 tg3_writephy(tp, MII_ADVERTISE, adv);
5923 tg3_writephy(tp, MII_BMCR, bmcr |
5924 BMCR_ANRESTART |
5925 BMCR_ANENABLE);
5926 udelay(10);
5927 tg3_carrier_off(tp);
5928 }
5929 tg3_writephy(tp, MII_BMCR, new_bmcr);
5930 bmcr = new_bmcr;
5931 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5932 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5933 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5934 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5935 bmsr |= BMSR_LSTATUS;
5936 else
5937 bmsr &= ~BMSR_LSTATUS;
5938 }
5939 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5940 }
5941 }
5942
5943 if (bmsr & BMSR_LSTATUS) {
5944 current_speed = SPEED_1000;
5945 current_link_up = true;
5946 if (bmcr & BMCR_FULLDPLX)
5947 current_duplex = DUPLEX_FULL;
5948 else
5949 current_duplex = DUPLEX_HALF;
5950
5951 local_adv = 0;
5952 remote_adv = 0;
5953
5954 if (bmcr & BMCR_ANENABLE) {
5955 u32 common;
5956
5957 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5958 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5959 common = local_adv & remote_adv;
5960 if (common & (ADVERTISE_1000XHALF |
5961 ADVERTISE_1000XFULL)) {
5962 if (common & ADVERTISE_1000XFULL)
5963 current_duplex = DUPLEX_FULL;
5964 else
5965 current_duplex = DUPLEX_HALF;
5966
5967 tp->link_config.rmt_adv =
5968 mii_adv_to_ethtool_adv_x(remote_adv);
5969 } else if (!tg3_flag(tp, 5780_CLASS)) {
5970 /* Link is up via parallel detect */
5971 } else {
5972 current_link_up = false;
5973 }
5974 }
5975 }
5976
5977 fiber_setup_done:
5978 if (current_link_up && current_duplex == DUPLEX_FULL)
5979 tg3_setup_flow_control(tp, local_adv, remote_adv);
5980
5981 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5982 if (tp->link_config.active_duplex == DUPLEX_HALF)
5983 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5984
5985 tw32_f(MAC_MODE, tp->mac_mode);
5986 udelay(40);
5987
5988 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5989
5990 tp->link_config.active_speed = current_speed;
5991 tp->link_config.active_duplex = current_duplex;
5992
5993 tg3_test_and_report_link_chg(tp, current_link_up);
5994 return err;
5995 }
5996
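/* Runs periodically while a MII-SERDES link is autonegotiating: if
 * the partner never sends config code words but signal detect is
 * up, force a 1000FD link ("parallel detection"), and undo the
 * forcing as soon as config code words do appear.
 */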
5997 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5998 {
5999 if (tp->serdes_counter) {
6000 /* Give autoneg time to complete. */
6001 tp->serdes_counter--;
6002 return;
6003 }
6004
6005 if (!tp->link_up &&
6006 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6007 u32 bmcr;
6008
6009 tg3_readphy(tp, MII_BMCR, &bmcr);
6010 if (bmcr & BMCR_ANENABLE) {
6011 u32 phy1, phy2;
6012
6013 /* Select shadow register 0x1f */
6014 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6015 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6016
6017 /* Select expansion interrupt status register */
6018 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6019 MII_TG3_DSP_EXP1_INT_STAT);
6020 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6021 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6022
6023 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6024 /* We have signal detect and are not receiving
6025 * config code words; the link is up via parallel
6026 * detection.
6027 */
6028
6029 bmcr &= ~BMCR_ANENABLE;
6030 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6031 tg3_writephy(tp, MII_BMCR, bmcr);
6032 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6033 }
6034 }
6035 } else if (tp->link_up &&
6036 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6037 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6038 u32 phy2;
6039
6040 /* Select expansion interrupt status register */
6041 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6042 MII_TG3_DSP_EXP1_INT_STAT);
6043 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6044 if (phy2 & 0x20) {
6045 u32 bmcr;
6046
6047 /* Config code words received, turn on autoneg. */
6048 tg3_readphy(tp, MII_BMCR, &bmcr);
6049 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6050
6051 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6052
6053 }
6054 }
6055 }
6056
6057 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6058 {
6059 u32 val;
6060 int err;
6061
6062 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6063 err = tg3_setup_fiber_phy(tp, force_reset);
6064 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6065 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6066 else
6067 err = tg3_setup_copper_phy(tp, force_reset);
6068
6069 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6070 u32 scale;
6071
6072 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6073 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6074 scale = 65;
6075 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6076 scale = 6;
6077 else
6078 scale = 12;
6079
6080 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6081 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6082 tw32(GRC_MISC_CFG, val);
6083 }
6084
6085 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6086 (6 << TX_LENGTHS_IPG_SHIFT);
6087 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6088 tg3_asic_rev(tp) == ASIC_REV_5762)
6089 val |= tr32(MAC_TX_LENGTHS) &
6090 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6091 TX_LENGTHS_CNT_DWN_VAL_MSK);
6092
6093 if (tp->link_config.active_speed == SPEED_1000 &&
6094 tp->link_config.active_duplex == DUPLEX_HALF)
6095 tw32(MAC_TX_LENGTHS, val |
6096 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6097 else
6098 tw32(MAC_TX_LENGTHS, val |
6099 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6100
6101 if (!tg3_flag(tp, 5705_PLUS)) {
6102 if (tp->link_up) {
6103 tw32(HOSTCC_STAT_COAL_TICKS,
6104 tp->coal.stats_block_coalesce_usecs);
6105 } else {
6106 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6107 }
6108 }
6109
6110 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6111 val = tr32(PCIE_PWR_MGMT_THRESH);
6112 if (!tp->link_up)
6113 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6114 tp->pwrmgmt_thresh;
6115 else
6116 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6117 tw32(PCIE_PWR_MGMT_THRESH, val);
6118 }
6119
6120 return err;
6121 }
6122
6123 /* tp->lock must be held */
6124 static u64 tg3_refclk_read(struct tg3 *tp)
6125 {
6126 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6127 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6128 }
6129
6130 /* tp->lock must be held */
6131 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6132 {
6133 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6134
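/* Halt the counter so the two 32-bit halves are loaded atomically,
 * then resume counting from the new value.
 */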
6135 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6136 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6137 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6138 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6139 }
6140
6141 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6142 static inline void tg3_full_unlock(struct tg3 *tp);
6143 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6144 {
6145 struct tg3 *tp = netdev_priv(dev);
6146
6147 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6148 SOF_TIMESTAMPING_RX_SOFTWARE |
6149 SOF_TIMESTAMPING_SOFTWARE;
6150
6151 if (tg3_flag(tp, PTP_CAPABLE)) {
6152 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6153 SOF_TIMESTAMPING_RX_HARDWARE |
6154 SOF_TIMESTAMPING_RAW_HARDWARE;
6155 }
6156
6157 if (tp->ptp_clock)
6158 info->phc_index = ptp_clock_index(tp->ptp_clock);
6159 else
6160 info->phc_index = -1;
6161
6162 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6163
6164 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6165 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6166 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6167 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6168 return 0;
6169 }
6170
6171 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6172 {
6173 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6174 bool neg_adj = false;
6175 u32 correction = 0;
6176
6177 if (ppb < 0) {
6178 neg_adj = true;
6179 ppb = -ppb;
6180 }
6181
6182 /* Frequency adjustment is performed using hardware with a 24 bit
6183 * accumulator and a programmable correction value. On each clk, the
6184 * correction value gets added to the accumulator and when it
6185 * overflows, the time counter is incremented/decremented.
6186 *
6187 * So conversion from ppb to correction value is
6188 * ppb * (1 << 24) / 1000000000
6189 */
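/* Worked example (illustrative, not from the original source):
 * ppb = 1000 (a 1 ppm speed-up) yields
 * correction = 1000 * 16777216 / 1000000000 = 16.
 */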
6190 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6191 TG3_EAV_REF_CLK_CORRECT_MASK;
6192
6193 tg3_full_lock(tp, 0);
6194
6195 if (correction)
6196 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6197 TG3_EAV_REF_CLK_CORRECT_EN |
6198 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6199 else
6200 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6201
6202 tg3_full_unlock(tp);
6203
6204 return 0;
6205 }
6206
6207 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6208 {
6209 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6210
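/* Software-only adjustment: the delta accumulates in tp->ptp_adjust
 * and is folded into tg3_ptp_gettime() results rather than stepping
 * the hardware counter.
 */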
6211 tg3_full_lock(tp, 0);
6212 tp->ptp_adjust += delta;
6213 tg3_full_unlock(tp);
6214
6215 return 0;
6216 }
6217
6218 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6219 {
6220 u64 ns;
6221 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6222
6223 tg3_full_lock(tp, 0);
6224 ns = tg3_refclk_read(tp);
6225 ns += tp->ptp_adjust;
6226 tg3_full_unlock(tp);
6227
6228 *ts = ns_to_timespec64(ns);
6229
6230 return 0;
6231 }
6232
6233 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6234 const struct timespec64 *ts)
6235 {
6236 u64 ns;
6237 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6238
6239 ns = timespec64_to_ns(ts);
6240
6241 tg3_full_lock(tp, 0);
6242 tg3_refclk_write(tp, ns);
6243 tp->ptp_adjust = 0;
6244 tg3_full_unlock(tp);
6245
6246 return 0;
6247 }
6248
6249 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6250 struct ptp_clock_request *rq, int on)
6251 {
6252 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6253 u32 clock_ctl;
6254 int rval = 0;
6255
6256 switch (rq->type) {
6257 case PTP_CLK_REQ_PEROUT:
6258 if (rq->perout.index != 0)
6259 return -EINVAL;
6260
6261 tg3_full_lock(tp, 0);
6262 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6263 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6264
6265 if (on) {
6266 u64 nsec;
6267
6268 nsec = rq->perout.start.sec * 1000000000ULL +
6269 rq->perout.start.nsec;
6270
6271 if (rq->perout.period.sec || rq->perout.period.nsec) {
6272 netdev_warn(tp->dev,
6273 "Device supports only a one-shot timesync output, period must be 0\n");
6274 rval = -EINVAL;
6275 goto err_out;
6276 }
6277
6278 if (nsec & (1ULL << 63)) {
6279 netdev_warn(tp->dev,
6280 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6281 rval = -EINVAL;
6282 goto err_out;
6283 }
6284
6285 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6286 tw32(TG3_EAV_WATCHDOG0_MSB,
6287 TG3_EAV_WATCHDOG0_EN |
6288 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6289
6290 tw32(TG3_EAV_REF_CLCK_CTL,
6291 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6292 } else {
6293 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6294 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6295 }
6296
6297 err_out:
6298 tg3_full_unlock(tp);
6299 return rval;
6300
6301 default:
6302 break;
6303 }
6304
6305 return -EOPNOTSUPP;
6306 }
6307
6308 static const struct ptp_clock_info tg3_ptp_caps = {
6309 .owner = THIS_MODULE,
6310 .name = "tg3 clock",
6311 .max_adj = 250000000,
6312 .n_alarm = 0,
6313 .n_ext_ts = 0,
6314 .n_per_out = 1,
6315 .n_pins = 0,
6316 .pps = 0,
6317 .adjfreq = tg3_ptp_adjfreq,
6318 .adjtime = tg3_ptp_adjtime,
6319 .gettime64 = tg3_ptp_gettime,
6320 .settime64 = tg3_ptp_settime,
6321 .enable = tg3_ptp_enable,
6322 };
6323
6324 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6325 struct skb_shared_hwtstamps *timestamp)
6326 {
6327 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6328 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6329 tp->ptp_adjust);
6330 }
6331
6332 /* tp->lock must be held */
6333 static void tg3_ptp_init(struct tg3 *tp)
6334 {
6335 if (!tg3_flag(tp, PTP_CAPABLE))
6336 return;
6337
6338 /* Initialize the hardware clock to the system time. */
6339 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6340 tp->ptp_adjust = 0;
6341 tp->ptp_info = tg3_ptp_caps;
6342 }
6343
6344 /* tp->lock must be held */
6345 static void tg3_ptp_resume(struct tg3 *tp)
6346 {
6347 if (!tg3_flag(tp, PTP_CAPABLE))
6348 return;
6349
6350 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6351 tp->ptp_adjust = 0;
6352 }
6353
6354 static void tg3_ptp_fini(struct tg3 *tp)
6355 {
6356 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6357 return;
6358
6359 ptp_clock_unregister(tp->ptp_clock);
6360 tp->ptp_clock = NULL;
6361 tp->ptp_adjust = 0;
6362 }
6363
6364 static inline int tg3_irq_sync(struct tg3 *tp)
6365 {
6366 return tp->irq_sync;
6367 }
6368
6369 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6370 {
6371 int i;
6372
6373 dst = (u32 *)((u8 *)dst + off);
6374 for (i = 0; i < len; i += sizeof(u32))
6375 *dst++ = tr32(off + i);
6376 }
6377
6378 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6379 {
6380 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6381 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6382 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6383 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6384 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6385 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6386 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6387 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6388 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6389 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6390 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6391 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6392 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6393 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6394 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6395 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6396 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6397 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6398 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6399
6400 if (tg3_flag(tp, SUPPORT_MSIX))
6401 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6402
6403 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6404 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6405 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6406 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6407 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6408 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6409 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6410 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6411
6412 if (!tg3_flag(tp, 5705_PLUS)) {
6413 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6414 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6415 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6416 }
6417
6418 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6419 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6420 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6421 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6422 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6423
6424 if (tg3_flag(tp, NVRAM))
6425 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6426 }
6427
6428 static void tg3_dump_state(struct tg3 *tp)
6429 {
6430 int i;
6431 u32 *regs;
6432
6433 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6434 if (!regs)
6435 return;
6436
6437 if (tg3_flag(tp, PCI_EXPRESS)) {
6438 /* Read up to but not including private PCI registers */
6439 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6440 regs[i / sizeof(u32)] = tr32(i);
6441 } else
6442 tg3_dump_legacy_regs(tp, regs);
6443
6444 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6445 if (!regs[i + 0] && !regs[i + 1] &&
6446 !regs[i + 2] && !regs[i + 3])
6447 continue;
6448
6449 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6450 i * 4,
6451 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6452 }
6453
6454 kfree(regs);
6455
6456 for (i = 0; i < tp->irq_cnt; i++) {
6457 struct tg3_napi *tnapi = &tp->napi[i];
6458
6459 /* SW status block */
6460 netdev_err(tp->dev,
6461 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6462 i,
6463 tnapi->hw_status->status,
6464 tnapi->hw_status->status_tag,
6465 tnapi->hw_status->rx_jumbo_consumer,
6466 tnapi->hw_status->rx_consumer,
6467 tnapi->hw_status->rx_mini_consumer,
6468 tnapi->hw_status->idx[0].rx_producer,
6469 tnapi->hw_status->idx[0].tx_consumer);
6470
6471 netdev_err(tp->dev,
6472 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6473 i,
6474 tnapi->last_tag, tnapi->last_irq_tag,
6475 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6476 tnapi->rx_rcb_ptr,
6477 tnapi->prodring.rx_std_prod_idx,
6478 tnapi->prodring.rx_std_cons_idx,
6479 tnapi->prodring.rx_jmb_prod_idx,
6480 tnapi->prodring.rx_jmb_cons_idx);
6481 }
6482 }
6483
6484 /* This is called whenever we suspect that the system chipset is re-
6485 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6486 * is bogus tx completions. We try to recover by setting the
6487 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6488 * in the workqueue.
6489 */
6490 static void tg3_tx_recover(struct tg3 *tp)
6491 {
6492 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6493 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6494
6495 netdev_warn(tp->dev,
6496 "The system may be re-ordering memory-mapped I/O "
6497 "cycles to the network device, attempting to recover. "
6498 "Please report the problem to the driver maintainer "
6499 "and include system chipset information.\n");
6500
6501 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6502 }
6503
6504 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6505 {
6506 /* Tell compiler to fetch tx indices from memory. */
6507 barrier();
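/* Illustrative example: with TG3_TX_RING_SIZE = 512, tx_prod = 5 and
 * tx_cons = 510, (5 - 510) & 511 = 7 descriptors are in flight; the
 * unsigned subtraction plus mask handles producer-index wraparound.
 */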
6508 return tnapi->tx_pending -
6509 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6510 }
6511
6512 /* Tigon3 never reports partial packet sends. So we do not
6513 * need special logic to handle SKBs that have not had all
6514 * of their frags sent yet, like SunGEM does.
6515 */
6516 static void tg3_tx(struct tg3_napi *tnapi)
6517 {
6518 struct tg3 *tp = tnapi->tp;
6519 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6520 u32 sw_idx = tnapi->tx_cons;
6521 struct netdev_queue *txq;
6522 int index = tnapi - tp->napi;
6523 unsigned int pkts_compl = 0, bytes_compl = 0;
6524
6525 if (tg3_flag(tp, ENABLE_TSS))
6526 index--;
6527
6528 txq = netdev_get_tx_queue(tp->dev, index);
6529
6530 while (sw_idx != hw_idx) {
6531 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6532 struct sk_buff *skb = ri->skb;
6533 int i, tx_bug = 0;
6534
6535 if (unlikely(skb == NULL)) {
6536 tg3_tx_recover(tp);
6537 return;
6538 }
6539
6540 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6541 struct skb_shared_hwtstamps timestamp;
6542 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6543 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6544
6545 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6546
6547 skb_tstamp_tx(skb, &timestamp);
6548 }
6549
6550 pci_unmap_single(tp->pdev,
6551 dma_unmap_addr(ri, mapping),
6552 skb_headlen(skb),
6553 PCI_DMA_TODEVICE);
6554
6555 ri->skb = NULL;
6556
6557 while (ri->fragmented) {
6558 ri->fragmented = false;
6559 sw_idx = NEXT_TX(sw_idx);
6560 ri = &tnapi->tx_buffers[sw_idx];
6561 }
6562
6563 sw_idx = NEXT_TX(sw_idx);
6564
6565 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6566 ri = &tnapi->tx_buffers[sw_idx];
6567 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6568 tx_bug = 1;
6569
6570 pci_unmap_page(tp->pdev,
6571 dma_unmap_addr(ri, mapping),
6572 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6573 PCI_DMA_TODEVICE);
6574
6575 while (ri->fragmented) {
6576 ri->fragmented = false;
6577 sw_idx = NEXT_TX(sw_idx);
6578 ri = &tnapi->tx_buffers[sw_idx];
6579 }
6580
6581 sw_idx = NEXT_TX(sw_idx);
6582 }
6583
6584 pkts_compl++;
6585 bytes_compl += skb->len;
6586
6587 dev_kfree_skb_any(skb);
6588
6589 if (unlikely(tx_bug)) {
6590 tg3_tx_recover(tp);
6591 return;
6592 }
6593 }
6594
6595 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6596
6597 tnapi->tx_cons = sw_idx;
6598
6599 /* Need to make the tx_cons update visible to tg3_start_xmit()
6600 * before checking for netif_queue_stopped(). Without the
6601 * memory barrier, there is a small possibility that tg3_start_xmit()
6602 * will miss it and cause the queue to be stopped forever.
6603 */
6604 smp_mb();
6605
6606 if (unlikely(netif_tx_queue_stopped(txq) &&
6607 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6608 __netif_tx_lock(txq, smp_processor_id());
6609 if (netif_tx_queue_stopped(txq) &&
6610 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6611 netif_tx_wake_queue(txq);
6612 __netif_tx_unlock(txq);
6613 }
6614 }
6615
6616 static void tg3_frag_free(bool is_frag, void *data)
6617 {
6618 if (is_frag)
6619 skb_free_frag(data);
6620 else
6621 kfree(data);
6622 }
6623
6624 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6625 {
6626 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6627 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6628
6629 if (!ri->data)
6630 return;
6631
6632 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6633 map_sz, PCI_DMA_FROMDEVICE);
6634 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6635 ri->data = NULL;
6636 }
6637
6638
6639 /* Returns size of skb allocated or < 0 on error.
6640 *
6641 * We only need to fill in the address because the other members
6642 * of the RX descriptor are invariant, see tg3_init_rings.
6643 *
6644 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6645 * posting buffers we only dirty the first cache line of the RX
6646 * descriptor (containing the address). Whereas for the RX status
6647 * buffers the cpu only reads the last cacheline of the RX descriptor
6648 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6649 */
6650 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6651 u32 opaque_key, u32 dest_idx_unmasked,
6652 unsigned int *frag_size)
6653 {
6654 struct tg3_rx_buffer_desc *desc;
6655 struct ring_info *map;
6656 u8 *data;
6657 dma_addr_t mapping;
6658 int skb_size, data_size, dest_idx;
6659
6660 switch (opaque_key) {
6661 case RXD_OPAQUE_RING_STD:
6662 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6663 desc = &tpr->rx_std[dest_idx];
6664 map = &tpr->rx_std_buffers[dest_idx];
6665 data_size = tp->rx_pkt_map_sz;
6666 break;
6667
6668 case RXD_OPAQUE_RING_JUMBO:
6669 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6670 desc = &tpr->rx_jmb[dest_idx].std;
6671 map = &tpr->rx_jmb_buffers[dest_idx];
6672 data_size = TG3_RX_JMB_MAP_SZ;
6673 break;
6674
6675 default:
6676 return -EINVAL;
6677 }
6678
6679 /* Do not overwrite any of the map or rp information
6680 * until we are sure we can commit to a new buffer.
6681 *
6682 * Callers depend upon this behavior and assume that
6683 * we leave everything unchanged if we fail.
6684 */
6685 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6686 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
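/* Buffers that fit in a single page come from the page-fragment
 * allocator so build_skb() can use them directly; larger buffers
 * fall back to kmalloc(), signalled to the caller by *frag_size = 0.
 */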
6687 if (skb_size <= PAGE_SIZE) {
6688 data = netdev_alloc_frag(skb_size);
6689 *frag_size = skb_size;
6690 } else {
6691 data = kmalloc(skb_size, GFP_ATOMIC);
6692 *frag_size = 0;
6693 }
6694 if (!data)
6695 return -ENOMEM;
6696
6697 mapping = pci_map_single(tp->pdev,
6698 data + TG3_RX_OFFSET(tp),
6699 data_size,
6700 PCI_DMA_FROMDEVICE);
6701 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6702 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6703 return -EIO;
6704 }
6705
6706 map->data = data;
6707 dma_unmap_addr_set(map, mapping, mapping);
6708
6709 desc->addr_hi = ((u64)mapping >> 32);
6710 desc->addr_lo = ((u64)mapping & 0xffffffff);
6711
6712 return data_size;
6713 }
6714
6715 /* We only need to move over in the address because the other
6716 * members of the RX descriptor are invariant. See notes above
6717 * tg3_alloc_rx_data for full details.
6718 */
6719 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6720 struct tg3_rx_prodring_set *dpr,
6721 u32 opaque_key, int src_idx,
6722 u32 dest_idx_unmasked)
6723 {
6724 struct tg3 *tp = tnapi->tp;
6725 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6726 struct ring_info *src_map, *dest_map;
6727 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6728 int dest_idx;
6729
6730 switch (opaque_key) {
6731 case RXD_OPAQUE_RING_STD:
6732 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6733 dest_desc = &dpr->rx_std[dest_idx];
6734 dest_map = &dpr->rx_std_buffers[dest_idx];
6735 src_desc = &spr->rx_std[src_idx];
6736 src_map = &spr->rx_std_buffers[src_idx];
6737 break;
6738
6739 case RXD_OPAQUE_RING_JUMBO:
6740 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6741 dest_desc = &dpr->rx_jmb[dest_idx].std;
6742 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6743 src_desc = &spr->rx_jmb[src_idx].std;
6744 src_map = &spr->rx_jmb_buffers[src_idx];
6745 break;
6746
6747 default:
6748 return;
6749 }
6750
6751 dest_map->data = src_map->data;
6752 dma_unmap_addr_set(dest_map, mapping,
6753 dma_unmap_addr(src_map, mapping));
6754 dest_desc->addr_hi = src_desc->addr_hi;
6755 dest_desc->addr_lo = src_desc->addr_lo;
6756
6757 /* Ensure that the update to the skb happens after the physical
6758 * addresses have been transferred to the new BD location.
6759 */
6760 smp_wmb();
6761
6762 src_map->data = NULL;
6763 }
6764
6765 /* The RX ring scheme is composed of multiple rings which post fresh
6766 * buffers to the chip, and one special ring the chip uses to report
6767 * status back to the host.
6768 *
6769 * The special ring reports the status of received packets to the
6770 * host. The chip does not write into the original descriptor the
6771 * RX buffer was obtained from. The chip simply takes the original
6772 * descriptor as provided by the host, updates the status and length
6773 * field, then writes this into the next status ring entry.
6774 *
6775 * Each ring the host uses to post buffers to the chip is described
6776 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6777 * it is first placed into the on-chip ram. When the packet's length
6778 * is known, it walks down the TG3_BDINFO entries to select the ring.
6779 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6780 * whose MAXLEN covers the new packet's length is chosen.
6781 *
6782 * The "separate ring for rx status" scheme may sound queer, but it makes
6783 * sense from a cache coherency perspective. If only the host writes
6784 * to the buffer post rings, and only the chip writes to the rx status
6785 * rings, then cache lines never move beyond shared-modified state.
6786 * If both the host and chip were to write into the same ring, cache line
6787 * eviction could occur since both entities want it in an exclusive state.
6788 */
6789 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6790 {
6791 struct tg3 *tp = tnapi->tp;
6792 u32 work_mask, rx_std_posted = 0;
6793 u32 std_prod_idx, jmb_prod_idx;
6794 u32 sw_idx = tnapi->rx_rcb_ptr;
6795 u16 hw_idx;
6796 int received;
6797 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6798
6799 hw_idx = *(tnapi->rx_rcb_prod_idx);
6800 /*
6801 * We need to order the read of hw_idx and the read of
6802 * the opaque cookie.
6803 */
6804 rmb();
6805 work_mask = 0;
6806 received = 0;
6807 std_prod_idx = tpr->rx_std_prod_idx;
6808 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6809 while (sw_idx != hw_idx && budget > 0) {
6810 struct ring_info *ri;
6811 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6812 unsigned int len;
6813 struct sk_buff *skb;
6814 dma_addr_t dma_addr;
6815 u32 opaque_key, desc_idx, *post_ptr;
6816 u8 *data;
6817 u64 tstamp = 0;
6818
6819 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6820 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6821 if (opaque_key == RXD_OPAQUE_RING_STD) {
6822 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6823 dma_addr = dma_unmap_addr(ri, mapping);
6824 data = ri->data;
6825 post_ptr = &std_prod_idx;
6826 rx_std_posted++;
6827 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6828 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6829 dma_addr = dma_unmap_addr(ri, mapping);
6830 data = ri->data;
6831 post_ptr = &jmb_prod_idx;
6832 } else
6833 goto next_pkt_nopost;
6834
6835 work_mask |= opaque_key;
6836
6837 if (desc->err_vlan & RXD_ERR_MASK) {
6838 drop_it:
6839 tg3_recycle_rx(tnapi, tpr, opaque_key,
6840 desc_idx, *post_ptr);
6841 drop_it_no_recycle:
6842 /* The card keeps track of the other statistics. */
6843 tp->rx_dropped++;
6844 goto next_pkt;
6845 }
6846
6847 prefetch(data + TG3_RX_OFFSET(tp));
6848 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6849 ETH_FCS_LEN;
6850
6851 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6852 RXD_FLAG_PTPSTAT_PTPV1 ||
6853 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6854 RXD_FLAG_PTPSTAT_PTPV2) {
6855 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6856 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6857 }
6858
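/* Two receive strategies: large packets keep their DMA buffer and
 * are handed to the stack via build_skb(), with a fresh buffer
 * posted in their place; small packets are copied into a new skb
 * and the original buffer is recycled, avoiding a remap cost.
 */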
6859 if (len > TG3_RX_COPY_THRESH(tp)) {
6860 int skb_size;
6861 unsigned int frag_size;
6862
6863 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6864 *post_ptr, &frag_size);
6865 if (skb_size < 0)
6866 goto drop_it;
6867
6868 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6869 PCI_DMA_FROMDEVICE);
6870
6871 /* Ensure that the update to the data happens
6872 * after the usage of the old DMA mapping.
6873 */
6874 smp_wmb();
6875
6876 ri->data = NULL;
6877
6878 skb = build_skb(data, frag_size);
6879 if (!skb) {
6880 tg3_frag_free(frag_size != 0, data);
6881 goto drop_it_no_recycle;
6882 }
6883 skb_reserve(skb, TG3_RX_OFFSET(tp));
6884 } else {
6885 tg3_recycle_rx(tnapi, tpr, opaque_key,
6886 desc_idx, *post_ptr);
6887
6888 skb = netdev_alloc_skb(tp->dev,
6889 len + TG3_RAW_IP_ALIGN);
6890 if (skb == NULL)
6891 goto drop_it_no_recycle;
6892
6893 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6894 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6895 memcpy(skb->data,
6896 data + TG3_RX_OFFSET(tp),
6897 len);
6898 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6899 }
6900
6901 skb_put(skb, len);
6902 if (tstamp)
6903 tg3_hwclock_to_timestamp(tp, tstamp,
6904 skb_hwtstamps(skb));
6905
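/* The chip reports the ones-complement checksum it computed over
 * the TCP/UDP segment; a result of 0xffff means the checksum
 * verified good, so the stack may skip software verification.
 */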
6906 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6907 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6908 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6909 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6910 skb->ip_summed = CHECKSUM_UNNECESSARY;
6911 else
6912 skb_checksum_none_assert(skb);
6913
6914 skb->protocol = eth_type_trans(skb, tp->dev);
6915
6916 if (len > (tp->dev->mtu + ETH_HLEN) &&
6917 skb->protocol != htons(ETH_P_8021Q) &&
6918 skb->protocol != htons(ETH_P_8021AD)) {
6919 dev_kfree_skb_any(skb);
6920 goto drop_it_no_recycle;
6921 }
6922
6923 if (desc->type_flags & RXD_FLAG_VLAN &&
6924 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6925 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6926 desc->err_vlan & RXD_VLAN_MASK);
6927
6928 napi_gro_receive(&tnapi->napi, skb);
6929
6930 received++;
6931 budget--;
6932
6933 next_pkt:
6934 (*post_ptr)++;
6935
6936 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6937 tpr->rx_std_prod_idx = std_prod_idx &
6938 tp->rx_std_ring_mask;
6939 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6940 tpr->rx_std_prod_idx);
6941 work_mask &= ~RXD_OPAQUE_RING_STD;
6942 rx_std_posted = 0;
6943 }
6944 next_pkt_nopost:
6945 sw_idx++;
6946 sw_idx &= tp->rx_ret_ring_mask;
6947
6948 /* Refresh hw_idx to see if there is new work */
6949 if (sw_idx == hw_idx) {
6950 hw_idx = *(tnapi->rx_rcb_prod_idx);
6951 rmb();
6952 }
6953 }
6954
6955 /* ACK the status ring. */
6956 tnapi->rx_rcb_ptr = sw_idx;
6957 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6958
6959 /* Refill RX ring(s). */
6960 if (!tg3_flag(tp, ENABLE_RSS)) {
6961 /* Sync BD data before updating mailbox */
6962 wmb();
6963
6964 if (work_mask & RXD_OPAQUE_RING_STD) {
6965 tpr->rx_std_prod_idx = std_prod_idx &
6966 tp->rx_std_ring_mask;
6967 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6968 tpr->rx_std_prod_idx);
6969 }
6970 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6971 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6972 tp->rx_jmb_ring_mask;
6973 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6974 tpr->rx_jmb_prod_idx);
6975 }
6976 mmiowb();
6977 } else if (work_mask) {
6978 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6979 * updated before the producer indices can be updated.
6980 */
6981 smp_wmb();
6982
6983 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6984 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6985
6986 if (tnapi != &tp->napi[1]) {
6987 tp->rx_refill = true;
6988 napi_schedule(&tp->napi[1].napi);
6989 }
6990 }
6991
6992 return received;
6993 }
6994
6995 static void tg3_poll_link(struct tg3 *tp)
6996 {
6997 /* handle link change and other phy events */
6998 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6999 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7000
7001 if (sblk->status & SD_STATUS_LINK_CHG) {
7002 sblk->status = SD_STATUS_UPDATED |
7003 (sblk->status & ~SD_STATUS_LINK_CHG);
7004 spin_lock(&tp->lock);
7005 if (tg3_flag(tp, USE_PHYLIB)) {
7006 tw32_f(MAC_STATUS,
7007 (MAC_STATUS_SYNC_CHANGED |
7008 MAC_STATUS_CFG_CHANGED |
7009 MAC_STATUS_MI_COMPLETION |
7010 MAC_STATUS_LNKSTATE_CHANGED));
7011 udelay(40);
7012 } else
7013 tg3_setup_phy(tp, false);
7014 spin_unlock(&tp->lock);
7015 }
7016 }
7017 }
7018
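/* Move refilled RX buffers from a per-vector producer ring (spr)
 * back to the ring the hardware consumes from (dpr). Used in RSS
 * mode; returns -ENOSPC if a destination slot is still occupied.
 */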
7019 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7020 struct tg3_rx_prodring_set *dpr,
7021 struct tg3_rx_prodring_set *spr)
7022 {
7023 u32 si, di, cpycnt, src_prod_idx;
7024 int i, err = 0;
7025
7026 while (1) {
7027 src_prod_idx = spr->rx_std_prod_idx;
7028
7029 /* Make sure updates to the rx_std_buffers[] entries and the
7030 * standard producer index are seen in the correct order.
7031 */
7032 smp_rmb();
7033
7034 if (spr->rx_std_cons_idx == src_prod_idx)
7035 break;
7036
7037 if (spr->rx_std_cons_idx < src_prod_idx)
7038 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7039 else
7040 cpycnt = tp->rx_std_ring_mask + 1 -
7041 spr->rx_std_cons_idx;
7042
7043 cpycnt = min(cpycnt,
7044 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7045
7046 si = spr->rx_std_cons_idx;
7047 di = dpr->rx_std_prod_idx;
7048
7049 for (i = di; i < di + cpycnt; i++) {
7050 if (dpr->rx_std_buffers[i].data) {
7051 cpycnt = i - di;
7052 err = -ENOSPC;
7053 break;
7054 }
7055 }
7056
7057 if (!cpycnt)
7058 break;
7059
7060 /* Ensure that updates to the rx_std_buffers ring and the
7061 * shadowed hardware producer ring from tg3_recycle_skb() are
7062 * ordered correctly WRT the skb check above.
7063 */
7064 smp_rmb();
7065
7066 memcpy(&dpr->rx_std_buffers[di],
7067 &spr->rx_std_buffers[si],
7068 cpycnt * sizeof(struct ring_info));
7069
7070 for (i = 0; i < cpycnt; i++, di++, si++) {
7071 struct tg3_rx_buffer_desc *sbd, *dbd;
7072 sbd = &spr->rx_std[si];
7073 dbd = &dpr->rx_std[di];
7074 dbd->addr_hi = sbd->addr_hi;
7075 dbd->addr_lo = sbd->addr_lo;
7076 }
7077
7078 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7079 tp->rx_std_ring_mask;
7080 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7081 tp->rx_std_ring_mask;
7082 }
7083
7084 while (1) {
7085 src_prod_idx = spr->rx_jmb_prod_idx;
7086
7087 /* Make sure updates to the rx_jmb_buffers[] entries and
7088 * the jumbo producer index are seen in the correct order.
7089 */
7090 smp_rmb();
7091
7092 if (spr->rx_jmb_cons_idx == src_prod_idx)
7093 break;
7094
7095 if (spr->rx_jmb_cons_idx < src_prod_idx)
7096 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7097 else
7098 cpycnt = tp->rx_jmb_ring_mask + 1 -
7099 spr->rx_jmb_cons_idx;
7100
7101 cpycnt = min(cpycnt,
7102 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7103
7104 si = spr->rx_jmb_cons_idx;
7105 di = dpr->rx_jmb_prod_idx;
7106
7107 for (i = di; i < di + cpycnt; i++) {
7108 if (dpr->rx_jmb_buffers[i].data) {
7109 cpycnt = i - di;
7110 err = -ENOSPC;
7111 break;
7112 }
7113 }
7114
7115 if (!cpycnt)
7116 break;
7117
7118 /* Ensure that updates to the rx_jmb_buffers ring and the
7119 * shadowed hardware producer ring from tg3_recycle_skb() are
7120 * ordered correctly WRT the skb check above.
7121 */
7122 smp_rmb();
7123
7124 memcpy(&dpr->rx_jmb_buffers[di],
7125 &spr->rx_jmb_buffers[si],
7126 cpycnt * sizeof(struct ring_info));
7127
7128 for (i = 0; i < cpycnt; i++, di++, si++) {
7129 struct tg3_rx_buffer_desc *sbd, *dbd;
7130 sbd = &spr->rx_jmb[si].std;
7131 dbd = &dpr->rx_jmb[di].std;
7132 dbd->addr_hi = sbd->addr_hi;
7133 dbd->addr_lo = sbd->addr_lo;
7134 }
7135
7136 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7137 tp->rx_jmb_ring_mask;
7138 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7139 tp->rx_jmb_ring_mask;
7140 }
7141
7142 return err;
7143 }
7144
7145 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7146 {
7147 struct tg3 *tp = tnapi->tp;
7148
7149 /* run TX completion thread */
7150 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7151 tg3_tx(tnapi);
7152 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7153 return work_done;
7154 }
7155
7156 if (!tnapi->rx_rcb_prod_idx)
7157 return work_done;
7158
7159 /* run RX thread, within the bounds set by NAPI.
7160 * All RX "locking" is done by ensuring outside
7161 * code synchronizes with tg3->napi.poll()
7162 */
7163 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7164 work_done += tg3_rx(tnapi, budget - work_done);
7165
7166 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7167 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7168 int i, err = 0;
7169 u32 std_prod_idx = dpr->rx_std_prod_idx;
7170 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7171
7172 tp->rx_refill = false;
7173 for (i = 1; i <= tp->rxq_cnt; i++)
7174 err |= tg3_rx_prodring_xfer(tp, dpr,
7175 &tp->napi[i].prodring);
7176
7177 wmb();
7178
7179 if (std_prod_idx != dpr->rx_std_prod_idx)
7180 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7181 dpr->rx_std_prod_idx);
7182
7183 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7184 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7185 dpr->rx_jmb_prod_idx);
7186
7187 mmiowb();
7188
7189 if (err)
7190 tw32_f(HOSTCC_MODE, tp->coal_now);
7191 }
7192
7193 return work_done;
7194 }
7195
7196 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7197 {
7198 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7199 schedule_work(&tp->reset_task);
7200 }
7201
7202 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7203 {
7204 cancel_work_sync(&tp->reset_task);
7205 tg3_flag_clear(tp, RESET_TASK_PENDING);
7206 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7207 }
7208
7209 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7210 {
7211 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7212 struct tg3 *tp = tnapi->tp;
7213 int work_done = 0;
7214 struct tg3_hw_status *sblk = tnapi->hw_status;
7215
7216 while (1) {
7217 work_done = tg3_poll_work(tnapi, work_done, budget);
7218
7219 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7220 goto tx_recovery;
7221
7222 if (unlikely(work_done >= budget))
7223 break;
7224
7225 /* tp->last_tag is used in tg3_int_reenable() below
7226 * to tell the hw how much work has been processed,
7227 * so we must read it before checking for more work.
7228 */
7229 tnapi->last_tag = sblk->status_tag;
7230 tnapi->last_irq_tag = tnapi->last_tag;
7231 rmb();
7232
7233 /* check for RX/TX work to do */
7234 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7235 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7236
7237 /* This test is not race-free, but it reduces the
7238 * number of interrupts by looping again.
7239 */
7240 if (tnapi == &tp->napi[1] && tp->rx_refill)
7241 continue;
7242
7243 napi_complete_done(napi, work_done);
7244 /* Reenable interrupts. */
7245 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7246
7247 /* This test is synchronized by napi_schedule()
7248 * and napi_complete() to close the race condition.
7249 */
7250 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7251 tw32(HOSTCC_MODE, tp->coalesce_mode |
7252 HOSTCC_MODE_ENABLE |
7253 tnapi->coal_now);
7254 }
7255 mmiowb();
7256 break;
7257 }
7258 }
7259
7260 return work_done;
7261
7262 tx_recovery:
7263 /* work_done is guaranteed to be less than budget. */
7264 napi_complete(napi);
7265 tg3_reset_task_schedule(tp);
7266 return work_done;
7267 }
7268
7269 static void tg3_process_error(struct tg3 *tp)
7270 {
7271 u32 val;
7272 bool real_error = false;
7273
7274 if (tg3_flag(tp, ERROR_PROCESSED))
7275 return;
7276
7277 /* Check Flow Attention register */
7278 val = tr32(HOSTCC_FLOW_ATTN);
7279 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7280 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7281 real_error = true;
7282 }
7283
7284 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7285 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7286 real_error = true;
7287 }
7288
7289 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7290 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7291 real_error = true;
7292 }
7293
7294 if (!real_error)
7295 return;
7296
7297 tg3_dump_state(tp);
7298
7299 tg3_flag_set(tp, ERROR_PROCESSED);
7300 tg3_reset_task_schedule(tp);
7301 }
7302
7303 static int tg3_poll(struct napi_struct *napi, int budget)
7304 {
7305 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7306 struct tg3 *tp = tnapi->tp;
7307 int work_done = 0;
7308 struct tg3_hw_status *sblk = tnapi->hw_status;
7309
7310 while (1) {
7311 if (sblk->status & SD_STATUS_ERROR)
7312 tg3_process_error(tp);
7313
7314 tg3_poll_link(tp);
7315
7316 work_done = tg3_poll_work(tnapi, work_done, budget);
7317
7318 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7319 goto tx_recovery;
7320
7321 if (unlikely(work_done >= budget))
7322 break;
7323
7324 if (tg3_flag(tp, TAGGED_STATUS)) {
7325 /* tp->last_tag is used in tg3_int_reenable() below
7326 * to tell the hw how much work has been processed,
7327 * so we must read it before checking for more work.
7328 */
7329 tnapi->last_tag = sblk->status_tag;
7330 tnapi->last_irq_tag = tnapi->last_tag;
7331 rmb();
7332 } else
7333 sblk->status &= ~SD_STATUS_UPDATED;
7334
7335 if (likely(!tg3_has_work(tnapi))) {
7336 napi_complete_done(napi, work_done);
7337 tg3_int_reenable(tnapi);
7338 break;
7339 }
7340 }
7341
7342 return work_done;
7343
7344 tx_recovery:
7345 /* work_done is guaranteed to be less than budget. */
7346 napi_complete(napi);
7347 tg3_reset_task_schedule(tp);
7348 return work_done;
7349 }
7350
7351 static void tg3_napi_disable(struct tg3 *tp)
7352 {
7353 int i;
7354
7355 for (i = tp->irq_cnt - 1; i >= 0; i--)
7356 napi_disable(&tp->napi[i].napi);
7357 }
7358
7359 static void tg3_napi_enable(struct tg3 *tp)
7360 {
7361 int i;
7362
7363 for (i = 0; i < tp->irq_cnt; i++)
7364 napi_enable(&tp->napi[i].napi);
7365 }
7366
7367 static void tg3_napi_init(struct tg3 *tp)
7368 {
7369 int i;
7370
7371 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7372 for (i = 1; i < tp->irq_cnt; i++)
7373 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7374 }
7375
7376 static void tg3_napi_fini(struct tg3 *tp)
7377 {
7378 int i;
7379
7380 for (i = 0; i < tp->irq_cnt; i++)
7381 netif_napi_del(&tp->napi[i].napi);
7382 }
7383
7384 static inline void tg3_netif_stop(struct tg3 *tp)
7385 {
7386 netif_trans_update(tp->dev); /* prevent tx timeout */
7387 tg3_napi_disable(tp);
7388 netif_carrier_off(tp->dev);
7389 netif_tx_disable(tp->dev);
7390 }
7391
7392 /* tp->lock must be held */
7393 static inline void tg3_netif_start(struct tg3 *tp)
7394 {
7395 tg3_ptp_resume(tp);
7396
7397 /* NOTE: unconditional netif_tx_wake_all_queues is only
7398 * appropriate so long as all callers are assured to
7399 * have free tx slots (such as after tg3_init_hw)
7400 */
7401 netif_tx_wake_all_queues(tp->dev);
7402
7403 if (tp->link_up)
7404 netif_carrier_on(tp->dev);
7405
7406 tg3_napi_enable(tp);
7407 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7408 tg3_enable_ints(tp);
7409 }
7410
7411 static void tg3_irq_quiesce(struct tg3 *tp)
7412 __releases(tp->lock)
7413 __acquires(tp->lock)
7414 {
7415 int i;
7416
7417 BUG_ON(tp->irq_sync);
7418
7419 tp->irq_sync = 1;
7420 smp_mb();
7421
7422 spin_unlock_bh(&tp->lock);
7423
7424 for (i = 0; i < tp->irq_cnt; i++)
7425 synchronize_irq(tp->napi[i].irq_vec);
7426
7427 spin_lock_bh(&tp->lock);
7428 }
7429
7430 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7431 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7432 * with as well. Most of the time, this is not necessary except when
7433 * shutting down the device.
7434 */
7435 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7436 {
7437 spin_lock_bh(&tp->lock);
7438 if (irq_sync)
7439 tg3_irq_quiesce(tp);
7440 }
7441
7442 static inline void tg3_full_unlock(struct tg3 *tp)
7443 {
7444 spin_unlock_bh(&tp->lock);
7445 }
7446
7447 /* One-shot MSI handler - Chip automatically disables interrupt
7448 * after sending MSI so driver doesn't have to do it.
7449 */
7450 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7451 {
7452 struct tg3_napi *tnapi = dev_id;
7453 struct tg3 *tp = tnapi->tp;
7454
7455 prefetch(tnapi->hw_status);
7456 if (tnapi->rx_rcb)
7457 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7458
7459 if (likely(!tg3_irq_sync(tp)))
7460 napi_schedule(&tnapi->napi);
7461
7462 return IRQ_HANDLED;
7463 }
7464
7465 /* MSI ISR - No need to check for interrupt sharing and no need to
7466 * flush status block and interrupt mailbox. PCI ordering rules
7467 * guarantee that MSI will arrive after the status block.
7468 */
7469 static irqreturn_t tg3_msi(int irq, void *dev_id)
7470 {
7471 struct tg3_napi *tnapi = dev_id;
7472 struct tg3 *tp = tnapi->tp;
7473
7474 prefetch(tnapi->hw_status);
7475 if (tnapi->rx_rcb)
7476 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7477 /*
7478 * Writing any value to intr-mbox-0 clears PCI INTA# and
7479 * chip-internal interrupt pending events.
7480 * Writing non-zero to intr-mbox-0 additionally tells the
7481 * NIC to stop sending us irqs, engaging "in-intr-handler"
7482 * event coalescing.
7483 */
7484 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7485 if (likely(!tg3_irq_sync(tp)))
7486 napi_schedule(&tnapi->napi);
7487
7488 return IRQ_RETVAL(1);
7489 }
7490
7491 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7492 {
7493 struct tg3_napi *tnapi = dev_id;
7494 struct tg3 *tp = tnapi->tp;
7495 struct tg3_hw_status *sblk = tnapi->hw_status;
7496 unsigned int handled = 1;
7497
7498 /* In INTx mode, the interrupt can arrive at the CPU before the
7499 * status block update posted just prior to it has landed in memory.
7500 * Reading the PCI State register will confirm whether the
7501 * interrupt is ours and will flush the status block.
7502 */
7503 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7504 if (tg3_flag(tp, CHIP_RESETTING) ||
7505 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7506 handled = 0;
7507 goto out;
7508 }
7509 }
7510
7511 /*
7512 * Writing any value to intr-mbox-0 clears PCI INTA# and
7513 * chip-internal interrupt pending events.
7514 * Writing non-zero to intr-mbox-0 additionally tells the
7515 * NIC to stop sending us irqs, engaging "in-intr-handler"
7516 * event coalescing.
7517 *
7518 * Flush the mailbox to de-assert the IRQ immediately to prevent
7519 * spurious interrupts. The flush impacts performance but
7520 * excessive spurious interrupts can be worse in some cases.
7521 */
7522 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7523 if (tg3_irq_sync(tp))
7524 goto out;
7525 sblk->status &= ~SD_STATUS_UPDATED;
7526 if (likely(tg3_has_work(tnapi))) {
7527 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7528 napi_schedule(&tnapi->napi);
7529 } else {
7530 /* No work; a shared interrupt, perhaps? Re-enable
7531 * interrupts and flush that PCI write.
7532 */
7533 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7534 0x00000000);
7535 }
7536 out:
7537 return IRQ_RETVAL(handled);
7538 }
7539
7540 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7541 {
7542 struct tg3_napi *tnapi = dev_id;
7543 struct tg3 *tp = tnapi->tp;
7544 struct tg3_hw_status *sblk = tnapi->hw_status;
7545 unsigned int handled = 1;
7546
7547 /* In INTx mode, the interrupt can arrive at the CPU before the
7548 * status block update posted just prior to it has landed in memory.
7549 * Reading the PCI State register will confirm whether the
7550 * interrupt is ours and will flush the status block.
7551 */
7552 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7553 if (tg3_flag(tp, CHIP_RESETTING) ||
7554 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7555 handled = 0;
7556 goto out;
7557 }
7558 }
7559
7560 /*
7561 * Writing any value to intr-mbox-0 clears PCI INTA# and
7562 * chip-internal interrupt pending events.
7563 * Writing non-zero to intr-mbox-0 additionally tells the
7564 * NIC to stop sending us irqs, engaging "in-intr-handler"
7565 * event coalescing.
7566 *
7567 * Flush the mailbox to de-assert the IRQ immediately to prevent
7568 * spurious interrupts. The flush impacts performance but
7569 * excessive spurious interrupts can be worse in some cases.
7570 */
7571 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7572
7573 /*
7574 * In a shared interrupt configuration, sometimes other devices'
7575 * interrupts will scream. We record the current status tag here
7576 * so that the above check can report that the screaming interrupts
7577 * are unhandled. Eventually they will be silenced.
7578 */
7579 tnapi->last_irq_tag = sblk->status_tag;
7580
7581 if (tg3_irq_sync(tp))
7582 goto out;
7583
7584 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7585
7586 napi_schedule(&tnapi->napi);
7587
7588 out:
7589 return IRQ_RETVAL(handled);
7590 }
7591
7592 /* ISR for interrupt test */
7593 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7594 {
7595 struct tg3_napi *tnapi = dev_id;
7596 struct tg3 *tp = tnapi->tp;
7597 struct tg3_hw_status *sblk = tnapi->hw_status;
7598
7599 if ((sblk->status & SD_STATUS_UPDATED) ||
7600 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7601 tg3_disable_ints(tp);
7602 return IRQ_RETVAL(1);
7603 }
7604 return IRQ_RETVAL(0);
7605 }
7606
7607 #ifdef CONFIG_NET_POLL_CONTROLLER
7608 static void tg3_poll_controller(struct net_device *dev)
7609 {
7610 int i;
7611 struct tg3 *tp = netdev_priv(dev);
7612
7613 if (tg3_irq_sync(tp))
7614 return;
7615
7616 for (i = 0; i < tp->irq_cnt; i++)
7617 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7618 }
7619 #endif
7620
7621 static void tg3_tx_timeout(struct net_device *dev)
7622 {
7623 struct tg3 *tp = netdev_priv(dev);
7624
7625 if (netif_msg_tx_err(tp)) {
7626 netdev_err(dev, "transmit timed out, resetting\n");
7627 tg3_dump_state(tp);
7628 }
7629
7630 tg3_reset_task_schedule(tp);
7631 }
7632
7633 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7634 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7635 {
7636 u32 base = (u32) mapping & 0xffffffff;
7637
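/* Illustrative: base = 0xfffff000, len = 0x2000 gives
 * 0xfffff000 + 0x2000 + 8 = 0x100001008, which truncates to 0x1008
 * in 32 bits; 0x1008 < base, so the wraparound flags the crossing.
 */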
7638 return base + len + 8 < base;
7639 }
7640
7641 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7642 * of any 4GB boundaries: 4G, 8G, etc
7643 */
7644 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7645 u32 len, u32 mss)
7646 {
7647 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7648 u32 base = (u32) mapping & 0xffffffff;
7649
7650 return ((base + len + (mss & 0x3fff)) < base);
7651 }
7652 return 0;
7653 }
7654
7655 /* Test for DMA addresses > 40-bit */
7656 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7657 int len)
7658 {
7659 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7660 if (tg3_flag(tp, 40BIT_DMA_BUG))
7661 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7662 return 0;
7663 #else
7664 return 0;
7665 #endif
7666 }
7667
7668 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7669 dma_addr_t mapping, u32 len, u32 flags,
7670 u32 mss, u32 vlan)
7671 {
7672 txbd->addr_hi = ((u64) mapping >> 32);
7673 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7674 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7675 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7676 }
7677
7678 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7679 dma_addr_t map, u32 len, u32 flags,
7680 u32 mss, u32 vlan)
7681 {
7682 struct tg3 *tp = tnapi->tp;
7683 bool hwbug = false;
7684
7685 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7686 hwbug = true;
7687
7688 if (tg3_4g_overflow_test(map, len))
7689 hwbug = true;
7690
7691 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7692 hwbug = true;
7693
7694 if (tg3_40bit_overflow_test(tp, map, len))
7695 hwbug = true;
7696
7697 if (tp->dma_limit) {
7698 u32 prvidx = *entry;
7699 u32 tmp_flag = flags & ~TXD_FLAG_END;
7700 while (len > tp->dma_limit && *budget) {
7701 u32 frag_len = tp->dma_limit;
7702 len -= tp->dma_limit;
7703
7704 /* Avoid the 8-byte DMA problem */
7705 if (len <= 8) {
7706 len += tp->dma_limit / 2;
7707 frag_len = tp->dma_limit / 2;
7708 }
7709
7710 tnapi->tx_buffers[*entry].fragmented = true;
7711
7712 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7713 frag_len, tmp_flag, mss, vlan);
7714 *budget -= 1;
7715 prvidx = *entry;
7716 *entry = NEXT_TX(*entry);
7717
7718 map += frag_len;
7719 }
7720
7721 if (len) {
7722 if (*budget) {
7723 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7724 len, flags, mss, vlan);
7725 *budget -= 1;
7726 *entry = NEXT_TX(*entry);
7727 } else {
7728 hwbug = true;
7729 tnapi->tx_buffers[prvidx].fragmented = false;
7730 }
7731 }
7732 } else {
7733 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7734 len, flags, mss, vlan);
7735 *entry = NEXT_TX(*entry);
7736 }
7737
7738 return hwbug;
7739 }
7740
7741 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7742 {
7743 int i;
7744 struct sk_buff *skb;
7745 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7746
7747 skb = txb->skb;
7748 txb->skb = NULL;
7749
7750 pci_unmap_single(tnapi->tp->pdev,
7751 dma_unmap_addr(txb, mapping),
7752 skb_headlen(skb),
7753 PCI_DMA_TODEVICE);
7754
7755 while (txb->fragmented) {
7756 txb->fragmented = false;
7757 entry = NEXT_TX(entry);
7758 txb = &tnapi->tx_buffers[entry];
7759 }
7760
7761 for (i = 0; i <= last; i++) {
7762 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7763
7764 entry = NEXT_TX(entry);
7765 txb = &tnapi->tx_buffers[entry];
7766
7767 pci_unmap_page(tnapi->tp->pdev,
7768 dma_unmap_addr(txb, mapping),
7769 skb_frag_size(frag), PCI_DMA_TODEVICE);
7770
7771 while (txb->fragmented) {
7772 txb->fragmented = false;
7773 entry = NEXT_TX(entry);
7774 txb = &tnapi->tx_buffers[entry];
7775 }
7776 }
7777 }
7778
7779 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7780 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7781 struct sk_buff **pskb,
7782 u32 *entry, u32 *budget,
7783 u32 base_flags, u32 mss, u32 vlan)
7784 {
7785 struct tg3 *tp = tnapi->tp;
7786 struct sk_buff *new_skb, *skb = *pskb;
7787 dma_addr_t new_addr = 0;
7788 int ret = 0;
7789
7790 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7791 new_skb = skb_copy(skb, GFP_ATOMIC);
7792 else {
7793 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7794
7795 new_skb = skb_copy_expand(skb,
7796 skb_headroom(skb) + more_headroom,
7797 skb_tailroom(skb), GFP_ATOMIC);
7798 }
7799
7800 if (!new_skb) {
7801 ret = -1;
7802 } else {
7803 /* New SKB is guaranteed to be linear. */
7804 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7805 PCI_DMA_TODEVICE);
7806 /* Make sure the mapping succeeded */
7807 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7808 dev_kfree_skb_any(new_skb);
7809 ret = -1;
7810 } else {
7811 u32 save_entry = *entry;
7812
7813 base_flags |= TXD_FLAG_END;
7814
7815 tnapi->tx_buffers[*entry].skb = new_skb;
7816 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7817 mapping, new_addr);
7818
7819 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7820 new_skb->len, base_flags,
7821 mss, vlan)) {
7822 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7823 dev_kfree_skb_any(new_skb);
7824 ret = -1;
7825 }
7826 }
7827 }
7828
7829 dev_kfree_skb_any(skb);
7830 *pskb = new_skb;
7831 return ret;
7832 }
7833
7834 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7835 {
7836 /* Check if we will never have enough descriptors,
7837 * as gso_segs can be more than current ring size
7838 */
7839 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7840 }
7841
7842 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7843
7844 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7845 * indicated in tg3_tx_frag_set()
7846 */
7847 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7848 struct netdev_queue *txq, struct sk_buff *skb)
7849 {
7850 struct sk_buff *segs, *nskb;
7851 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
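/* The factor of 3 is assumed to be a worst-case allowance of up to
 * three descriptors per software-segmented frame (linear header
 * plus split data mappings).
 */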
7852
7853 /* Estimate the number of fragments in the worst case */
7854 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7855 netif_tx_stop_queue(txq);
7856
7857 /* netif_tx_stop_queue() must be done before checking
7858 * tx index in tg3_tx_avail() below, because in
7859 * tg3_tx(), we update tx index before checking for
7860 * netif_tx_queue_stopped().
7861 */
7862 smp_mb();
7863 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7864 return NETDEV_TX_BUSY;
7865
7866 netif_tx_wake_queue(txq);
7867 }
7868
7869 segs = skb_gso_segment(skb, tp->dev->features &
7870 ~(NETIF_F_TSO | NETIF_F_TSO6));
7871 if (IS_ERR(segs) || !segs)
7872 goto tg3_tso_bug_end;
7873
7874 do {
7875 nskb = segs;
7876 segs = segs->next;
7877 nskb->next = NULL;
7878 tg3_start_xmit(nskb, tp->dev);
7879 } while (segs);
7880
7881 tg3_tso_bug_end:
7882 dev_kfree_skb_any(skb);
7883
7884 return NETDEV_TX_OK;
7885 }
7886
7887 /* hard_start_xmit for all devices */
7888 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7889 {
7890 struct tg3 *tp = netdev_priv(dev);
7891 u32 len, entry, base_flags, mss, vlan = 0;
7892 u32 budget;
7893 int i = -1, would_hit_hwbug;
7894 dma_addr_t mapping;
7895 struct tg3_napi *tnapi;
7896 struct netdev_queue *txq;
7897 unsigned int last;
7898 struct iphdr *iph = NULL;
7899 struct tcphdr *tcph = NULL;
7900 __sum16 tcp_csum = 0, ip_csum = 0;
7901 __be16 ip_tot_len = 0;
7902
7903 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7904 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7905 if (tg3_flag(tp, ENABLE_TSS))
7906 tnapi++;
7907
7908 budget = tg3_tx_avail(tnapi);
7909
7910 /* We are running in BH disabled context with netif_tx_lock
7911 * and TX reclaim runs via tp->napi.poll inside of a software
7912 * interrupt. Furthermore, IRQ processing runs lockless so we have
7913 * no IRQ context deadlocks to worry about either. Rejoice!
7914 */
7915 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7916 if (!netif_tx_queue_stopped(txq)) {
7917 netif_tx_stop_queue(txq);
7918
7919 /* This is a hard error, log it. */
7920 netdev_err(dev,
7921 "BUG! Tx Ring full when queue awake!\n");
7922 }
7923 return NETDEV_TX_BUSY;
7924 }
7925
7926 entry = tnapi->tx_prod;
7927 base_flags = 0;
7928
7929 mss = skb_shinfo(skb)->gso_size;
7930 if (mss) {
7931 u32 tcp_opt_len, hdr_len;
7932
7933 if (skb_cow_head(skb, 0))
7934 goto drop;
7935
7936 iph = ip_hdr(skb);
7937 tcp_opt_len = tcp_optlen(skb);
7938
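/* hdr_len covers the IP header through the end of the TCP
 * header, i.e. everything after the Ethernet header.
 */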
7939 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7940
7941 /* HW/FW cannot correctly segment packets that have been
7942 * VLAN encapsulated.
7943 */
7944 if (skb->protocol == htons(ETH_P_8021Q) ||
7945 skb->protocol == htons(ETH_P_8021AD)) {
7946 if (tg3_tso_bug_gso_check(tnapi, skb))
7947 return tg3_tso_bug(tp, tnapi, txq, skb);
7948 goto drop;
7949 }
7950
7951 if (!skb_is_gso_v6(skb)) {
7952 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7953 tg3_flag(tp, TSO_BUG)) {
7954 if (tg3_tso_bug_gso_check(tnapi, skb))
7955 return tg3_tso_bug(tp, tnapi, txq, skb);
7956 goto drop;
7957 }
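/* Save the IPv4 checksum and total length so they can be
 * restored if we must fall back to the GSO workaround below.
 */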
7958 ip_csum = iph->check;
7959 ip_tot_len = iph->tot_len;
7960 iph->check = 0;
7961 iph->tot_len = htons(mss + hdr_len);
7962 }
7963
7964 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7965 TXD_FLAG_CPU_POST_DMA);
7966
7967 tcph = tcp_hdr(skb);
7968 tcp_csum = tcph->check;
7969
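/* Hardware TSO engines compute the TCP checksum themselves;
 * otherwise seed a pseudo-header checksum for the firmware
 * TSO path.
 */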
7970 if (tg3_flag(tp, HW_TSO_1) ||
7971 tg3_flag(tp, HW_TSO_2) ||
7972 tg3_flag(tp, HW_TSO_3)) {
7973 tcph->check = 0;
7974 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7975 } else {
7976 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7977 0, IPPROTO_TCP, 0);
7978 }
7979
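/* Each hardware TSO generation packs header-length information
 * into spare bits of the mss and base_flags words; the encoding
 * differs per chip.
 */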
7980 if (tg3_flag(tp, HW_TSO_3)) {
7981 mss |= (hdr_len & 0xc) << 12;
7982 if (hdr_len & 0x10)
7983 base_flags |= 0x00000010;
7984 base_flags |= (hdr_len & 0x3e0) << 5;
7985 } else if (tg3_flag(tp, HW_TSO_2))
7986 mss |= hdr_len << 9;
7987 else if (tg3_flag(tp, HW_TSO_1) ||
7988 tg3_asic_rev(tp) == ASIC_REV_5705) {
7989 if (tcp_opt_len || iph->ihl > 5) {
7990 int tsflags;
7991
7992 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7993 mss |= (tsflags << 11);
7994 }
7995 } else {
7996 if (tcp_opt_len || iph->ihl > 5) {
7997 int tsflags;
7998
7999 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8000 base_flags |= tsflags << 12;
8001 }
8002 }
8003 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8004 /* HW/FW cannot correctly checksum packets that have been
8005 * VLAN encapsulated.
8006 */
8007 if (skb->protocol == htons(ETH_P_8021Q) ||
8008 skb->protocol == htons(ETH_P_8021AD)) {
8009 if (skb_checksum_help(skb))
8010 goto drop;
8011 } else {
8012 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8013 }
8014 }
8015
8016 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8017 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8018 base_flags |= TXD_FLAG_JMB_PKT;
8019
8020 if (skb_vlan_tag_present(skb)) {
8021 base_flags |= TXD_FLAG_VLAN;
8022 vlan = skb_vlan_tag_get(skb);
8023 }
8024
8025 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8026 tg3_flag(tp, TX_TSTAMP_EN)) {
8027 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8028 base_flags |= TXD_FLAG_HWTSTAMP;
8029 }
8030
8031 len = skb_headlen(skb);
8032
8033 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8034 if (pci_dma_mapping_error(tp->pdev, mapping))
8035 goto drop;
8036
8037
8038 tnapi->tx_buffers[entry].skb = skb;
8039 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8040
8041 would_hit_hwbug = 0;
8042
8043 if (tg3_flag(tp, 5701_DMA_BUG))
8044 would_hit_hwbug = 1;
8045
8046 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8047 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8048 mss, vlan)) {
8049 would_hit_hwbug = 1;
8050 } else if (skb_shinfo(skb)->nr_frags > 0) {
8051 u32 tmp_mss = mss;
8052
8053 if (!tg3_flag(tp, HW_TSO_1) &&
8054 !tg3_flag(tp, HW_TSO_2) &&
8055 !tg3_flag(tp, HW_TSO_3))
8056 tmp_mss = 0;
8057
8058 /* Now loop through additional data
8059 * fragments, and queue them.
8060 */
8061 last = skb_shinfo(skb)->nr_frags - 1;
8062 for (i = 0; i <= last; i++) {
8063 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8064
8065 len = skb_frag_size(frag);
8066 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8067 len, DMA_TO_DEVICE);
8068
8069 tnapi->tx_buffers[entry].skb = NULL;
8070 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8071 mapping);
8072 if (dma_mapping_error(&tp->pdev->dev, mapping))
8073 goto dma_error;
8074
8075 if (!budget ||
8076 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8077 len, base_flags |
8078 ((i == last) ? TXD_FLAG_END : 0),
8079 tmp_mss, vlan)) {
8080 would_hit_hwbug = 1;
8081 break;
8082 }
8083 }
8084 }
8085
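/* A mapping tripped a DMA bug check or exhausted the budget.
 * Unwind the partially built descriptors, then retry via GSO or
 * a linearized copy.
 */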
8086 if (would_hit_hwbug) {
8087 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8088
8089 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8090 /* If it's a TSO packet, do GSO instead of
8091 * allocating and copying to a large linear SKB
8092 */
8093 if (ip_tot_len) {
8094 iph->check = ip_csum;
8095 iph->tot_len = ip_tot_len;
8096 }
8097 tcph->check = tcp_csum;
8098 return tg3_tso_bug(tp, tnapi, txq, skb);
8099 }
8100
8101 /* If the workaround fails due to memory/mapping
8102 * failure, silently drop this packet.
8103 */
8104 entry = tnapi->tx_prod;
8105 budget = tg3_tx_avail(tnapi);
8106 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8107 base_flags, mss, vlan))
8108 goto drop_nofree;
8109 }
8110
8111 skb_tx_timestamp(skb);
8112 netdev_tx_sent_queue(txq, skb->len);
8113
8114 /* Sync BD data before updating mailbox */
8115 wmb();
8116
8117 tnapi->tx_prod = entry;
8118 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8119 netif_tx_stop_queue(txq);
8120
8121 /* netif_tx_stop_queue() must be done before checking
8122 * the tx index in tg3_tx_avail() below, because in
8123 * tg3_tx(), we update the tx index before checking for
8124 * netif_tx_queue_stopped().
8125 */
8126 smp_mb();
8127 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8128 netif_tx_wake_queue(txq);
8129 }
8130
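/* Defer the mailbox doorbell while the stack has more packets
 * queued (xmit_more) so tail-pointer updates are batched.
 */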
8131 if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8132 /* Packets are ready, update Tx producer idx on card. */
8133 tw32_tx_mbox(tnapi->prodmbox, entry);
8134 mmiowb();
8135 }
8136
8137 return NETDEV_TX_OK;
8138
8139 dma_error:
8140 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8141 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8142 drop:
8143 dev_kfree_skb_any(skb);
8144 drop_nofree:
8145 tp->tx_dropped++;
8146 return NETDEV_TX_OK;
8147 }
8148
8149 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8150 {
8151 if (enable) {
8152 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8153 MAC_MODE_PORT_MODE_MASK);
8154
8155 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8156
8157 if (!tg3_flag(tp, 5705_PLUS))
8158 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8159
8160 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8161 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8162 else
8163 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8164 } else {
8165 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8166
8167 if (tg3_flag(tp, 5705_PLUS) ||
8168 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8169 tg3_asic_rev(tp) == ASIC_REV_5700)
8170 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8171 }
8172
8173 tw32(MAC_MODE, tp->mac_mode);
8174 udelay(40);
8175 }
8176
8177 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8178 {
8179 u32 val, bmcr, mac_mode, ptest = 0;
8180
8181 tg3_phy_toggle_apd(tp, false);
8182 tg3_phy_toggle_automdix(tp, false);
8183
8184 if (extlpbk && tg3_phy_set_extloopbk(tp))
8185 return -EIO;
8186
8187 bmcr = BMCR_FULLDPLX;
8188 switch (speed) {
8189 case SPEED_10:
8190 break;
8191 case SPEED_100:
8192 bmcr |= BMCR_SPEED100;
8193 break;
8194 case SPEED_1000:
8195 default:
8196 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8197 speed = SPEED_100;
8198 bmcr |= BMCR_SPEED100;
8199 } else {
8200 speed = SPEED_1000;
8201 bmcr |= BMCR_SPEED1000;
8202 }
8203 }
8204
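/* For external loopback, force a non-FET PHY to link as master;
 * FET PHYs use the PTEST trim bits instead.
 */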
8205 if (extlpbk) {
8206 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8207 tg3_readphy(tp, MII_CTRL1000, &val);
8208 val |= CTL1000_AS_MASTER |
8209 CTL1000_ENABLE_MASTER;
8210 tg3_writephy(tp, MII_CTRL1000, val);
8211 } else {
8212 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8213 MII_TG3_FET_PTEST_TRIM_2;
8214 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8215 }
8216 } else
8217 bmcr |= BMCR_LOOPBACK;
8218
8219 tg3_writephy(tp, MII_BMCR, bmcr);
8220
8221 /* The write needs to be flushed for the FETs */
8222 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8223 tg3_readphy(tp, MII_BMCR, &bmcr);
8224
8225 udelay(40);
8226
8227 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8228 tg3_asic_rev(tp) == ASIC_REV_5785) {
8229 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8230 MII_TG3_FET_PTEST_FRC_TX_LINK |
8231 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8232
8233 /* The write needs to be flushed for the AC131 */
8234 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8235 }
8236
8237 /* Reset to prevent intermittently losing the first rx packet */
8238 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8239 tg3_flag(tp, 5780_CLASS)) {
8240 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8241 udelay(10);
8242 tw32_f(MAC_RX_MODE, tp->rx_mode);
8243 }
8244
8245 mac_mode = tp->mac_mode &
8246 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8247 if (speed == SPEED_1000)
8248 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8249 else
8250 mac_mode |= MAC_MODE_PORT_MODE_MII;
8251
8252 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8253 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8254
8255 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8256 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8257 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8258 mac_mode |= MAC_MODE_LINK_POLARITY;
8259
8260 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8261 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8262 }
8263
8264 tw32(MAC_MODE, mac_mode);
8265 udelay(40);
8266
8267 return 0;
8268 }
8269
8270 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8271 {
8272 struct tg3 *tp = netdev_priv(dev);
8273
8274 if (features & NETIF_F_LOOPBACK) {
8275 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8276 return;
8277
8278 spin_lock_bh(&tp->lock);
8279 tg3_mac_loopback(tp, true);
8280 netif_carrier_on(tp->dev);
8281 spin_unlock_bh(&tp->lock);
8282 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8283 } else {
8284 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8285 return;
8286
8287 spin_lock_bh(&tp->lock);
8288 tg3_mac_loopback(tp, false);
8289 /* Force link status check */
8290 tg3_setup_phy(tp, true);
8291 spin_unlock_bh(&tp->lock);
8292 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8293 }
8294 }
8295
8296 static netdev_features_t tg3_fix_features(struct net_device *dev,
8297 netdev_features_t features)
8298 {
8299 struct tg3 *tp = netdev_priv(dev);
8300
8301 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8302 features &= ~NETIF_F_ALL_TSO;
8303
8304 return features;
8305 }
8306
8307 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8308 {
8309 netdev_features_t changed = dev->features ^ features;
8310
8311 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8312 tg3_set_loopback(dev, features);
8313
8314 return 0;
8315 }
8316
8317 static void tg3_rx_prodring_free(struct tg3 *tp,
8318 struct tg3_rx_prodring_set *tpr)
8319 {
8320 int i;
8321
8322 if (tpr != &tp->napi[0].prodring) {
8323 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8324 i = (i + 1) & tp->rx_std_ring_mask)
8325 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8326 tp->rx_pkt_map_sz);
8327
8328 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8329 for (i = tpr->rx_jmb_cons_idx;
8330 i != tpr->rx_jmb_prod_idx;
8331 i = (i + 1) & tp->rx_jmb_ring_mask) {
8332 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8333 TG3_RX_JMB_MAP_SZ);
8334 }
8335 }
8336
8337 return;
8338 }
8339
8340 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8341 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8342 tp->rx_pkt_map_sz);
8343
8344 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8345 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8346 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8347 TG3_RX_JMB_MAP_SZ);
8348 }
8349 }
8350
8351 /* Initialize rx rings for packet processing.
8352 *
8353 * The chip has been shut down and the driver detached from
8354 * the networking stack, so no interrupts or new tx packets will
8355 * end up in the driver. tp->{tx,}lock are held and thus
8356 * we may not sleep.
8357 */
8358 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8359 struct tg3_rx_prodring_set *tpr)
8360 {
8361 u32 i, rx_pkt_dma_sz;
8362
8363 tpr->rx_std_cons_idx = 0;
8364 tpr->rx_std_prod_idx = 0;
8365 tpr->rx_jmb_cons_idx = 0;
8366 tpr->rx_jmb_prod_idx = 0;
8367
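/* Prodrings other than napi[0]'s serve only as software
 * bookkeeping for RSS; just clear their buffer tables. napi[0]
 * owns the ring the hardware actually posts to.
 */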
8368 if (tpr != &tp->napi[0].prodring) {
8369 memset(&tpr->rx_std_buffers[0], 0,
8370 TG3_RX_STD_BUFF_RING_SIZE(tp));
8371 if (tpr->rx_jmb_buffers)
8372 memset(&tpr->rx_jmb_buffers[0], 0,
8373 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8374 goto done;
8375 }
8376
8377 /* Zero out all descriptors. */
8378 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8379
8380 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8381 if (tg3_flag(tp, 5780_CLASS) &&
8382 tp->dev->mtu > ETH_DATA_LEN)
8383 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8384 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8385
8386 /* Initialize the invariants of the rings; we only set this
8387 * stuff once. This works because the card does not
8388 * write into the rx buffer posting rings.
8389 */
8390 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8391 struct tg3_rx_buffer_desc *rxd;
8392
8393 rxd = &tpr->rx_std[i];
8394 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8395 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8396 rxd->opaque = (RXD_OPAQUE_RING_STD |
8397 (i << RXD_OPAQUE_INDEX_SHIFT));
8398 }
8399
8400 /* Now allocate fresh SKBs for each rx ring. */
8401 for (i = 0; i < tp->rx_pending; i++) {
8402 unsigned int frag_size;
8403
8404 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8405 &frag_size) < 0) {
8406 netdev_warn(tp->dev,
8407 "Using a smaller RX standard ring. Only "
8408 "%d out of %d buffers were allocated "
8409 "successfully\n", i, tp->rx_pending);
8410 if (i == 0)
8411 goto initfail;
8412 tp->rx_pending = i;
8413 break;
8414 }
8415 }
8416
8417 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8418 goto done;
8419
8420 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8421
8422 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8423 goto done;
8424
8425 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8426 struct tg3_rx_buffer_desc *rxd;
8427
8428 rxd = &tpr->rx_jmb[i].std;
8429 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8430 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8431 RXD_FLAG_JUMBO;
8432 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8433 (i << RXD_OPAQUE_INDEX_SHIFT));
8434 }
8435
8436 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8437 unsigned int frag_size;
8438
8439 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8440 &frag_size) < 0) {
8441 netdev_warn(tp->dev,
8442 "Using a smaller RX jumbo ring. Only %d "
8443 "out of %d buffers were allocated "
8444 "successfully\n", i, tp->rx_jumbo_pending);
8445 if (i == 0)
8446 goto initfail;
8447 tp->rx_jumbo_pending = i;
8448 break;
8449 }
8450 }
8451
8452 done:
8453 return 0;
8454
8455 initfail:
8456 tg3_rx_prodring_free(tp, tpr);
8457 return -ENOMEM;
8458 }
8459
8460 static void tg3_rx_prodring_fini(struct tg3 *tp,
8461 struct tg3_rx_prodring_set *tpr)
8462 {
8463 kfree(tpr->rx_std_buffers);
8464 tpr->rx_std_buffers = NULL;
8465 kfree(tpr->rx_jmb_buffers);
8466 tpr->rx_jmb_buffers = NULL;
8467 if (tpr->rx_std) {
8468 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8469 tpr->rx_std, tpr->rx_std_mapping);
8470 tpr->rx_std = NULL;
8471 }
8472 if (tpr->rx_jmb) {
8473 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8474 tpr->rx_jmb, tpr->rx_jmb_mapping);
8475 tpr->rx_jmb = NULL;
8476 }
8477 }
8478
8479 static int tg3_rx_prodring_init(struct tg3 *tp,
8480 struct tg3_rx_prodring_set *tpr)
8481 {
8482 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8483 GFP_KERNEL);
8484 if (!tpr->rx_std_buffers)
8485 return -ENOMEM;
8486
8487 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8488 TG3_RX_STD_RING_BYTES(tp),
8489 &tpr->rx_std_mapping,
8490 GFP_KERNEL);
8491 if (!tpr->rx_std)
8492 goto err_out;
8493
8494 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8495 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8496 GFP_KERNEL);
8497 if (!tpr->rx_jmb_buffers)
8498 goto err_out;
8499
8500 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8501 TG3_RX_JMB_RING_BYTES(tp),
8502 &tpr->rx_jmb_mapping,
8503 GFP_KERNEL);
8504 if (!tpr->rx_jmb)
8505 goto err_out;
8506 }
8507
8508 return 0;
8509
8510 err_out:
8511 tg3_rx_prodring_fini(tp, tpr);
8512 return -ENOMEM;
8513 }
8514
8515 /* Free up pending packets in all rx/tx rings.
8516 *
8517 * The chip has been shut down and the driver detached from
8518 * the networking, so no interrupts or new tx packets will
8519 * end up in the driver. tp->{tx,}lock is not held and we are not
8520 * in an interrupt context and thus may sleep.
8521 */
8522 static void tg3_free_rings(struct tg3 *tp)
8523 {
8524 int i, j;
8525
8526 for (j = 0; j < tp->irq_cnt; j++) {
8527 struct tg3_napi *tnapi = &tp->napi[j];
8528
8529 tg3_rx_prodring_free(tp, &tnapi->prodring);
8530
8531 if (!tnapi->tx_buffers)
8532 continue;
8533
8534 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8535 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8536
8537 if (!skb)
8538 continue;
8539
8540 tg3_tx_skb_unmap(tnapi, i,
8541 skb_shinfo(skb)->nr_frags - 1);
8542
8543 dev_kfree_skb_any(skb);
8544 }
8545 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8546 }
8547 }
8548
8549 /* Initialize tx/rx rings for packet processing.
8550 *
8551 * The chip has been shut down and the driver detached from
8552 * the networking, so no interrupts or new tx packets will
8553 * end up in the driver. tp->{tx,}lock are held and thus
8554 * we may not sleep.
8555 */
8556 static int tg3_init_rings(struct tg3 *tp)
8557 {
8558 int i;
8559
8560 /* Free up all the SKBs. */
8561 tg3_free_rings(tp);
8562
8563 for (i = 0; i < tp->irq_cnt; i++) {
8564 struct tg3_napi *tnapi = &tp->napi[i];
8565
8566 tnapi->last_tag = 0;
8567 tnapi->last_irq_tag = 0;
8568 tnapi->hw_status->status = 0;
8569 tnapi->hw_status->status_tag = 0;
8570 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8571
8572 tnapi->tx_prod = 0;
8573 tnapi->tx_cons = 0;
8574 if (tnapi->tx_ring)
8575 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8576
8577 tnapi->rx_rcb_ptr = 0;
8578 if (tnapi->rx_rcb)
8579 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8580
8581 if (tnapi->prodring.rx_std &&
8582 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8583 tg3_free_rings(tp);
8584 return -ENOMEM;
8585 }
8586 }
8587
8588 return 0;
8589 }
8590
8591 static void tg3_mem_tx_release(struct tg3 *tp)
8592 {
8593 int i;
8594
8595 for (i = 0; i < tp->irq_max; i++) {
8596 struct tg3_napi *tnapi = &tp->napi[i];
8597
8598 if (tnapi->tx_ring) {
8599 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8600 tnapi->tx_ring, tnapi->tx_desc_mapping);
8601 tnapi->tx_ring = NULL;
8602 }
8603
8604 kfree(tnapi->tx_buffers);
8605 tnapi->tx_buffers = NULL;
8606 }
8607 }
8608
8609 static int tg3_mem_tx_acquire(struct tg3 *tp)
8610 {
8611 int i;
8612 struct tg3_napi *tnapi = &tp->napi[0];
8613
8614 /* If multivector TSS is enabled, vector 0 does not handle
8615 * tx interrupts. Don't allocate any resources for it.
8616 */
8617 if (tg3_flag(tp, ENABLE_TSS))
8618 tnapi++;
8619
8620 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8621 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8622 TG3_TX_RING_SIZE, GFP_KERNEL);
8623 if (!tnapi->tx_buffers)
8624 goto err_out;
8625
8626 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8627 TG3_TX_RING_BYTES,
8628 &tnapi->tx_desc_mapping,
8629 GFP_KERNEL);
8630 if (!tnapi->tx_ring)
8631 goto err_out;
8632 }
8633
8634 return 0;
8635
8636 err_out:
8637 tg3_mem_tx_release(tp);
8638 return -ENOMEM;
8639 }
8640
8641 static void tg3_mem_rx_release(struct tg3 *tp)
8642 {
8643 int i;
8644
8645 for (i = 0; i < tp->irq_max; i++) {
8646 struct tg3_napi *tnapi = &tp->napi[i];
8647
8648 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8649
8650 if (!tnapi->rx_rcb)
8651 continue;
8652
8653 dma_free_coherent(&tp->pdev->dev,
8654 TG3_RX_RCB_RING_BYTES(tp),
8655 tnapi->rx_rcb,
8656 tnapi->rx_rcb_mapping);
8657 tnapi->rx_rcb = NULL;
8658 }
8659 }
8660
8661 static int tg3_mem_rx_acquire(struct tg3 *tp)
8662 {
8663 unsigned int i, limit;
8664
8665 limit = tp->rxq_cnt;
8666
8667 /* If RSS is enabled, we need a (dummy) producer ring
8668 * set on vector zero. This is the true hw prodring.
8669 */
8670 if (tg3_flag(tp, ENABLE_RSS))
8671 limit++;
8672
8673 for (i = 0; i < limit; i++) {
8674 struct tg3_napi *tnapi = &tp->napi[i];
8675
8676 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8677 goto err_out;
8678
8679 /* If multivector RSS is enabled, vector 0
8680 * does not handle rx or tx interrupts.
8681 * Don't allocate any resources for it.
8682 */
8683 if (!i && tg3_flag(tp, ENABLE_RSS))
8684 continue;
8685
8686 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8687 TG3_RX_RCB_RING_BYTES(tp),
8688 &tnapi->rx_rcb_mapping,
8689 GFP_KERNEL);
8690 if (!tnapi->rx_rcb)
8691 goto err_out;
8692 }
8693
8694 return 0;
8695
8696 err_out:
8697 tg3_mem_rx_release(tp);
8698 return -ENOMEM;
8699 }
8700
8701 /*
8702 * Must not be invoked with interrupt sources disabled and
8703 * the hardware shut down.
8704 */
8705 static void tg3_free_consistent(struct tg3 *tp)
8706 {
8707 int i;
8708
8709 for (i = 0; i < tp->irq_cnt; i++) {
8710 struct tg3_napi *tnapi = &tp->napi[i];
8711
8712 if (tnapi->hw_status) {
8713 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8714 tnapi->hw_status,
8715 tnapi->status_mapping);
8716 tnapi->hw_status = NULL;
8717 }
8718 }
8719
8720 tg3_mem_rx_release(tp);
8721 tg3_mem_tx_release(tp);
8722
8723 /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
8724 tg3_full_lock(tp, 0);
8725 if (tp->hw_stats) {
8726 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8727 tp->hw_stats, tp->stats_mapping);
8728 tp->hw_stats = NULL;
8729 }
8730 tg3_full_unlock(tp);
8731 }
8732
8733 /*
8734 * Must not be invoked with interrupt sources disabled and
8735 * the hardware shut down. Can sleep.
8736 */
8737 static int tg3_alloc_consistent(struct tg3 *tp)
8738 {
8739 int i;
8740
8741 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8742 sizeof(struct tg3_hw_stats),
8743 &tp->stats_mapping, GFP_KERNEL);
8744 if (!tp->hw_stats)
8745 goto err_out;
8746
8747 for (i = 0; i < tp->irq_cnt; i++) {
8748 struct tg3_napi *tnapi = &tp->napi[i];
8749 struct tg3_hw_status *sblk;
8750
8751 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8752 TG3_HW_STATUS_SIZE,
8753 &tnapi->status_mapping,
8754 GFP_KERNEL);
8755 if (!tnapi->hw_status)
8756 goto err_out;
8757
8758 sblk = tnapi->hw_status;
8759
8760 if (tg3_flag(tp, ENABLE_RSS)) {
8761 u16 *prodptr = NULL;
8762
8763 /*
8764 * When RSS is enabled, the status block format changes
8765 * slightly. The "rx_jumbo_consumer", "reserved",
8766 * and "rx_mini_consumer" members get mapped to the
8767 * other three rx return ring producer indexes.
8768 */
8769 switch (i) {
8770 case 1:
8771 prodptr = &sblk->idx[0].rx_producer;
8772 break;
8773 case 2:
8774 prodptr = &sblk->rx_jumbo_consumer;
8775 break;
8776 case 3:
8777 prodptr = &sblk->reserved;
8778 break;
8779 case 4:
8780 prodptr = &sblk->rx_mini_consumer;
8781 break;
8782 }
8783 tnapi->rx_rcb_prod_idx = prodptr;
8784 } else {
8785 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8786 }
8787 }
8788
8789 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8790 goto err_out;
8791
8792 return 0;
8793
8794 err_out:
8795 tg3_free_consistent(tp);
8796 return -ENOMEM;
8797 }
8798
8799 #define MAX_WAIT_CNT 1000
8800
8801 /* To stop a block, clear the enable bit and poll till it
8802 * clears. tp->lock is held.
8803 */
8804 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8805 {
8806 unsigned int i;
8807 u32 val;
8808
8809 if (tg3_flag(tp, 5705_PLUS)) {
8810 switch (ofs) {
8811 case RCVLSC_MODE:
8812 case DMAC_MODE:
8813 case MBFREE_MODE:
8814 case BUFMGR_MODE:
8815 case MEMARB_MODE:
8816 /* We can't enable/disable these bits on the
8817 * 5705/5750; just report success.
8818 */
8819 return 0;
8820
8821 default:
8822 break;
8823 }
8824 }
8825
8826 val = tr32(ofs);
8827 val &= ~enable_bit;
8828 tw32_f(ofs, val);
8829
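/* Poll up to MAX_WAIT_CNT * 100 usec for the enable bit to clear,
 * bailing out early if the PCI channel has gone offline.
 */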
8830 for (i = 0; i < MAX_WAIT_CNT; i++) {
8831 if (pci_channel_offline(tp->pdev)) {
8832 dev_err(&tp->pdev->dev,
8833 "tg3_stop_block device offline, "
8834 "ofs=%lx enable_bit=%x\n",
8835 ofs, enable_bit);
8836 return -ENODEV;
8837 }
8838
8839 udelay(100);
8840 val = tr32(ofs);
8841 if ((val & enable_bit) == 0)
8842 break;
8843 }
8844
8845 if (i == MAX_WAIT_CNT && !silent) {
8846 dev_err(&tp->pdev->dev,
8847 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8848 ofs, enable_bit);
8849 return -ENODEV;
8850 }
8851
8852 return 0;
8853 }
8854
8855 /* tp->lock is held. */
8856 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8857 {
8858 int i, err;
8859
8860 tg3_disable_ints(tp);
8861
8862 if (pci_channel_offline(tp->pdev)) {
8863 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8864 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8865 err = -ENODEV;
8866 goto err_no_dev;
8867 }
8868
8869 tp->rx_mode &= ~RX_MODE_ENABLE;
8870 tw32_f(MAC_RX_MODE, tp->rx_mode);
8871 udelay(10);
8872
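/* Quiesce the receive path first, then the transmit path, and
 * finally the host coalescing, DMA, and memory arbiter blocks.
 */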
8873 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8874 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8875 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8876 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8877 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8878 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8879
8880 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8881 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8882 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8883 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8884 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8885 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8886 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8887
8888 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8889 tw32_f(MAC_MODE, tp->mac_mode);
8890 udelay(40);
8891
8892 tp->tx_mode &= ~TX_MODE_ENABLE;
8893 tw32_f(MAC_TX_MODE, tp->tx_mode);
8894
8895 for (i = 0; i < MAX_WAIT_CNT; i++) {
8896 udelay(100);
8897 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8898 break;
8899 }
8900 if (i >= MAX_WAIT_CNT) {
8901 dev_err(&tp->pdev->dev,
8902 "%s timed out, TX_MODE_ENABLE will not clear "
8903 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8904 err |= -ENODEV;
8905 }
8906
8907 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8908 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8909 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8910
8911 tw32(FTQ_RESET, 0xffffffff);
8912 tw32(FTQ_RESET, 0x00000000);
8913
8914 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8915 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8916
8917 err_no_dev:
8918 for (i = 0; i < tp->irq_cnt; i++) {
8919 struct tg3_napi *tnapi = &tp->napi[i];
8920 if (tnapi->hw_status)
8921 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8922 }
8923
8924 return err;
8925 }
8926
8927 /* Save PCI command register before chip reset */
8928 static void tg3_save_pci_state(struct tg3 *tp)
8929 {
8930 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8931 }
8932
8933 /* Restore PCI state after chip reset */
8934 static void tg3_restore_pci_state(struct tg3 *tp)
8935 {
8936 u32 val;
8937
8938 /* Re-enable indirect register accesses. */
8939 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8940 tp->misc_host_ctrl);
8941
8942 /* Set MAX PCI retry to zero. */
8943 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8944 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8945 tg3_flag(tp, PCIX_MODE))
8946 val |= PCISTATE_RETRY_SAME_DMA;
8947 /* Allow reads and writes to the APE register and memory space. */
8948 if (tg3_flag(tp, ENABLE_APE))
8949 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8950 PCISTATE_ALLOW_APE_SHMEM_WR |
8951 PCISTATE_ALLOW_APE_PSPACE_WR;
8952 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8953
8954 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8955
8956 if (!tg3_flag(tp, PCI_EXPRESS)) {
8957 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8958 tp->pci_cacheline_sz);
8959 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8960 tp->pci_lat_timer);
8961 }
8962
8963 /* Make sure PCI-X relaxed ordering bit is clear. */
8964 if (tg3_flag(tp, PCIX_MODE)) {
8965 u16 pcix_cmd;
8966
8967 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8968 &pcix_cmd);
8969 pcix_cmd &= ~PCI_X_CMD_ERO;
8970 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8971 pcix_cmd);
8972 }
8973
8974 if (tg3_flag(tp, 5780_CLASS)) {
8975
8976 /* Chip reset on 5780 will reset the MSI enable bit,
8977 * so we need to restore it.
8978 */
8979 if (tg3_flag(tp, USING_MSI)) {
8980 u16 ctrl;
8981
8982 pci_read_config_word(tp->pdev,
8983 tp->msi_cap + PCI_MSI_FLAGS,
8984 &ctrl);
8985 pci_write_config_word(tp->pdev,
8986 tp->msi_cap + PCI_MSI_FLAGS,
8987 ctrl | PCI_MSI_FLAGS_ENABLE);
8988 val = tr32(MSGINT_MODE);
8989 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8990 }
8991 }
8992 }
8993
8994 static void tg3_override_clk(struct tg3 *tp)
8995 {
8996 u32 val;
8997
8998 switch (tg3_asic_rev(tp)) {
8999 case ASIC_REV_5717:
9000 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9001 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9002 TG3_CPMU_MAC_ORIDE_ENABLE);
9003 break;
9004
9005 case ASIC_REV_5719:
9006 case ASIC_REV_5720:
9007 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9008 break;
9009
9010 default:
9011 return;
9012 }
9013 }
9014
9015 static void tg3_restore_clk(struct tg3 *tp)
9016 {
9017 u32 val;
9018
9019 switch (tg3_asic_rev(tp)) {
9020 case ASIC_REV_5717:
9021 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9022 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9023 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9024 break;
9025
9026 case ASIC_REV_5719:
9027 case ASIC_REV_5720:
9028 val = tr32(TG3_CPMU_CLCK_ORIDE);
9029 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9030 break;
9031
9032 default:
9033 return;
9034 }
9035 }
9036
9037 /* tp->lock is held. */
9038 static int tg3_chip_reset(struct tg3 *tp)
9039 __releases(tp->lock)
9040 __acquires(tp->lock)
9041 {
9042 u32 val;
9043 void (*write_op)(struct tg3 *, u32, u32);
9044 int i, err;
9045
9046 if (!pci_device_is_present(tp->pdev))
9047 return -ENODEV;
9048
9049 tg3_nvram_lock(tp);
9050
9051 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9052
9053 /* No matching tg3_nvram_unlock() after this because
9054 * chip reset below will undo the nvram lock.
9055 */
9056 tp->nvram_lock_cnt = 0;
9057
9058 /* GRC_MISC_CFG core clock reset will clear the memory
9059 * enable bit in PCI register 4 and the MSI enable bit
9060 * on some chips, so we save relevant registers here.
9061 */
9062 tg3_save_pci_state(tp);
9063
9064 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9065 tg3_flag(tp, 5755_PLUS))
9066 tw32(GRC_FASTBOOT_PC, 0);
9067
9068 /*
9069 * We must avoid the readl() that normally takes place.
9070 * It locks up machines, causes machine checks, and triggers
9071 * other fun things. So, temporarily disable the 5701
9072 * hardware workaround while we do the reset.
9073 */
9074 write_op = tp->write32;
9075 if (write_op == tg3_write_flush_reg32)
9076 tp->write32 = tg3_write32;
9077
9078 /* Prevent the irq handler from reading or writing PCI registers
9079 * during chip reset when the memory enable bit in the PCI command
9080 * register may be cleared. The chip does not generate interrupts
9081 * at this time, but the irq handler may still be called due to irq
9082 * sharing or irqpoll.
9083 */
9084 tg3_flag_set(tp, CHIP_RESETTING);
9085 for (i = 0; i < tp->irq_cnt; i++) {
9086 struct tg3_napi *tnapi = &tp->napi[i];
9087 if (tnapi->hw_status) {
9088 tnapi->hw_status->status = 0;
9089 tnapi->hw_status->status_tag = 0;
9090 }
9091 tnapi->last_tag = 0;
9092 tnapi->last_irq_tag = 0;
9093 }
9094 smp_mb();
9095
9096 tg3_full_unlock(tp);
9097
9098 for (i = 0; i < tp->irq_cnt; i++)
9099 synchronize_irq(tp->napi[i].irq_vec);
9100
9101 tg3_full_lock(tp, 0);
9102
9103 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9104 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9105 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9106 }
9107
9108 /* do the reset */
9109 val = GRC_MISC_CFG_CORECLK_RESET;
9110
9111 if (tg3_flag(tp, PCI_EXPRESS)) {
9112 /* Force PCIe 1.0a mode */
9113 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9114 !tg3_flag(tp, 57765_PLUS) &&
9115 tr32(TG3_PCIE_PHY_TSTCTL) ==
9116 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9117 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9118
9119 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9120 tw32(GRC_MISC_CFG, (1 << 29));
9121 val |= (1 << 29);
9122 }
9123 }
9124
9125 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9126 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9127 tw32(GRC_VCPU_EXT_CTRL,
9128 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9129 }
9130
9131 /* Set the clock to the highest frequency to avoid timeouts. In
9132 * link-aware mode the clock speed could be slow and the bootcode
9133 * would not complete within the expected time. Override the clock to
9134 * let the bootcode finish sooner, then restore it.
9135 */
9136 tg3_override_clk(tp);
9137
9138 /* Manage gphy power for all CPMU absent PCIe devices. */
9139 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9140 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9141
9142 tw32(GRC_MISC_CFG, val);
9143
9144 /* restore 5701 hardware bug workaround write method */
9145 tp->write32 = write_op;
9146
9147 /* Unfortunately, we have to delay before the PCI read back.
9148 * Some 575X chips will not even respond to a PCI cfg access
9149 * when the reset command is given to the chip.
9150 *
9151 * How do these hardware designers expect things to work
9152 * properly if the PCI write is posted for a long period
9153 * of time? It is always necessary to have some method by
9154 * which a register read back can occur to push out the
9155 * write that does the reset.
9156 *
9157 * For most tg3 variants the trick below has worked.
9158 * Ho hum...
9159 */
9160 udelay(120);
9161
9162 /* Flush PCI posted writes. The normal MMIO registers
9163 * are inaccessible at this time, so this is the only
9164 * way to do this reliably (actually, this is no longer
9165 * the case, see above). I tried to use indirect
9166 * register read/write but this upset some 5701 variants.
9167 */
9168 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9169
9170 udelay(120);
9171
9172 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9173 u16 val16;
9174
9175 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9176 int j;
9177 u32 cfg_val;
9178
9179 /* Wait for link training to complete. */
9180 for (j = 0; j < 5000; j++)
9181 udelay(100);
9182
9183 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9184 pci_write_config_dword(tp->pdev, 0xc4,
9185 cfg_val | (1 << 15));
9186 }
9187
9188 /* Clear the "no snoop" and "relaxed ordering" bits. */
9189 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9190 /*
9191 * Older PCIe devices only support the 128 byte
9192 * MPS setting. Enforce the restriction.
9193 */
9194 if (!tg3_flag(tp, CPMU_PRESENT))
9195 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9196 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9197
9198 /* Clear error status */
9199 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9200 PCI_EXP_DEVSTA_CED |
9201 PCI_EXP_DEVSTA_NFED |
9202 PCI_EXP_DEVSTA_FED |
9203 PCI_EXP_DEVSTA_URD);
9204 }
9205
9206 tg3_restore_pci_state(tp);
9207
9208 tg3_flag_clear(tp, CHIP_RESETTING);
9209 tg3_flag_clear(tp, ERROR_PROCESSED);
9210
9211 val = 0;
9212 if (tg3_flag(tp, 5780_CLASS))
9213 val = tr32(MEMARB_MODE);
9214 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9215
9216 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9217 tg3_stop_fw(tp);
9218 tw32(0x5000, 0x400);
9219 }
9220
9221 if (tg3_flag(tp, IS_SSB_CORE)) {
9222 /*
9223 * BCM4785: In order to avoid repercussions from using
9224 * potentially defective internal ROM, stop the Rx RISC CPU,
9225 * which is not required for operation.
9226 */
9227 tg3_stop_fw(tp);
9228 tg3_halt_cpu(tp, RX_CPU_BASE);
9229 }
9230
9231 err = tg3_poll_fw(tp);
9232 if (err)
9233 return err;
9234
9235 tw32(GRC_MODE, tp->grc_mode);
9236
9237 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9238 val = tr32(0xc4);
9239
9240 tw32(0xc4, val | (1 << 15));
9241 }
9242
9243 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9244 tg3_asic_rev(tp) == ASIC_REV_5705) {
9245 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9246 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9247 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9248 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9249 }
9250
9251 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9252 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9253 val = tp->mac_mode;
9254 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9255 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9256 val = tp->mac_mode;
9257 } else
9258 val = 0;
9259
9260 tw32_f(MAC_MODE, val);
9261 udelay(40);
9262
9263 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9264
9265 tg3_mdio_start(tp);
9266
9267 if (tg3_flag(tp, PCI_EXPRESS) &&
9268 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9269 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9270 !tg3_flag(tp, 57765_PLUS)) {
9271 val = tr32(0x7c00);
9272
9273 tw32(0x7c00, val | (1 << 25));
9274 }
9275
9276 tg3_restore_clk(tp);
9277
9278 /* Reprobe ASF enable state. */
9279 tg3_flag_clear(tp, ENABLE_ASF);
9280 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9281 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9282
9283 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9284 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9285 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9286 u32 nic_cfg;
9287
9288 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9289 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9290 tg3_flag_set(tp, ENABLE_ASF);
9291 tp->last_event_jiffies = jiffies;
9292 if (tg3_flag(tp, 5750_PLUS))
9293 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9294
9295 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9296 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9297 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9298 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9299 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9300 }
9301 }
9302
9303 return 0;
9304 }
9305
9306 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9307 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9308 static void __tg3_set_rx_mode(struct net_device *);
9309
9310 /* tp->lock is held. */
9311 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9312 {
9313 int err;
9314
9315 tg3_stop_fw(tp);
9316
9317 tg3_write_sig_pre_reset(tp, kind);
9318
9319 tg3_abort_hw(tp, silent);
9320 err = tg3_chip_reset(tp);
9321
9322 __tg3_set_mac_addr(tp, false);
9323
9324 tg3_write_sig_legacy(tp, kind);
9325 tg3_write_sig_post_reset(tp, kind);
9326
9327 if (tp->hw_stats) {
9328 /* Save the stats across chip resets... */
9329 tg3_get_nstats(tp, &tp->net_stats_prev);
9330 tg3_get_estats(tp, &tp->estats_prev);
9331
9332 /* And make sure the next sample is new data */
9333 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9334 }
9335
9336 return err;
9337 }
9338
9339 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9340 {
9341 struct tg3 *tp = netdev_priv(dev);
9342 struct sockaddr *addr = p;
9343 int err = 0;
9344 bool skip_mac_1 = false;
9345
9346 if (!is_valid_ether_addr(addr->sa_data))
9347 return -EADDRNOTAVAIL;
9348
9349 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9350
9351 if (!netif_running(dev))
9352 return 0;
9353
9354 if (tg3_flag(tp, ENABLE_ASF)) {
9355 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9356
9357 addr0_high = tr32(MAC_ADDR_0_HIGH);
9358 addr0_low = tr32(MAC_ADDR_0_LOW);
9359 addr1_high = tr32(MAC_ADDR_1_HIGH);
9360 addr1_low = tr32(MAC_ADDR_1_LOW);
9361
9362 /* Skip MAC addr 1 if ASF is using it. */
9363 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9364 !(addr1_high == 0 && addr1_low == 0))
9365 skip_mac_1 = true;
9366 }
9367 spin_lock_bh(&tp->lock);
9368 __tg3_set_mac_addr(tp, skip_mac_1);
9369 __tg3_set_rx_mode(dev);
9370 spin_unlock_bh(&tp->lock);
9371
9372 return err;
9373 }
9374
9375 /* tp->lock is held. */
9376 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9377 dma_addr_t mapping, u32 maxlen_flags,
9378 u32 nic_addr)
9379 {
9380 tg3_write_mem(tp,
9381 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9382 ((u64) mapping >> 32));
9383 tg3_write_mem(tp,
9384 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9385 ((u64) mapping & 0xffffffff));
9386 tg3_write_mem(tp,
9387 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9388 maxlen_flags);
9389
9390 if (!tg3_flag(tp, 5705_PLUS))
9391 tg3_write_mem(tp,
9392 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9393 nic_addr);
9394 }
9395
9396
9397 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9398 {
9399 int i = 0;
9400
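/* Without TSS a single set of global TX coalescing registers is
 * used; with TSS each queue has its own bank at the VEC1 offsets,
 * spaced 0x18 apart, and the global registers are zeroed.
 */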
9401 if (!tg3_flag(tp, ENABLE_TSS)) {
9402 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9403 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9404 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9405 } else {
9406 tw32(HOSTCC_TXCOL_TICKS, 0);
9407 tw32(HOSTCC_TXMAX_FRAMES, 0);
9408 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9409
9410 for (; i < tp->txq_cnt; i++) {
9411 u32 reg;
9412
9413 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9414 tw32(reg, ec->tx_coalesce_usecs);
9415 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9416 tw32(reg, ec->tx_max_coalesced_frames);
9417 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9418 tw32(reg, ec->tx_max_coalesced_frames_irq);
9419 }
9420 }
9421
9422 for (; i < tp->irq_max - 1; i++) {
9423 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9424 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9425 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9426 }
9427 }
9428
9429 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9430 {
9431 int i = 0;
9432 u32 limit = tp->rxq_cnt;
9433
9434 if (!tg3_flag(tp, ENABLE_RSS)) {
9435 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9436 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9437 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9438 limit--;
9439 } else {
9440 tw32(HOSTCC_RXCOL_TICKS, 0);
9441 tw32(HOSTCC_RXMAX_FRAMES, 0);
9442 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9443 }
9444
9445 for (; i < limit; i++) {
9446 u32 reg;
9447
9448 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9449 tw32(reg, ec->rx_coalesce_usecs);
9450 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9451 tw32(reg, ec->rx_max_coalesced_frames);
9452 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9453 tw32(reg, ec->rx_max_coalesced_frames_irq);
9454 }
9455
9456 for (; i < tp->irq_max - 1; i++) {
9457 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9458 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9459 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9460 }
9461 }
9462
9463 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9464 {
9465 tg3_coal_tx_init(tp, ec);
9466 tg3_coal_rx_init(tp, ec);
9467
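/* Only pre-5705 chips have the tick-interrupt and statistics
 * coalescing registers; statistics coalescing is disabled while
 * the link is down.
 */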
9468 if (!tg3_flag(tp, 5705_PLUS)) {
9469 u32 val = ec->stats_block_coalesce_usecs;
9470
9471 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9472 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9473
9474 if (!tp->link_up)
9475 val = 0;
9476
9477 tw32(HOSTCC_STAT_COAL_TICKS, val);
9478 }
9479 }
9480
9481 /* tp->lock is held. */
9482 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9483 {
9484 u32 txrcb, limit;
9485
9486 /* Disable all transmit rings but the first. */
9487 if (!tg3_flag(tp, 5705_PLUS))
9488 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9489 else if (tg3_flag(tp, 5717_PLUS))
9490 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9491 else if (tg3_flag(tp, 57765_CLASS) ||
9492 tg3_asic_rev(tp) == ASIC_REV_5762)
9493 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9494 else
9495 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9496
9497 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9498 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9499 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9500 BDINFO_FLAGS_DISABLED);
9501 }
9502
9503 /* tp->lock is held. */
9504 static void tg3_tx_rcbs_init(struct tg3 *tp)
9505 {
9506 int i = 0;
9507 u32 txrcb = NIC_SRAM_SEND_RCB;
9508
9509 if (tg3_flag(tp, ENABLE_TSS))
9510 i++;
9511
9512 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9513 struct tg3_napi *tnapi = &tp->napi[i];
9514
9515 if (!tnapi->tx_ring)
9516 continue;
9517
9518 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9519 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9520 NIC_SRAM_TX_BUFFER_DESC);
9521 }
9522 }
9523
9524 /* tp->lock is held. */
9525 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9526 {
9527 u32 rxrcb, limit;
9528
9529 /* Disable all receive return rings but the first. */
9530 if (tg3_flag(tp, 5717_PLUS))
9531 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9532 else if (!tg3_flag(tp, 5705_PLUS))
9533 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9534 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9535 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9536 tg3_flag(tp, 57765_CLASS))
9537 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9538 else
9539 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9540
9541 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9542 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9543 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9544 BDINFO_FLAGS_DISABLED);
9545 }
9546
9547 /* tp->lock is held. */
9548 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9549 {
9550 int i = 0;
9551 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9552
9553 if (tg3_flag(tp, ENABLE_RSS))
9554 i++;
9555
9556 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9557 struct tg3_napi *tnapi = &tp->napi[i];
9558
9559 if (!tnapi->rx_rcb)
9560 continue;
9561
9562 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9563 (tp->rx_ret_ring_mask + 1) <<
9564 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9565 }
9566 }
9567
9568 /* tp->lock is held. */
9569 static void tg3_rings_reset(struct tg3 *tp)
9570 {
9571 int i;
9572 u32 stblk;
9573 struct tg3_napi *tnapi = &tp->napi[0];
9574
9575 tg3_tx_rcbs_disable(tp);
9576
9577 tg3_rx_ret_rcbs_disable(tp);
9578
9579 /* Disable interrupts */
9580 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9581 tp->napi[0].chk_msi_cnt = 0;
9582 tp->napi[0].last_rx_cons = 0;
9583 tp->napi[0].last_tx_cons = 0;
9584
9585 /* Zero mailbox registers. */
9586 if (tg3_flag(tp, SUPPORT_MSIX)) {
9587 for (i = 1; i < tp->irq_max; i++) {
9588 tp->napi[i].tx_prod = 0;
9589 tp->napi[i].tx_cons = 0;
9590 if (tg3_flag(tp, ENABLE_TSS))
9591 tw32_mailbox(tp->napi[i].prodmbox, 0);
9592 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9593 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9594 tp->napi[i].chk_msi_cnt = 0;
9595 tp->napi[i].last_rx_cons = 0;
9596 tp->napi[i].last_tx_cons = 0;
9597 }
9598 if (!tg3_flag(tp, ENABLE_TSS))
9599 tw32_mailbox(tp->napi[0].prodmbox, 0);
9600 } else {
9601 tp->napi[0].tx_prod = 0;
9602 tp->napi[0].tx_cons = 0;
9603 tw32_mailbox(tp->napi[0].prodmbox, 0);
9604 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9605 }
9606
9607 /* Make sure the NIC-based send BD rings are disabled. */
9608 if (!tg3_flag(tp, 5705_PLUS)) {
9609 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9610 for (i = 0; i < 16; i++)
9611 tw32_tx_mbox(mbox + i * 8, 0);
9612 }
9613
9614 /* Clear the status block in RAM. */
9615 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9616
9617 /* Set status block DMA address */
9618 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9619 ((u64) tnapi->status_mapping >> 32));
9620 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9621 ((u64) tnapi->status_mapping & 0xffffffff));
9622
9623 stblk = HOSTCC_STATBLCK_RING1;
9624
9625 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9626 u64 mapping = (u64)tnapi->status_mapping;
9627 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9628 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9629 stblk += 8;
9630
9631 /* Clear the status block in RAM. */
9632 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9633 }
9634
9635 tg3_tx_rcbs_init(tp);
9636 tg3_rx_ret_rcbs_init(tp);
9637 }
9638
9639 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9640 {
9641 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9642
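/* The RX BD cache size is chip dependent; the replenish threshold
 * is capped at half the cache and at the device's max post count.
 */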
9643 if (!tg3_flag(tp, 5750_PLUS) ||
9644 tg3_flag(tp, 5780_CLASS) ||
9645 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9646 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9647 tg3_flag(tp, 57765_PLUS))
9648 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9649 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9650 tg3_asic_rev(tp) == ASIC_REV_5787)
9651 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9652 else
9653 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9654
9655 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9656 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9657
9658 val = min(nic_rep_thresh, host_rep_thresh);
9659 tw32(RCVBDI_STD_THRESH, val);
9660
9661 if (tg3_flag(tp, 57765_PLUS))
9662 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9663
9664 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9665 return;
9666
9667 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9668
9669 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9670
9671 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9672 tw32(RCVBDI_JUMBO_THRESH, val);
9673
9674 if (tg3_flag(tp, 57765_PLUS))
9675 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9676 }
9677
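/* Bit-serial CRC-32 using the reflected IEEE 802.3 polynomial
 * (0xedb88320); effectively ~crc32_le(~0, buf, len). Used below
 * to build the multicast hash filter.
 */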
9678 static inline u32 calc_crc(unsigned char *buf, int len)
9679 {
9680 u32 reg;
9681 u32 tmp;
9682 int j, k;
9683
9684 reg = 0xffffffff;
9685
9686 for (j = 0; j < len; j++) {
9687 reg ^= buf[j];
9688
9689 for (k = 0; k < 8; k++) {
9690 tmp = reg & 0x01;
9691
9692 reg >>= 1;
9693
9694 if (tmp)
9695 reg ^= 0xedb88320;
9696 }
9697 }
9698
9699 return ~reg;
9700 }
9701
9702 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9703 {
9704 /* accept or reject all multicast frames */
9705 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9706 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9707 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9708 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9709 }
9710
9711 static void __tg3_set_rx_mode(struct net_device *dev)
9712 {
9713 struct tg3 *tp = netdev_priv(dev);
9714 u32 rx_mode;
9715
9716 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9717 RX_MODE_KEEP_VLAN_TAG);
9718
9719 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9720 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9721 * flag clear.
9722 */
9723 if (!tg3_flag(tp, ENABLE_ASF))
9724 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9725 #endif
9726
9727 if (dev->flags & IFF_PROMISC) {
9728 /* Promiscuous mode. */
9729 rx_mode |= RX_MODE_PROMISC;
9730 } else if (dev->flags & IFF_ALLMULTI) {
9731 /* Accept all multicast. */
9732 tg3_set_multi(tp, 1);
9733 } else if (netdev_mc_empty(dev)) {
9734 /* Reject all multicast. */
9735 tg3_set_multi(tp, 0);
9736 } else {
9737 /* Accept one or more multicast addresses. */
9738 struct netdev_hw_addr *ha;
9739 u32 mc_filter[4] = { 0, };
9740 u32 regidx;
9741 u32 bit;
9742 u32 crc;
9743
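/* The low 7 bits of the complemented CRC select one bit out of
 * the 128-bit (4 x 32) multicast hash filter.
 */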
9744 netdev_for_each_mc_addr(ha, dev) {
9745 crc = calc_crc(ha->addr, ETH_ALEN);
9746 bit = ~crc & 0x7f;
9747 regidx = (bit & 0x60) >> 5;
9748 bit &= 0x1f;
9749 mc_filter[regidx] |= (1 << bit);
9750 }
9751
9752 tw32(MAC_HASH_REG_0, mc_filter[0]);
9753 tw32(MAC_HASH_REG_1, mc_filter[1]);
9754 tw32(MAC_HASH_REG_2, mc_filter[2]);
9755 tw32(MAC_HASH_REG_3, mc_filter[3]);
9756 }
9757
9758 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9759 rx_mode |= RX_MODE_PROMISC;
9760 } else if (!(dev->flags & IFF_PROMISC)) {
9761 /* Add all entries into the MAC addr filter list */
9762 int i = 0;
9763 struct netdev_hw_addr *ha;
9764
9765 netdev_for_each_uc_addr(ha, dev) {
9766 __tg3_set_one_mac_addr(tp, ha->addr,
9767 i + TG3_UCAST_ADDR_IDX(tp));
9768 i++;
9769 }
9770 }
9771
9772 if (rx_mode != tp->rx_mode) {
9773 tp->rx_mode = rx_mode;
9774 tw32_f(MAC_RX_MODE, rx_mode);
9775 udelay(10);
9776 }
9777 }
9778
9779 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9780 {
9781 int i;
9782
9783 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9784 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9785 }
9786
9787 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9788 {
9789 int i;
9790
9791 if (!tg3_flag(tp, SUPPORT_MSIX))
9792 return;
9793
9794 if (tp->rxq_cnt == 1) {
9795 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9796 return;
9797 }
9798
9799 /* Validate table against current IRQ count */
9800 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9801 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9802 break;
9803 }
9804
9805 if (i != TG3_RSS_INDIR_TBL_SIZE)
9806 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9807 }
9808
9809 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9810 {
9811 int i = 0;
9812 u32 reg = MAC_RSS_INDIR_TBL_0;
9813
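/* Pack eight 4-bit indirection entries into each 32-bit register. */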
9814 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9815 u32 val = tp->rss_ind_tbl[i];
9816 i++;
9817 for (; i % 8; i++) {
9818 val <<= 4;
9819 val |= tp->rss_ind_tbl[i];
9820 }
9821 tw32(reg, val);
9822 reg += 4;
9823 }
9824 }
9825
9826 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9827 {
9828 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9829 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9830 else
9831 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9832 }
9833
9834 /* tp->lock is held. */
9835 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9836 {
9837 u32 val, rdmac_mode;
9838 int i, err, limit;
9839 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9840
9841 tg3_disable_ints(tp);
9842
9843 tg3_stop_fw(tp);
9844
9845 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9846
9847 if (tg3_flag(tp, INIT_COMPLETE))
9848 tg3_abort_hw(tp, 1);
9849
9850 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9851 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9852 tg3_phy_pull_config(tp);
9853 tg3_eee_pull_config(tp, NULL);
9854 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9855 }
9856
9857 /* Enable MAC control of LPI */
9858 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9859 tg3_setup_eee(tp);
9860
9861 if (reset_phy)
9862 tg3_phy_reset(tp);
9863
9864 err = tg3_chip_reset(tp);
9865 if (err)
9866 return err;
9867
9868 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9869
9870 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9871 val = tr32(TG3_CPMU_CTRL);
9872 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9873 tw32(TG3_CPMU_CTRL, val);
9874
9875 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9876 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9877 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9878 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9879
9880 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9881 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9882 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9883 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9884
9885 val = tr32(TG3_CPMU_HST_ACC);
9886 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9887 val |= CPMU_HST_ACC_MACCLK_6_25;
9888 tw32(TG3_CPMU_HST_ACC, val);
9889 }
9890
9891 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9892 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9893 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9894 PCIE_PWR_MGMT_L1_THRESH_4MS;
9895 tw32(PCIE_PWR_MGMT_THRESH, val);
9896
9897 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9898 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9899
9900 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9901
9902 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9903 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9904 }
9905
9906 if (tg3_flag(tp, L1PLLPD_EN)) {
9907 u32 grc_mode = tr32(GRC_MODE);
9908
9909 /* Access the lower 1K of PL PCIE block registers. */
9910 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9911 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9912
9913 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9914 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9915 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9916
9917 tw32(GRC_MODE, grc_mode);
9918 }
9919
9920 if (tg3_flag(tp, 57765_CLASS)) {
9921 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9922 u32 grc_mode = tr32(GRC_MODE);
9923
9924 /* Access the lower 1K of PL PCIE block registers. */
9925 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9926 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9927
9928 val = tr32(TG3_PCIE_TLDLPL_PORT +
9929 TG3_PCIE_PL_LO_PHYCTL5);
9930 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9931 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9932
9933 tw32(GRC_MODE, grc_mode);
9934 }
9935
9936 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9937 u32 grc_mode;
9938
9939 /* Fix transmit hangs */
9940 val = tr32(TG3_CPMU_PADRNG_CTL);
9941 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9942 tw32(TG3_CPMU_PADRNG_CTL, val);
9943
9944 grc_mode = tr32(GRC_MODE);
9945
9946 /* Access the lower 1K of DL PCIE block registers. */
9947 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9948 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9949
9950 val = tr32(TG3_PCIE_TLDLPL_PORT +
9951 TG3_PCIE_DL_LO_FTSMAX);
9952 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9953 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9954 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9955
9956 tw32(GRC_MODE, grc_mode);
9957 }
9958
9959 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9960 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9961 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9962 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9963 }
9964
9965 /* This works around an issue with Athlon chipsets on
9966 * B3 tigon3 silicon. This bit has no effect on any
9967 * other revision. But do not set this on PCI Express
9968 * chips and don't even touch the clocks if the CPMU is present.
9969 */
9970 if (!tg3_flag(tp, CPMU_PRESENT)) {
9971 if (!tg3_flag(tp, PCI_EXPRESS))
9972 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9973 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9974 }
9975
9976 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9977 tg3_flag(tp, PCIX_MODE)) {
9978 val = tr32(TG3PCI_PCISTATE);
9979 val |= PCISTATE_RETRY_SAME_DMA;
9980 tw32(TG3PCI_PCISTATE, val);
9981 }
9982
9983 if (tg3_flag(tp, ENABLE_APE)) {
9984 /* Allow reads and writes to the
9985 * APE register and memory space.
9986 */
9987 val = tr32(TG3PCI_PCISTATE);
9988 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9989 PCISTATE_ALLOW_APE_SHMEM_WR |
9990 PCISTATE_ALLOW_APE_PSPACE_WR;
9991 tw32(TG3PCI_PCISTATE, val);
9992 }
9993
9994 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9995 /* Enable some hw fixes. */
9996 val = tr32(TG3PCI_MSI_DATA);
9997 val |= (1 << 26) | (1 << 28) | (1 << 29);
9998 tw32(TG3PCI_MSI_DATA, val);
9999 }
10000
10001 /* Descriptor ring init may make accesses to the
10002 * NIC SRAM area to set up the TX descriptors, so we
10003 * can only do this after the hardware has been
10004 * successfully reset.
10005 */
10006 err = tg3_init_rings(tp);
10007 if (err)
10008 return err;
10009
10010 if (tg3_flag(tp, 57765_PLUS)) {
10011 val = tr32(TG3PCI_DMA_RW_CTRL) &
10012 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10013 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10014 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10015 if (!tg3_flag(tp, 57765_CLASS) &&
10016 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10017 tg3_asic_rev(tp) != ASIC_REV_5762)
10018 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10019 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10020 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10021 tg3_asic_rev(tp) != ASIC_REV_5761) {
10022 /* This value is determined during the probe time DMA
10023 * engine test, tg3_test_dma.
10024 */
10025 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10026 }
10027
10028 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10029 GRC_MODE_4X_NIC_SEND_RINGS |
10030 GRC_MODE_NO_TX_PHDR_CSUM |
10031 GRC_MODE_NO_RX_PHDR_CSUM);
10032 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10033
10034 /* Pseudo-header checksum is done by hardware logic and not
10035 * the offload processors, so make the chip do the pseudo-
10036 * header checksums on receive. For transmit it is more
10037 * convenient to do the pseudo-header checksum in software
10038 * as Linux does that on transmit for us in all cases.
10039 */
10040 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10041
10042 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10043 if (tp->rxptpctl)
10044 tw32(TG3_RX_PTP_CTL,
10045 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10046
10047 if (tg3_flag(tp, PTP_CAPABLE))
10048 val |= GRC_MODE_TIME_SYNC_ENABLE;
10049
10050 tw32(GRC_MODE, tp->grc_mode | val);
10051
10052 /* Set up the timer prescaler register. The clock is always 66 MHz. */
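/* Illustrative note: a prescaler value of 65 presumably divides by N + 1,
 * i.e. 66, turning the fixed 66 MHz clock into a 1 MHz timebase so the
 * host coalescing timers tick in 1 usec units.
 */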
10053 val = tr32(GRC_MISC_CFG);
10054 val &= ~0xff;
10055 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10056 tw32(GRC_MISC_CFG, val);
10057
10058 /* Initialize MBUF/DESC pool. */
10059 if (tg3_flag(tp, 5750_PLUS)) {
10060 /* Do nothing. */
10061 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10062 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10063 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10064 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10065 else
10066 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10067 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10068 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10069 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10070 int fw_len;
10071
10072 fw_len = tp->fw_len;
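/* The next statement rounds fw_len up to the next 128-byte (0x80)
 * boundary; the mbuf pool below appears to be carved out of NIC SRAM
 * immediately after the firmware image.
 */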
10073 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10074 tw32(BUFMGR_MB_POOL_ADDR,
10075 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10076 tw32(BUFMGR_MB_POOL_SIZE,
10077 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10078 }
10079
10080 if (tp->dev->mtu <= ETH_DATA_LEN) {
10081 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10082 tp->bufmgr_config.mbuf_read_dma_low_water);
10083 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10084 tp->bufmgr_config.mbuf_mac_rx_low_water);
10085 tw32(BUFMGR_MB_HIGH_WATER,
10086 tp->bufmgr_config.mbuf_high_water);
10087 } else {
10088 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10089 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10090 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10091 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10092 tw32(BUFMGR_MB_HIGH_WATER,
10093 tp->bufmgr_config.mbuf_high_water_jumbo);
10094 }
10095 tw32(BUFMGR_DMA_LOW_WATER,
10096 tp->bufmgr_config.dma_low_water);
10097 tw32(BUFMGR_DMA_HIGH_WATER,
10098 tp->bufmgr_config.dma_high_water);
10099
10100 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10101 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10102 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10103 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10104 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10105 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10106 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10107 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10108 tw32(BUFMGR_MODE, val);
10109 for (i = 0; i < 2000; i++) {
10110 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10111 break;
10112 udelay(10);
10113 }
10114 if (i >= 2000) {
10115 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10116 return -ENODEV;
10117 }
10118
10119 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10120 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10121
10122 tg3_setup_rxbd_thresholds(tp);
10123
10124 /* Initialize TG3_BDINFO's at:
10125 * RCVDBDI_STD_BD: standard eth size rx ring
10126 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10127 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10128 *
10129 * like so:
10130 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10131 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10132 * ring attribute flags
10133 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10134 *
10135 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10136 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10137 *
10138 * The size of each ring is fixed in the firmware, but the location is
10139 * configurable.
10140 */
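/* Illustrative encoding (values hypothetical): a 1536-byte standard rx
 * buffer with no attribute flags would be written to
 * TG3_BDINFO_MAXLEN_FLAGS as (1536 << BDINFO_FLAGS_MAXLEN_SHIFT) | 0,
 * matching the layout described above.
 */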
10141 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10142 ((u64) tpr->rx_std_mapping >> 32));
10143 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10144 ((u64) tpr->rx_std_mapping & 0xffffffff));
10145 if (!tg3_flag(tp, 5717_PLUS))
10146 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10147 NIC_SRAM_RX_BUFFER_DESC);
10148
10149 /* Disable the mini ring */
10150 if (!tg3_flag(tp, 5705_PLUS))
10151 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10152 BDINFO_FLAGS_DISABLED);
10153
10154 /* Program the jumbo buffer descriptor ring control
10155 * blocks on those devices that have them.
10156 */
10157 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10158 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10159
10160 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10161 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10162 ((u64) tpr->rx_jmb_mapping >> 32));
10163 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10164 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10165 val = TG3_RX_JMB_RING_SIZE(tp) <<
10166 BDINFO_FLAGS_MAXLEN_SHIFT;
10167 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10168 val | BDINFO_FLAGS_USE_EXT_RECV);
10169 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10170 tg3_flag(tp, 57765_CLASS) ||
10171 tg3_asic_rev(tp) == ASIC_REV_5762)
10172 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10173 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10174 } else {
10175 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10176 BDINFO_FLAGS_DISABLED);
10177 }
10178
10179 if (tg3_flag(tp, 57765_PLUS)) {
10180 val = TG3_RX_STD_RING_SIZE(tp);
10181 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10182 val |= (TG3_RX_STD_DMA_SZ << 2);
10183 } else
10184 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10185 } else
10186 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10187
10188 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10189
10190 tpr->rx_std_prod_idx = tp->rx_pending;
10191 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10192
10193 tpr->rx_jmb_prod_idx =
10194 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10195 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10196
10197 tg3_rings_reset(tp);
10198
10199 /* Initialize MAC address and backoff seed. */
10200 __tg3_set_mac_addr(tp, false);
10201
10202 /* MTU + ethernet header + FCS + optional VLAN tag */
10203 tw32(MAC_RX_MTU_SIZE,
10204 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
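/* With the default 1500-byte MTU this programs 1500 + 14 + 4 + 4 = 1522
 * bytes, the classic maximum VLAN-tagged frame size.
 */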
10205
10206 /* The slot time is changed by tg3_setup_phy if we
10207 * run at gigabit with half duplex.
10208 */
10209 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10210 (6 << TX_LENGTHS_IPG_SHIFT) |
10211 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10212
10213 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10214 tg3_asic_rev(tp) == ASIC_REV_5762)
10215 val |= tr32(MAC_TX_LENGTHS) &
10216 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10217 TX_LENGTHS_CNT_DWN_VAL_MSK);
10218
10219 tw32(MAC_TX_LENGTHS, val);
10220
10221 /* Receive rules. */
10222 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10223 tw32(RCVLPC_CONFIG, 0x0181);
10224
10225 /* Calculate RDMAC_MODE setting early, we need it to determine
10226 * the RCVLPC_STATS_ENABLE mask.
10227 */
10228 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10229 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10230 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10231 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10232 RDMAC_MODE_LNGREAD_ENAB);
10233
10234 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10235 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10236
10237 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10238 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10239 tg3_asic_rev(tp) == ASIC_REV_57780)
10240 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10241 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10242 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10243
10244 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10245 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10246 if (tg3_flag(tp, TSO_CAPABLE) &&
10247 tg3_asic_rev(tp) == ASIC_REV_5705) {
10248 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10249 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10250 !tg3_flag(tp, IS_5788)) {
10251 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10252 }
10253 }
10254
10255 if (tg3_flag(tp, PCI_EXPRESS))
10256 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10257
10258 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10259 tp->dma_limit = 0;
10260 if (tp->dev->mtu <= ETH_DATA_LEN) {
10261 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10262 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10263 }
10264 }
10265
10266 if (tg3_flag(tp, HW_TSO_1) ||
10267 tg3_flag(tp, HW_TSO_2) ||
10268 tg3_flag(tp, HW_TSO_3))
10269 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10270
10271 if (tg3_flag(tp, 57765_PLUS) ||
10272 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10273 tg3_asic_rev(tp) == ASIC_REV_57780)
10274 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10275
10276 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10277 tg3_asic_rev(tp) == ASIC_REV_5762)
10278 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10279
10280 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10281 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10282 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10283 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10284 tg3_flag(tp, 57765_PLUS)) {
10285 u32 tgtreg;
10286
10287 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10288 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10289 else
10290 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10291
10292 val = tr32(tgtreg);
10293 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10294 tg3_asic_rev(tp) == ASIC_REV_5762) {
10295 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10296 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10297 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10298 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10299 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10300 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10301 }
10302 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10303 }
10304
10305 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10306 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10307 tg3_asic_rev(tp) == ASIC_REV_5762) {
10308 u32 tgtreg;
10309
10310 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10311 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10312 else
10313 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10314
10315 val = tr32(tgtreg);
10316 tw32(tgtreg, val |
10317 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10318 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10319 }
10320
10321 /* Receive/send statistics. */
10322 if (tg3_flag(tp, 5750_PLUS)) {
10323 val = tr32(RCVLPC_STATS_ENABLE);
10324 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10325 tw32(RCVLPC_STATS_ENABLE, val);
10326 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10327 tg3_flag(tp, TSO_CAPABLE)) {
10328 val = tr32(RCVLPC_STATS_ENABLE);
10329 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10330 tw32(RCVLPC_STATS_ENABLE, val);
10331 } else {
10332 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10333 }
10334 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10335 tw32(SNDDATAI_STATSENAB, 0xffffff);
10336 tw32(SNDDATAI_STATSCTRL,
10337 (SNDDATAI_SCTRL_ENABLE |
10338 SNDDATAI_SCTRL_FASTUPD));
10339
10340 /* Set up the host coalescing engine. */
10341 tw32(HOSTCC_MODE, 0);
10342 for (i = 0; i < 2000; i++) {
10343 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10344 break;
10345 udelay(10);
10346 }
10347
10348 __tg3_set_coalesce(tp, &tp->coal);
10349
10350 if (!tg3_flag(tp, 5705_PLUS)) {
10351 /* Status/statistics block address. See tg3_timer,
10352 * the tg3_periodic_fetch_stats call there, and
10353 * tg3_get_stats to see how this works for 5705/5750 chips.
10354 */
10355 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10356 ((u64) tp->stats_mapping >> 32));
10357 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10358 ((u64) tp->stats_mapping & 0xffffffff));
10359 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10360
10361 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10362
10363 /* Clear statistics and status block memory areas */
10364 for (i = NIC_SRAM_STATS_BLK;
10365 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10366 i += sizeof(u32)) {
10367 tg3_write_mem(tp, i, 0);
10368 udelay(40);
10369 }
10370 }
10371
10372 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10373
10374 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10375 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10376 if (!tg3_flag(tp, 5705_PLUS))
10377 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10378
10379 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10380 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10381 /* reset to prevent losing 1st rx packet intermittently */
10382 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10383 udelay(10);
10384 }
10385
10386 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10387 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10388 MAC_MODE_FHDE_ENABLE;
10389 if (tg3_flag(tp, ENABLE_APE))
10390 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10391 if (!tg3_flag(tp, 5705_PLUS) &&
10392 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10393 tg3_asic_rev(tp) != ASIC_REV_5700)
10394 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10395 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10396 udelay(40);
10397
10398 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10399 * If TG3_FLAG_IS_NIC is zero, we should read the
10400 * register to preserve the GPIO settings for LOMs. The GPIOs,
10401 * whether used as inputs or outputs, are set by boot code after
10402 * reset.
10403 */
10404 if (!tg3_flag(tp, IS_NIC)) {
10405 u32 gpio_mask;
10406
10407 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10408 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10409 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10410
10411 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10412 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10413 GRC_LCLCTRL_GPIO_OUTPUT3;
10414
10415 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10416 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10417
10418 tp->grc_local_ctrl &= ~gpio_mask;
10419 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10420
10421 /* GPIO1 must be driven high for eeprom write protect */
10422 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10423 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10424 GRC_LCLCTRL_GPIO_OUTPUT1);
10425 }
10426 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10427 udelay(100);
10428
10429 if (tg3_flag(tp, USING_MSIX)) {
10430 val = tr32(MSGINT_MODE);
10431 val |= MSGINT_MODE_ENABLE;
10432 if (tp->irq_cnt > 1)
10433 val |= MSGINT_MODE_MULTIVEC_EN;
10434 if (!tg3_flag(tp, 1SHOT_MSI))
10435 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10436 tw32(MSGINT_MODE, val);
10437 }
10438
10439 if (!tg3_flag(tp, 5705_PLUS)) {
10440 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10441 udelay(40);
10442 }
10443
10444 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10445 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10446 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10447 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10448 WDMAC_MODE_LNGREAD_ENAB);
10449
10450 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10451 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10452 if (tg3_flag(tp, TSO_CAPABLE) &&
10453 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10454 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10455 /* nothing */
10456 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10457 !tg3_flag(tp, IS_5788)) {
10458 val |= WDMAC_MODE_RX_ACCEL;
10459 }
10460 }
10461
10462 /* Enable host coalescing bug fix */
10463 if (tg3_flag(tp, 5755_PLUS))
10464 val |= WDMAC_MODE_STATUS_TAG_FIX;
10465
10466 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10467 val |= WDMAC_MODE_BURST_ALL_DATA;
10468
10469 tw32_f(WDMAC_MODE, val);
10470 udelay(40);
10471
10472 if (tg3_flag(tp, PCIX_MODE)) {
10473 u16 pcix_cmd;
10474
10475 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10476 &pcix_cmd);
10477 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10478 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10479 pcix_cmd |= PCI_X_CMD_READ_2K;
10480 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10481 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10482 pcix_cmd |= PCI_X_CMD_READ_2K;
10483 }
10484 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10485 pcix_cmd);
10486 }
10487
10488 tw32_f(RDMAC_MODE, rdmac_mode);
10489 udelay(40);
10490
10491 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10492 tg3_asic_rev(tp) == ASIC_REV_5720) {
10493 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10494 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10495 break;
10496 }
10497 if (i < TG3_NUM_RDMA_CHANNELS) {
10498 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10499 val |= tg3_lso_rd_dma_workaround_bit(tp);
10500 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10501 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10502 }
10503 }
10504
10505 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10506 if (!tg3_flag(tp, 5705_PLUS))
10507 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10508
10509 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10510 tw32(SNDDATAC_MODE,
10511 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10512 else
10513 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10514
10515 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10516 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10517 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10518 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10519 val |= RCVDBDI_MODE_LRG_RING_SZ;
10520 tw32(RCVDBDI_MODE, val);
10521 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10522 if (tg3_flag(tp, HW_TSO_1) ||
10523 tg3_flag(tp, HW_TSO_2) ||
10524 tg3_flag(tp, HW_TSO_3))
10525 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10526 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10527 if (tg3_flag(tp, ENABLE_TSS))
10528 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10529 tw32(SNDBDI_MODE, val);
10530 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10531
10532 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10533 err = tg3_load_5701_a0_firmware_fix(tp);
10534 if (err)
10535 return err;
10536 }
10537
10538 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10539 /* Ignore any errors from the firmware download. If the download
10540 * fails, the device will operate with EEE disabled.
10541 */
10542 tg3_load_57766_firmware(tp);
10543 }
10544
10545 if (tg3_flag(tp, TSO_CAPABLE)) {
10546 err = tg3_load_tso_firmware(tp);
10547 if (err)
10548 return err;
10549 }
10550
10551 tp->tx_mode = TX_MODE_ENABLE;
10552
10553 if (tg3_flag(tp, 5755_PLUS) ||
10554 tg3_asic_rev(tp) == ASIC_REV_5906)
10555 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10556
10557 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10558 tg3_asic_rev(tp) == ASIC_REV_5762) {
10559 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10560 tp->tx_mode &= ~val;
10561 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10562 }
10563
10564 tw32_f(MAC_TX_MODE, tp->tx_mode);
10565 udelay(100);
10566
10567 if (tg3_flag(tp, ENABLE_RSS)) {
10568 u32 rss_key[10];
10569
10570 tg3_rss_write_indir_tbl(tp);
10571
10572 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10573
10574 for (i = 0; i < 10; i++)
10575 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10576 }
10577
10578 tp->rx_mode = RX_MODE_ENABLE;
10579 if (tg3_flag(tp, 5755_PLUS))
10580 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10581
10582 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10583 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10584
10585 if (tg3_flag(tp, ENABLE_RSS))
10586 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10587 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10588 RX_MODE_RSS_IPV6_HASH_EN |
10589 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10590 RX_MODE_RSS_IPV4_HASH_EN |
10591 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10592
10593 tw32_f(MAC_RX_MODE, tp->rx_mode);
10594 udelay(10);
10595
10596 tw32(MAC_LED_CTRL, tp->led_ctrl);
10597
10598 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10599 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10600 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10601 udelay(10);
10602 }
10603 tw32_f(MAC_RX_MODE, tp->rx_mode);
10604 udelay(10);
10605
10606 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10607 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10608 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10609 /* Set drive transmission level to 1.2V, but only if the signal
10610 * pre-emphasis bit is not set. */
10611 val = tr32(MAC_SERDES_CFG);
10612 val &= 0xfffff000;
10613 val |= 0x880;
10614 tw32(MAC_SERDES_CFG, val);
10615 }
10616 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10617 tw32(MAC_SERDES_CFG, 0x616000);
10618 }
10619
10620 /* Prevent chip from dropping frames when flow control
10621 * is enabled.
10622 */
10623 if (tg3_flag(tp, 57765_CLASS))
10624 val = 1;
10625 else
10626 val = 2;
10627 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10628
10629 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10630 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10631 /* Use hardware link auto-negotiation */
10632 tg3_flag_set(tp, HW_AUTONEG);
10633 }
10634
10635 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10636 tg3_asic_rev(tp) == ASIC_REV_5714) {
10637 u32 tmp;
10638
10639 tmp = tr32(SERDES_RX_CTRL);
10640 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10641 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10642 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10643 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10644 }
10645
10646 if (!tg3_flag(tp, USE_PHYLIB)) {
10647 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10648 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10649
10650 err = tg3_setup_phy(tp, false);
10651 if (err)
10652 return err;
10653
10654 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10655 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10656 u32 tmp;
10657
10658 /* Clear CRC stats. */
10659 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10660 tg3_writephy(tp, MII_TG3_TEST1,
10661 tmp | MII_TG3_TEST1_CRC_EN);
10662 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10663 }
10664 }
10665 }
10666
10667 __tg3_set_rx_mode(tp->dev);
10668
10669 /* Initialize receive rules. */
10670 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10671 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10672 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10673 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10674
10675 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10676 limit = 8;
10677 else
10678 limit = 16;
10679 if (tg3_flag(tp, ENABLE_ASF))
10680 limit -= 4;
10681 switch (limit) {
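/* Every case below falls through deliberately: entering at `limit'
 * zeroes all unused rule/value slots from limit - 1 downward (slots 2
 * and 3 are intentionally left alone, see the commented-out writes).
 */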
10682 case 16:
10683 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10684 case 15:
10685 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10686 case 14:
10687 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10688 case 13:
10689 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10690 case 12:
10691 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10692 case 11:
10693 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10694 case 10:
10695 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10696 case 9:
10697 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10698 case 8:
10699 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10700 case 7:
10701 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10702 case 6:
10703 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10704 case 5:
10705 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10706 case 4:
10707 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10708 case 3:
10709 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10710 case 2:
10711 case 1:
10712
10713 default:
10714 break;
10715 }
10716
10717 if (tg3_flag(tp, ENABLE_APE))
10718 /* Write our heartbeat update interval to APE. */
10719 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10720 APE_HOST_HEARTBEAT_INT_DISABLE);
10721
10722 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10723
10724 return 0;
10725 }
10726
10727 /* Called at device open time to get the chip ready for
10728 * packet processing. Invoked with tp->lock held.
10729 */
10730 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10731 {
10732 /* Chip may have been just powered on. If so, the boot code may still
10733 * be running initialization. Wait for it to finish to avoid races in
10734 * accessing the hardware.
10735 */
10736 tg3_enable_register_access(tp);
10737 tg3_poll_fw(tp);
10738
10739 tg3_switch_clocks(tp);
10740
10741 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10742
10743 return tg3_reset_hw(tp, reset_phy);
10744 }
10745
10746 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10747 {
10748 int i;
10749
10750 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10751 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10752
10753 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10754 off += len;
10755
10756 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10757 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10758 memset(ocir, 0, TG3_OCIR_LEN);
10759 }
10760 }
10761
10762 /* sysfs attributes for hwmon */
10763 static ssize_t tg3_show_temp(struct device *dev,
10764 struct device_attribute *devattr, char *buf)
10765 {
10766 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10767 struct tg3 *tp = dev_get_drvdata(dev);
10768 u32 temperature;
10769
10770 spin_lock_bh(&tp->lock);
10771 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10772 sizeof(temperature));
10773 spin_unlock_bh(&tp->lock);
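/* The APE scratchpad value appears to be in whole degrees Celsius;
 * hwmon sysfs attributes are reported in millidegrees, hence the
 * multiplication by 1000 below.
 */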
10774 return sprintf(buf, "%u\n", temperature * 1000);
10775 }
10776
10777
10778 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10779 TG3_TEMP_SENSOR_OFFSET);
10780 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10781 TG3_TEMP_CAUTION_OFFSET);
10782 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10783 TG3_TEMP_MAX_OFFSET);
10784
10785 static struct attribute *tg3_attrs[] = {
10786 &sensor_dev_attr_temp1_input.dev_attr.attr,
10787 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10788 &sensor_dev_attr_temp1_max.dev_attr.attr,
10789 NULL
10790 };
10791 ATTRIBUTE_GROUPS(tg3);
10792
10793 static void tg3_hwmon_close(struct tg3 *tp)
10794 {
10795 if (tp->hwmon_dev) {
10796 hwmon_device_unregister(tp->hwmon_dev);
10797 tp->hwmon_dev = NULL;
10798 }
10799 }
10800
10801 static void tg3_hwmon_open(struct tg3 *tp)
10802 {
10803 int i;
10804 u32 size = 0;
10805 struct pci_dev *pdev = tp->pdev;
10806 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10807
10808 tg3_sd_scan_scratchpad(tp, ocirs);
10809
10810 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10811 if (!ocirs[i].src_data_length)
10812 continue;
10813
10814 size += ocirs[i].src_hdr_length;
10815 size += ocirs[i].src_data_length;
10816 }
10817
10818 if (!size)
10819 return;
10820
10821 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10822 tp, tg3_groups);
10823 if (IS_ERR(tp->hwmon_dev)) {
10824 tp->hwmon_dev = NULL;
10825 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10826 }
10827 }
10828
10829
10830 #define TG3_STAT_ADD32(PSTAT, REG) \
10831 do { u32 __val = tr32(REG); \
10832 (PSTAT)->low += __val; \
10833 if ((PSTAT)->low < __val) \
10834 (PSTAT)->high += 1; \
10835 } while (0)
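/* The hardware exposes 32-bit counters; the low word accumulates and a
 * wrap (new low < addend) carries into the high word. For example,
 * low = 0xfffffff0 plus __val = 0x20 wraps to 0x10, so high is bumped.
 */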
10836
10837 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10838 {
10839 struct tg3_hw_stats *sp = tp->hw_stats;
10840
10841 if (!tp->link_up)
10842 return;
10843
10844 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10845 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10846 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10847 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10848 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10849 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10850 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10851 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10852 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10853 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10854 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10855 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10856 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10857 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10858 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10859 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10860 u32 val;
10861
10862 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10863 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10864 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10865 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10866 }
10867
10868 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10869 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10870 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10871 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10872 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10873 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10874 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10875 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10876 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10877 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10878 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10879 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10880 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10881 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10882
10883 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10884 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10885 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10886 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10887 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10888 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10889 } else {
10890 u32 val = tr32(HOSTCC_FLOW_ATTN);
10891 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10892 if (val) {
10893 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10894 sp->rx_discards.low += val;
10895 if (sp->rx_discards.low < val)
10896 sp->rx_discards.high += 1;
10897 }
10898 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10899 }
10900 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10901 }
10902
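/* Heuristic for lost MSIs: if a vector reports pending work but its rx
 * and tx consumer indices have not moved since the previous timer tick,
 * assume the interrupt was missed and invoke the handler directly.
 * chk_msi_cnt grants one tick of grace before firing.
 */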
10903 static void tg3_chk_missed_msi(struct tg3 *tp)
10904 {
10905 u32 i;
10906
10907 for (i = 0; i < tp->irq_cnt; i++) {
10908 struct tg3_napi *tnapi = &tp->napi[i];
10909
10910 if (tg3_has_work(tnapi)) {
10911 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10912 tnapi->last_tx_cons == tnapi->tx_cons) {
10913 if (tnapi->chk_msi_cnt < 1) {
10914 tnapi->chk_msi_cnt++;
10915 return;
10916 }
10917 tg3_msi(0, tnapi);
10918 }
10919 }
10920 tnapi->chk_msi_cnt = 0;
10921 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10922 tnapi->last_tx_cons = tnapi->tx_cons;
10923 }
10924 }
10925
10926 static void tg3_timer(unsigned long __opaque)
10927 {
10928 struct tg3 *tp = (struct tg3 *) __opaque;
10929
10930 spin_lock(&tp->lock);
10931
10932 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10933 spin_unlock(&tp->lock);
10934 goto restart_timer;
10935 }
10936
10937 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10938 tg3_flag(tp, 57765_CLASS))
10939 tg3_chk_missed_msi(tp);
10940
10941 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10942 /* BCM4785: Flush posted writes from GbE to host memory. */
10943 tr32(HOSTCC_MODE);
10944 }
10945
10946 if (!tg3_flag(tp, TAGGED_STATUS)) {
10947 /* All of this garbage is because when using non-tagged
10948 * IRQ status, the mailbox/status_block protocol the chip
10949 * uses with the CPU is race prone.
10950 */
10951 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10952 tw32(GRC_LOCAL_CTRL,
10953 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10954 } else {
10955 tw32(HOSTCC_MODE, tp->coalesce_mode |
10956 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10957 }
10958
10959 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10960 spin_unlock(&tp->lock);
10961 tg3_reset_task_schedule(tp);
10962 goto restart_timer;
10963 }
10964 }
10965
10966 /* This part only runs once per second. */
10967 if (!--tp->timer_counter) {
10968 if (tg3_flag(tp, 5705_PLUS))
10969 tg3_periodic_fetch_stats(tp);
10970
10971 if (tp->setlpicnt && !--tp->setlpicnt)
10972 tg3_phy_eee_enable(tp);
10973
10974 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10975 u32 mac_stat;
10976 int phy_event;
10977
10978 mac_stat = tr32(MAC_STATUS);
10979
10980 phy_event = 0;
10981 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10982 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10983 phy_event = 1;
10984 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10985 phy_event = 1;
10986
10987 if (phy_event)
10988 tg3_setup_phy(tp, false);
10989 } else if (tg3_flag(tp, POLL_SERDES)) {
10990 u32 mac_stat = tr32(MAC_STATUS);
10991 int need_setup = 0;
10992
10993 if (tp->link_up &&
10994 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10995 need_setup = 1;
10996 }
10997 if (!tp->link_up &&
10998 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10999 MAC_STATUS_SIGNAL_DET))) {
11000 need_setup = 1;
11001 }
11002 if (need_setup) {
11003 if (!tp->serdes_counter) {
11004 tw32_f(MAC_MODE,
11005 (tp->mac_mode &
11006 ~MAC_MODE_PORT_MODE_MASK));
11007 udelay(40);
11008 tw32_f(MAC_MODE, tp->mac_mode);
11009 udelay(40);
11010 }
11011 tg3_setup_phy(tp, false);
11012 }
11013 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11014 tg3_flag(tp, 5780_CLASS)) {
11015 tg3_serdes_parallel_detect(tp);
11016 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11017 u32 cpmu = tr32(TG3_CPMU_STATUS);
11018 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11019 TG3_CPMU_STATUS_LINK_MASK);
11020
11021 if (link_up != tp->link_up)
11022 tg3_setup_phy(tp, false);
11023 }
11024
11025 tp->timer_counter = tp->timer_multiplier;
11026 }
11027
11028 /* Heartbeat is only sent once every 2 seconds.
11029 *
11030 * The heartbeat is to tell the ASF firmware that the host
11031 * driver is still alive. In the event that the OS crashes,
11032 * ASF needs to reset the hardware to free up the FIFO space
11033 * that may be filled with rx packets destined for the host.
11034 * If the FIFO is full, ASF will no longer function properly.
11035 *
11036 * Unintended resets have been reported on real time kernels
11037 * where the timer doesn't run on time. Netpoll will also have
11038 * the same problem.
11039 *
11040 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11041 * to check the ring condition when the heartbeat is expiring
11042 * before doing the reset. This will prevent most unintended
11043 * resets.
11044 */
11045 if (!--tp->asf_counter) {
11046 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11047 tg3_wait_for_event_ack(tp);
11048
11049 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11050 FWCMD_NICDRV_ALIVE3);
11051 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11052 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11053 TG3_FW_UPDATE_TIMEOUT_SEC);
11054
11055 tg3_generate_fw_event(tp);
11056 }
11057 tp->asf_counter = tp->asf_multiplier;
11058 }
11059
11060 spin_unlock(&tp->lock);
11061
11062 restart_timer:
11063 tp->timer.expires = jiffies + tp->timer_offset;
11064 add_timer(&tp->timer);
11065 }
11066
11067 static void tg3_timer_init(struct tg3 *tp)
11068 {
11069 if (tg3_flag(tp, TAGGED_STATUS) &&
11070 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11071 !tg3_flag(tp, 57765_CLASS))
11072 tp->timer_offset = HZ;
11073 else
11074 tp->timer_offset = HZ / 10;
11075
11076 BUG_ON(tp->timer_offset > HZ);
11077
11078 tp->timer_multiplier = (HZ / tp->timer_offset);
11079 tp->asf_multiplier = (HZ / tp->timer_offset) *
11080 TG3_FW_UPDATE_FREQ_SEC;
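/* Worked example (assuming the common HZ / 10 offset): the timer fires
 * 10 times per second, so timer_multiplier is 10 and the ASF heartbeat
 * counter counts down 10 * TG3_FW_UPDATE_FREQ_SEC ticks between events.
 */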
11081
11082 init_timer(&tp->timer);
11083 tp->timer.data = (unsigned long) tp;
11084 tp->timer.function = tg3_timer;
11085 }
11086
11087 static void tg3_timer_start(struct tg3 *tp)
11088 {
11089 tp->asf_counter = tp->asf_multiplier;
11090 tp->timer_counter = tp->timer_multiplier;
11091
11092 tp->timer.expires = jiffies + tp->timer_offset;
11093 add_timer(&tp->timer);
11094 }
11095
11096 static void tg3_timer_stop(struct tg3 *tp)
11097 {
11098 del_timer_sync(&tp->timer);
11099 }
11100
11101 /* Restart hardware after configuration changes, self-test, etc.
11102 * Invoked with tp->lock held.
11103 */
11104 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11105 __releases(tp->lock)
11106 __acquires(tp->lock)
11107 {
11108 int err;
11109
11110 err = tg3_init_hw(tp, reset_phy);
11111 if (err) {
11112 netdev_err(tp->dev,
11113 "Failed to re-initialize device, aborting\n");
11114 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11115 tg3_full_unlock(tp);
11116 tg3_timer_stop(tp);
11117 tp->irq_sync = 0;
11118 tg3_napi_enable(tp);
11119 dev_close(tp->dev);
11120 tg3_full_lock(tp, 0);
11121 }
11122 return err;
11123 }
11124
11125 static void tg3_reset_task(struct work_struct *work)
11126 {
11127 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11128 int err;
11129
11130 rtnl_lock();
11131 tg3_full_lock(tp, 0);
11132
11133 if (!netif_running(tp->dev)) {
11134 tg3_flag_clear(tp, RESET_TASK_PENDING);
11135 tg3_full_unlock(tp);
11136 rtnl_unlock();
11137 return;
11138 }
11139
11140 tg3_full_unlock(tp);
11141
11142 tg3_phy_stop(tp);
11143
11144 tg3_netif_stop(tp);
11145
11146 tg3_full_lock(tp, 1);
11147
11148 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11149 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11150 tp->write32_rx_mbox = tg3_write_flush_reg32;
11151 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11152 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11153 }
11154
11155 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11156 err = tg3_init_hw(tp, true);
11157 if (err)
11158 goto out;
11159
11160 tg3_netif_start(tp);
11161
11162 out:
11163 tg3_full_unlock(tp);
11164
11165 if (!err)
11166 tg3_phy_start(tp);
11167
11168 tg3_flag_clear(tp, RESET_TASK_PENDING);
11169 rtnl_unlock();
11170 }
11171
11172 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11173 {
11174 irq_handler_t fn;
11175 unsigned long flags;
11176 char *name;
11177 struct tg3_napi *tnapi = &tp->napi[irq_num];
11178
11179 if (tp->irq_cnt == 1)
11180 name = tp->dev->name;
11181 else {
11182 name = &tnapi->irq_lbl[0];
11183 if (tnapi->tx_buffers && tnapi->rx_rcb)
11184 snprintf(name, IFNAMSIZ,
11185 "%s-txrx-%d", tp->dev->name, irq_num);
11186 else if (tnapi->tx_buffers)
11187 snprintf(name, IFNAMSIZ,
11188 "%s-tx-%d", tp->dev->name, irq_num);
11189 else if (tnapi->rx_rcb)
11190 snprintf(name, IFNAMSIZ,
11191 "%s-rx-%d", tp->dev->name, irq_num);
11192 else
11193 snprintf(name, IFNAMSIZ,
11194 "%s-%d", tp->dev->name, irq_num);
11195 name[IFNAMSIZ-1] = 0;
11196 }
11197
11198 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11199 fn = tg3_msi;
11200 if (tg3_flag(tp, 1SHOT_MSI))
11201 fn = tg3_msi_1shot;
11202 flags = 0;
11203 } else {
11204 fn = tg3_interrupt;
11205 if (tg3_flag(tp, TAGGED_STATUS))
11206 fn = tg3_interrupt_tagged;
11207 flags = IRQF_SHARED;
11208 }
11209
11210 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11211 }
11212
11213 static int tg3_test_interrupt(struct tg3 *tp)
11214 {
11215 struct tg3_napi *tnapi = &tp->napi[0];
11216 struct net_device *dev = tp->dev;
11217 int err, i, intr_ok = 0;
11218 u32 val;
11219
11220 if (!netif_running(dev))
11221 return -ENODEV;
11222
11223 tg3_disable_ints(tp);
11224
11225 free_irq(tnapi->irq_vec, tnapi);
11226
11227 /*
11228 * Turn off MSI one shot mode. Otherwise this test has no
11229 * observable way to know whether the interrupt was delivered.
11230 */
11231 if (tg3_flag(tp, 57765_PLUS)) {
11232 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11233 tw32(MSGINT_MODE, val);
11234 }
11235
11236 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11237 IRQF_SHARED, dev->name, tnapi);
11238 if (err)
11239 return err;
11240
11241 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11242 tg3_enable_ints(tp);
11243
11244 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11245 tnapi->coal_now);
11246
11247 for (i = 0; i < 5; i++) {
11248 u32 int_mbox, misc_host_ctrl;
11249
11250 int_mbox = tr32_mailbox(tnapi->int_mbox);
11251 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11252
11253 if ((int_mbox != 0) ||
11254 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11255 intr_ok = 1;
11256 break;
11257 }
11258
11259 if (tg3_flag(tp, 57765_PLUS) &&
11260 tnapi->hw_status->status_tag != tnapi->last_tag)
11261 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11262
11263 msleep(10);
11264 }
11265
11266 tg3_disable_ints(tp);
11267
11268 free_irq(tnapi->irq_vec, tnapi);
11269
11270 err = tg3_request_irq(tp, 0);
11271
11272 if (err)
11273 return err;
11274
11275 if (intr_ok) {
11276 /* Reenable MSI one shot mode. */
11277 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11278 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11279 tw32(MSGINT_MODE, val);
11280 }
11281 return 0;
11282 }
11283
11284 return -EIO;
11285 }
11286
11287 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11288 * INTx mode is successfully restored.
11289 */
11290 static int tg3_test_msi(struct tg3 *tp)
11291 {
11292 int err;
11293 u16 pci_cmd;
11294
11295 if (!tg3_flag(tp, USING_MSI))
11296 return 0;
11297
11298 /* Turn off SERR reporting in case MSI terminates with Master
11299 * Abort.
11300 */
11301 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11302 pci_write_config_word(tp->pdev, PCI_COMMAND,
11303 pci_cmd & ~PCI_COMMAND_SERR);
11304
11305 err = tg3_test_interrupt(tp);
11306
11307 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11308
11309 if (!err)
11310 return 0;
11311
11312 /* other failures */
11313 if (err != -EIO)
11314 return err;
11315
11316 /* MSI test failed, go back to INTx mode */
11317 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11318 "to INTx mode. Please report this failure to the PCI "
11319 "maintainer and include system chipset information\n");
11320
11321 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11322
11323 pci_disable_msi(tp->pdev);
11324
11325 tg3_flag_clear(tp, USING_MSI);
11326 tp->napi[0].irq_vec = tp->pdev->irq;
11327
11328 err = tg3_request_irq(tp, 0);
11329 if (err)
11330 return err;
11331
11332 /* Need to reset the chip because the MSI cycle may have terminated
11333 * with Master Abort.
11334 */
11335 tg3_full_lock(tp, 1);
11336
11337 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11338 err = tg3_init_hw(tp, true);
11339
11340 tg3_full_unlock(tp);
11341
11342 if (err)
11343 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11344
11345 return err;
11346 }
11347
11348 static int tg3_request_firmware(struct tg3 *tp)
11349 {
11350 const struct tg3_firmware_hdr *fw_hdr;
11351
11352 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11353 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11354 tp->fw_needed);
11355 return -ENOENT;
11356 }
11357
11358 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11359
11360 /* Firmware blob starts with version numbers, followed by
11361 * start address and _full_ length including BSS sections
11362 * (which must be longer than the actual data, of course).
11363 */
11364
11365 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11366 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11367 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11368 tp->fw_len, tp->fw_needed);
11369 release_firmware(tp->fw);
11370 tp->fw = NULL;
11371 return -EINVAL;
11372 }
11373
11374 /* We no longer need firmware; we have it. */
11375 tp->fw_needed = NULL;
11376 return 0;
11377 }
11378
11379 static u32 tg3_irq_count(struct tg3 *tp)
11380 {
11381 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11382
11383 if (irq_cnt > 1) {
11384 /* We want as many rx rings enabled as there are cpus.
11385 * In multiqueue MSI-X mode, the first MSI-X vector
11386 * only deals with link interrupts, etc, so we add
11387 * one to the number of vectors we are requesting.
11388 */
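/* e.g. four rx queues request five vectors: one for link and other
 * events plus one per ring, capped at tp->irq_max.
 */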
11389 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11390 }
11391
11392 return irq_cnt;
11393 }
11394
11395 static bool tg3_enable_msix(struct tg3 *tp)
11396 {
11397 int i, rc;
11398 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11399
11400 tp->txq_cnt = tp->txq_req;
11401 tp->rxq_cnt = tp->rxq_req;
11402 if (!tp->rxq_cnt)
11403 tp->rxq_cnt = netif_get_num_default_rss_queues();
11404 if (tp->rxq_cnt > tp->rxq_max)
11405 tp->rxq_cnt = tp->rxq_max;
11406
11407 /* Disable multiple TX rings by default. Simple round-robin hardware
11408 * scheduling of the TX rings can cause starvation of rings with
11409 * small packets when other rings have TSO or jumbo packets.
11410 */
11411 if (!tp->txq_req)
11412 tp->txq_cnt = 1;
11413
11414 tp->irq_cnt = tg3_irq_count(tp);
11415
11416 for (i = 0; i < tp->irq_max; i++) {
11417 msix_ent[i].entry = i;
11418 msix_ent[i].vector = 0;
11419 }
11420
11421 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11422 if (rc < 0) {
11423 return false;
11424 } else if (rc < tp->irq_cnt) {
11425 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11426 tp->irq_cnt, rc);
11427 tp->irq_cnt = rc;
11428 tp->rxq_cnt = max(rc - 1, 1);
11429 if (tp->txq_cnt)
11430 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11431 }
11432
11433 for (i = 0; i < tp->irq_max; i++)
11434 tp->napi[i].irq_vec = msix_ent[i].vector;
11435
11436 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11437 pci_disable_msix(tp->pdev);
11438 return false;
11439 }
11440
11441 if (tp->irq_cnt == 1)
11442 return true;
11443
11444 tg3_flag_set(tp, ENABLE_RSS);
11445
11446 if (tp->txq_cnt > 1)
11447 tg3_flag_set(tp, ENABLE_TSS);
11448
11449 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11450
11451 return true;
11452 }
11453
11454 static void tg3_ints_init(struct tg3 *tp)
11455 {
11456 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11457 !tg3_flag(tp, TAGGED_STATUS)) {
11458 /* All MSI supporting chips should support tagged
11459 * status. Assert that this is the case.
11460 */
11461 netdev_warn(tp->dev,
11462 "MSI without TAGGED_STATUS? Not using MSI\n");
11463 goto defcfg;
11464 }
11465
11466 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11467 tg3_flag_set(tp, USING_MSIX);
11468 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11469 tg3_flag_set(tp, USING_MSI);
11470
11471 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11472 u32 msi_mode = tr32(MSGINT_MODE);
11473 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11474 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11475 if (!tg3_flag(tp, 1SHOT_MSI))
11476 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11477 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11478 }
11479 defcfg:
11480 if (!tg3_flag(tp, USING_MSIX)) {
11481 tp->irq_cnt = 1;
11482 tp->napi[0].irq_vec = tp->pdev->irq;
11483 }
11484
11485 if (tp->irq_cnt == 1) {
11486 tp->txq_cnt = 1;
11487 tp->rxq_cnt = 1;
11488 netif_set_real_num_tx_queues(tp->dev, 1);
11489 netif_set_real_num_rx_queues(tp->dev, 1);
11490 }
11491 }
11492
11493 static void tg3_ints_fini(struct tg3 *tp)
11494 {
11495 if (tg3_flag(tp, USING_MSIX))
11496 pci_disable_msix(tp->pdev);
11497 else if (tg3_flag(tp, USING_MSI))
11498 pci_disable_msi(tp->pdev);
11499 tg3_flag_clear(tp, USING_MSI);
11500 tg3_flag_clear(tp, USING_MSIX);
11501 tg3_flag_clear(tp, ENABLE_RSS);
11502 tg3_flag_clear(tp, ENABLE_TSS);
11503 }
11504
11505 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11506 bool init)
11507 {
11508 struct net_device *dev = tp->dev;
11509 int i, err;
11510
11511 /*
11512 * Set up interrupts first so we know how
11513 * many NAPI resources to allocate
11514 */
11515 tg3_ints_init(tp);
11516
11517 tg3_rss_check_indir_tbl(tp);
11518
11519 /* The placement of this call is tied
11520 * to the setup and use of Host TX descriptors.
11521 */
11522 err = tg3_alloc_consistent(tp);
11523 if (err)
11524 goto out_ints_fini;
11525
11526 tg3_napi_init(tp);
11527
11528 tg3_napi_enable(tp);
11529
11530 for (i = 0; i < tp->irq_cnt; i++) {
11531 struct tg3_napi *tnapi = &tp->napi[i];
11532 err = tg3_request_irq(tp, i);
11533 if (err) {
11534 for (i--; i >= 0; i--) {
11535 tnapi = &tp->napi[i];
11536 free_irq(tnapi->irq_vec, tnapi);
11537 }
11538 goto out_napi_fini;
11539 }
11540 }
11541
11542 tg3_full_lock(tp, 0);
11543
11544 if (init)
11545 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11546
11547 err = tg3_init_hw(tp, reset_phy);
11548 if (err) {
11549 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11550 tg3_free_rings(tp);
11551 }
11552
11553 tg3_full_unlock(tp);
11554
11555 if (err)
11556 goto out_free_irq;
11557
11558 if (test_irq && tg3_flag(tp, USING_MSI)) {
11559 err = tg3_test_msi(tp);
11560
11561 if (err) {
11562 tg3_full_lock(tp, 0);
11563 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11564 tg3_free_rings(tp);
11565 tg3_full_unlock(tp);
11566
11567 goto out_napi_fini;
11568 }
11569
11570 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11571 u32 val = tr32(PCIE_TRANSACTION_CFG);
11572
11573 tw32(PCIE_TRANSACTION_CFG,
11574 val | PCIE_TRANS_CFG_1SHOT_MSI);
11575 }
11576 }
11577
11578 tg3_phy_start(tp);
11579
11580 tg3_hwmon_open(tp);
11581
11582 tg3_full_lock(tp, 0);
11583
11584 tg3_timer_start(tp);
11585 tg3_flag_set(tp, INIT_COMPLETE);
11586 tg3_enable_ints(tp);
11587
11588 tg3_ptp_resume(tp);
11589
11590 tg3_full_unlock(tp);
11591
11592 netif_tx_start_all_queues(dev);
11593
11594 /*
11595 * Reset the loopback feature if it was turned on while the device was
11596 * down, to make sure that it is set up properly now.
11597 */
11598 if (dev->features & NETIF_F_LOOPBACK)
11599 tg3_set_loopback(dev, dev->features);
11600
11601 return 0;
11602
11603 out_free_irq:
11604 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11605 struct tg3_napi *tnapi = &tp->napi[i];
11606 free_irq(tnapi->irq_vec, tnapi);
11607 }
11608
11609 out_napi_fini:
11610 tg3_napi_disable(tp);
11611 tg3_napi_fini(tp);
11612 tg3_free_consistent(tp);
11613
11614 out_ints_fini:
11615 tg3_ints_fini(tp);
11616
11617 return err;
11618 }
11619
11620 static void tg3_stop(struct tg3 *tp)
11621 {
11622 int i;
11623
11624 tg3_reset_task_cancel(tp);
11625 tg3_netif_stop(tp);
11626
11627 tg3_timer_stop(tp);
11628
11629 tg3_hwmon_close(tp);
11630
11631 tg3_phy_stop(tp);
11632
11633 tg3_full_lock(tp, 1);
11634
11635 tg3_disable_ints(tp);
11636
11637 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11638 tg3_free_rings(tp);
11639 tg3_flag_clear(tp, INIT_COMPLETE);
11640
11641 tg3_full_unlock(tp);
11642
11643 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11644 struct tg3_napi *tnapi = &tp->napi[i];
11645 free_irq(tnapi->irq_vec, tnapi);
11646 }
11647
11648 tg3_ints_fini(tp);
11649
11650 tg3_napi_fini(tp);
11651
11652 tg3_free_consistent(tp);
11653 }
11654
11655 static int tg3_open(struct net_device *dev)
11656 {
11657 struct tg3 *tp = netdev_priv(dev);
11658 int err;
11659
11660 if (tp->pcierr_recovery) {
11661 netdev_err(dev, "Failed to open device. PCI error recovery "
11662 "in progress\n");
11663 return -EAGAIN;
11664 }
11665
11666 if (tp->fw_needed) {
11667 err = tg3_request_firmware(tp);
11668 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11669 if (err) {
11670 netdev_warn(tp->dev, "EEE capability disabled\n");
11671 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11672 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11673 netdev_warn(tp->dev, "EEE capability restored\n");
11674 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11675 }
11676 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11677 if (err)
11678 return err;
11679 } else if (err) {
11680 netdev_warn(tp->dev, "TSO capability disabled\n");
11681 tg3_flag_clear(tp, TSO_CAPABLE);
11682 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11683 netdev_notice(tp->dev, "TSO capability restored\n");
11684 tg3_flag_set(tp, TSO_CAPABLE);
11685 }
11686 }
11687
11688 tg3_carrier_off(tp);
11689
11690 err = tg3_power_up(tp);
11691 if (err)
11692 return err;
11693
11694 tg3_full_lock(tp, 0);
11695
11696 tg3_disable_ints(tp);
11697 tg3_flag_clear(tp, INIT_COMPLETE);
11698
11699 tg3_full_unlock(tp);
11700
11701 err = tg3_start(tp,
11702 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11703 true, true);
11704 if (err) {
11705 tg3_frob_aux_power(tp, false);
11706 pci_set_power_state(tp->pdev, PCI_D3hot);
11707 }
11708
11709 return err;
11710 }
11711
11712 static int tg3_close(struct net_device *dev)
11713 {
11714 struct tg3 *tp = netdev_priv(dev);
11715
11716 if (tp->pcierr_recovery) {
11717 netdev_err(dev, "Failed to close device. PCI error recovery "
11718 "in progress\n");
11719 return -EAGAIN;
11720 }
11721
11722 tg3_stop(tp);
11723
11724 /* Clear stats across close / open calls */
11725 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11726 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11727
11728 if (pci_device_is_present(tp->pdev)) {
11729 tg3_power_down_prepare(tp);
11730
11731 tg3_carrier_off(tp);
11732 }
11733 return 0;
11734 }
11735
11736 static inline u64 get_stat64(tg3_stat64_t *val)
11737 {
11738 return ((u64)val->high << 32) | ((u64)val->low);
11739 }
11740
11741 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11742 {
11743 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11744
11745 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11746 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11747 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11748 u32 val;
11749
11750 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11751 tg3_writephy(tp, MII_TG3_TEST1,
11752 val | MII_TG3_TEST1_CRC_EN);
11753 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11754 } else
11755 val = 0;
11756
11757 tp->phy_crc_errors += val;
11758
11759 return tp->phy_crc_errors;
11760 }
11761
11762 return get_stat64(&hw_stats->rx_fcs_errors);
11763 }
11764
11765 #define ESTAT_ADD(member) \
11766 estats->member = old_estats->member + \
11767 get_stat64(&hw_stats->member)
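/* hw_stats is cleared whenever the chip is reset; adding the snapshot
 * saved in estats_prev keeps the ethtool counters monotonic across
 * resets.
 */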
11768
11769 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11770 {
11771 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11772 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11773
11774 ESTAT_ADD(rx_octets);
11775 ESTAT_ADD(rx_fragments);
11776 ESTAT_ADD(rx_ucast_packets);
11777 ESTAT_ADD(rx_mcast_packets);
11778 ESTAT_ADD(rx_bcast_packets);
11779 ESTAT_ADD(rx_fcs_errors);
11780 ESTAT_ADD(rx_align_errors);
11781 ESTAT_ADD(rx_xon_pause_rcvd);
11782 ESTAT_ADD(rx_xoff_pause_rcvd);
11783 ESTAT_ADD(rx_mac_ctrl_rcvd);
11784 ESTAT_ADD(rx_xoff_entered);
11785 ESTAT_ADD(rx_frame_too_long_errors);
11786 ESTAT_ADD(rx_jabbers);
11787 ESTAT_ADD(rx_undersize_packets);
11788 ESTAT_ADD(rx_in_length_errors);
11789 ESTAT_ADD(rx_out_length_errors);
11790 ESTAT_ADD(rx_64_or_less_octet_packets);
11791 ESTAT_ADD(rx_65_to_127_octet_packets);
11792 ESTAT_ADD(rx_128_to_255_octet_packets);
11793 ESTAT_ADD(rx_256_to_511_octet_packets);
11794 ESTAT_ADD(rx_512_to_1023_octet_packets);
11795 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11796 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11797 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11798 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11799 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11800
11801 ESTAT_ADD(tx_octets);
11802 ESTAT_ADD(tx_collisions);
11803 ESTAT_ADD(tx_xon_sent);
11804 ESTAT_ADD(tx_xoff_sent);
11805 ESTAT_ADD(tx_flow_control);
11806 ESTAT_ADD(tx_mac_errors);
11807 ESTAT_ADD(tx_single_collisions);
11808 ESTAT_ADD(tx_mult_collisions);
11809 ESTAT_ADD(tx_deferred);
11810 ESTAT_ADD(tx_excessive_collisions);
11811 ESTAT_ADD(tx_late_collisions);
11812 ESTAT_ADD(tx_collide_2times);
11813 ESTAT_ADD(tx_collide_3times);
11814 ESTAT_ADD(tx_collide_4times);
11815 ESTAT_ADD(tx_collide_5times);
11816 ESTAT_ADD(tx_collide_6times);
11817 ESTAT_ADD(tx_collide_7times);
11818 ESTAT_ADD(tx_collide_8times);
11819 ESTAT_ADD(tx_collide_9times);
11820 ESTAT_ADD(tx_collide_10times);
11821 ESTAT_ADD(tx_collide_11times);
11822 ESTAT_ADD(tx_collide_12times);
11823 ESTAT_ADD(tx_collide_13times);
11824 ESTAT_ADD(tx_collide_14times);
11825 ESTAT_ADD(tx_collide_15times);
11826 ESTAT_ADD(tx_ucast_packets);
11827 ESTAT_ADD(tx_mcast_packets);
11828 ESTAT_ADD(tx_bcast_packets);
11829 ESTAT_ADD(tx_carrier_sense_errors);
11830 ESTAT_ADD(tx_discards);
11831 ESTAT_ADD(tx_errors);
11832
11833 ESTAT_ADD(dma_writeq_full);
11834 ESTAT_ADD(dma_write_prioq_full);
11835 ESTAT_ADD(rxbds_empty);
11836 ESTAT_ADD(rx_discards);
11837 ESTAT_ADD(rx_errors);
11838 ESTAT_ADD(rx_threshold_hit);
11839
11840 ESTAT_ADD(dma_readq_full);
11841 ESTAT_ADD(dma_read_prioq_full);
11842 ESTAT_ADD(tx_comp_queue_full);
11843
11844 ESTAT_ADD(ring_set_send_prod_index);
11845 ESTAT_ADD(ring_status_update);
11846 ESTAT_ADD(nic_irqs);
11847 ESTAT_ADD(nic_avoided_irqs);
11848 ESTAT_ADD(nic_tx_threshold_hit);
11849
11850 ESTAT_ADD(mbuf_lwm_thresh_hit);
11851 }
11852
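/* Note that rx_dropped and tx_dropped in tg3_get_nstats() below come
 * straight from the driver's software counters (tp->rx_dropped and
 * tp->tx_dropped); unlike the hardware-derived fields they are copied
 * directly rather than added to the old_stats snapshot.
 */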
11853 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11854 {
11855 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11856 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11857
11858 stats->rx_packets = old_stats->rx_packets +
11859 get_stat64(&hw_stats->rx_ucast_packets) +
11860 get_stat64(&hw_stats->rx_mcast_packets) +
11861 get_stat64(&hw_stats->rx_bcast_packets);
11862
11863 stats->tx_packets = old_stats->tx_packets +
11864 get_stat64(&hw_stats->tx_ucast_packets) +
11865 get_stat64(&hw_stats->tx_mcast_packets) +
11866 get_stat64(&hw_stats->tx_bcast_packets);
11867
11868 stats->rx_bytes = old_stats->rx_bytes +
11869 get_stat64(&hw_stats->rx_octets);
11870 stats->tx_bytes = old_stats->tx_bytes +
11871 get_stat64(&hw_stats->tx_octets);
11872
11873 stats->rx_errors = old_stats->rx_errors +
11874 get_stat64(&hw_stats->rx_errors);
11875 stats->tx_errors = old_stats->tx_errors +
11876 get_stat64(&hw_stats->tx_errors) +
11877 get_stat64(&hw_stats->tx_mac_errors) +
11878 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11879 get_stat64(&hw_stats->tx_discards);
11880
11881 stats->multicast = old_stats->multicast +
11882 get_stat64(&hw_stats->rx_mcast_packets);
11883 stats->collisions = old_stats->collisions +
11884 get_stat64(&hw_stats->tx_collisions);
11885
11886 stats->rx_length_errors = old_stats->rx_length_errors +
11887 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11888 get_stat64(&hw_stats->rx_undersize_packets);
11889
11890 stats->rx_frame_errors = old_stats->rx_frame_errors +
11891 get_stat64(&hw_stats->rx_align_errors);
11892 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11893 get_stat64(&hw_stats->tx_discards);
11894 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11895 get_stat64(&hw_stats->tx_carrier_sense_errors);
11896
11897 stats->rx_crc_errors = old_stats->rx_crc_errors +
11898 tg3_calc_crc_errors(tp);
11899
11900 stats->rx_missed_errors = old_stats->rx_missed_errors +
11901 get_stat64(&hw_stats->rx_discards);
11902
11903 stats->rx_dropped = tp->rx_dropped;
11904 stats->tx_dropped = tp->tx_dropped;
11905 }
11906
11907 static int tg3_get_regs_len(struct net_device *dev)
11908 {
11909 return TG3_REG_BLK_SIZE;
11910 }
11911
11912 static void tg3_get_regs(struct net_device *dev,
11913 struct ethtool_regs *regs, void *_p)
11914 {
11915 struct tg3 *tp = netdev_priv(dev);
11916
11917 regs->version = 0;
11918
11919 memset(_p, 0, TG3_REG_BLK_SIZE);
11920
11921 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11922 return;
11923
11924 tg3_full_lock(tp, 0);
11925
11926 tg3_dump_legacy_regs(tp, (u32 *)_p);
11927
11928 tg3_full_unlock(tp);
11929 }
11930
11931 static int tg3_get_eeprom_len(struct net_device *dev)
11932 {
11933 struct tg3 *tp = netdev_priv(dev);
11934
11935 return tp->nvram_size;
11936 }
11937
11938 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11939 {
11940 struct tg3 *tp = netdev_priv(dev);
11941 int ret, cpmu_restore = 0;
11942 u8 *pd;
11943 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11944 __be32 val;
11945
11946 if (tg3_flag(tp, NO_NVRAM))
11947 return -EINVAL;
11948
11949 offset = eeprom->offset;
11950 len = eeprom->len;
11951 eeprom->len = 0;
11952
11953 eeprom->magic = TG3_EEPROM_MAGIC;
11954
11955 /* Override clock, link aware and link idle modes */
11956 if (tg3_flag(tp, CPMU_PRESENT)) {
11957 cpmu_val = tr32(TG3_CPMU_CTRL);
11958 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11959 CPMU_CTRL_LINK_IDLE_MODE)) {
11960 tw32(TG3_CPMU_CTRL, cpmu_val &
11961 ~(CPMU_CTRL_LINK_AWARE_MODE |
11962 CPMU_CTRL_LINK_IDLE_MODE));
11963 cpmu_restore = 1;
11964 }
11965 }
11966 tg3_override_clk(tp);
11967
11968 if (offset & 3) {
11969 /* adjustments to start on required 4 byte boundary */
11970 b_offset = offset & 3;
11971 b_count = 4 - b_offset;
11972 if (b_count > len) {
11973 /* i.e. offset=1 len=2 */
11974 b_count = len;
11975 }
11976 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11977 if (ret)
11978 goto eeprom_done;
11979 memcpy(data, ((char *)&val) + b_offset, b_count);
11980 len -= b_count;
11981 offset += b_count;
11982 eeprom->len += b_count;
11983 }
11984
11985 /* read bytes up to the last 4 byte boundary */
11986 pd = &data[eeprom->len];
11987 for (i = 0; i < (len - (len & 3)); i += 4) {
11988 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11989 if (ret) {
11990 if (i)
11991 i -= 4;
11992 eeprom->len += i;
11993 goto eeprom_done;
11994 }
11995 memcpy(pd + i, &val, 4);
11996 if (need_resched()) {
11997 if (signal_pending(current)) {
11998 eeprom->len += i;
11999 ret = -EINTR;
12000 goto eeprom_done;
12001 }
12002 cond_resched();
12003 }
12004 }
12005 eeprom->len += i;
12006
12007 if (len & 3) {
12008 /* read last bytes not ending on 4 byte boundary */
12009 pd = &data[eeprom->len];
12010 b_count = len & 3;
12011 b_offset = offset + len - b_count;
12012 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12013 if (ret)
12014 goto eeprom_done;
12015 memcpy(pd, &val, b_count);
12016 eeprom->len += b_count;
12017 }
12018 ret = 0;
12019
12020 eeprom_done:
12021 /* Restore clock, link aware and link idle modes */
12022 tg3_restore_clk(tp);
12023 if (cpmu_restore)
12024 tw32(TG3_CPMU_CTRL, cpmu_val);
12025
12026 return ret;
12027 }
12028
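/* Worked example for the unaligned-read logic in tg3_get_eeprom()
 * above, assuming offset = 5 and len = 10:
 *
 *	head: b_offset = 1, b_count = 3; read the word at 4 and copy
 *	      bytes 5..7
 *	body: one aligned word covering bytes 8..11
 *	tail: b_count = 3; read the word at 12 and copy bytes 12..14
 *
 * for a total of 3 + 4 + 3 = 10 bytes, all via 4-byte NVRAM reads.
 */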
12029 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12030 {
12031 struct tg3 *tp = netdev_priv(dev);
12032 int ret;
12033 u32 offset, len, b_offset, odd_len;
12034 u8 *buf;
12035 __be32 start = 0, end;
12036
12037 if (tg3_flag(tp, NO_NVRAM) ||
12038 eeprom->magic != TG3_EEPROM_MAGIC)
12039 return -EINVAL;
12040
12041 offset = eeprom->offset;
12042 len = eeprom->len;
12043
12044 if ((b_offset = (offset & 3))) {
12045 /* adjustments to start on required 4 byte boundary */
12046 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12047 if (ret)
12048 return ret;
12049 len += b_offset;
12050 offset &= ~3;
12051 if (len < 4)
12052 len = 4;
12053 }
12054
12055 odd_len = 0;
12056 if (len & 3) {
12057 /* adjustments to end on required 4 byte boundary */
12058 odd_len = 1;
12059 len = (len + 3) & ~3;
12060 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12061 if (ret)
12062 return ret;
12063 }
12064
12065 buf = data;
12066 if (b_offset || odd_len) {
12067 buf = kmalloc(len, GFP_KERNEL);
12068 if (!buf)
12069 return -ENOMEM;
12070 if (b_offset)
12071 memcpy(buf, &start, 4);
12072 if (odd_len)
12073 memcpy(buf+len-4, &end, 4);
12074 memcpy(buf + b_offset, data, eeprom->len);
12075 }
12076
12077 ret = tg3_nvram_write_block(tp, offset, len, buf);
12078
12079 if (buf != data)
12080 kfree(buf);
12081
12082 return ret;
12083 }
12084
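/* tg3_nvram_write_block() operates on whole aligned words, so
 * tg3_set_eeprom() above widens unaligned requests with a
 * read-modify-write. Assuming offset = 6 and len = 3: b_offset = 2, so
 * the word at 4 is read into 'start'; len becomes 5 and is then rounded
 * up to 8 with the word at 8 read into 'end'; both are copied into a
 * bounce buffer, the caller's 3 bytes are merged at buf + 2, and the
 * full 8 bytes covering offsets 4..11 are written back.
 */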
12085 static int tg3_get_link_ksettings(struct net_device *dev,
12086 struct ethtool_link_ksettings *cmd)
12087 {
12088 struct tg3 *tp = netdev_priv(dev);
12089 u32 supported, advertising;
12090
12091 if (tg3_flag(tp, USE_PHYLIB)) {
12092 struct phy_device *phydev;
12093 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12094 return -EAGAIN;
12095 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12096 return phy_ethtool_ksettings_get(phydev, cmd);
12097 }
12098
12099 supported = (SUPPORTED_Autoneg);
12100
12101 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12102 supported |= (SUPPORTED_1000baseT_Half |
12103 SUPPORTED_1000baseT_Full);
12104
12105 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12106 supported |= (SUPPORTED_100baseT_Half |
12107 SUPPORTED_100baseT_Full |
12108 SUPPORTED_10baseT_Half |
12109 SUPPORTED_10baseT_Full |
12110 SUPPORTED_TP);
12111 cmd->base.port = PORT_TP;
12112 } else {
12113 supported |= SUPPORTED_FIBRE;
12114 cmd->base.port = PORT_FIBRE;
12115 }
12116 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12117 supported);
12118
12119 advertising = tp->link_config.advertising;
12120 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12121 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12122 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12123 advertising |= ADVERTISED_Pause;
12124 } else {
12125 advertising |= ADVERTISED_Pause |
12126 ADVERTISED_Asym_Pause;
12127 }
12128 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12129 advertising |= ADVERTISED_Asym_Pause;
12130 }
12131 }
12132 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12133 advertising);
12134
12135 if (netif_running(dev) && tp->link_up) {
12136 cmd->base.speed = tp->link_config.active_speed;
12137 cmd->base.duplex = tp->link_config.active_duplex;
12138 ethtool_convert_legacy_u32_to_link_mode(
12139 cmd->link_modes.lp_advertising,
12140 tp->link_config.rmt_adv);
12141
12142 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12143 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12144 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12145 else
12146 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12147 }
12148 } else {
12149 cmd->base.speed = SPEED_UNKNOWN;
12150 cmd->base.duplex = DUPLEX_UNKNOWN;
12151 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12152 }
12153 cmd->base.phy_address = tp->phy_addr;
12154 cmd->base.autoneg = tp->link_config.autoneg;
12155 return 0;
12156 }
12157
12158 static int tg3_set_link_ksettings(struct net_device *dev,
12159 const struct ethtool_link_ksettings *cmd)
12160 {
12161 struct tg3 *tp = netdev_priv(dev);
12162 u32 speed = cmd->base.speed;
12163 u32 advertising;
12164
12165 if (tg3_flag(tp, USE_PHYLIB)) {
12166 struct phy_device *phydev;
12167 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12168 return -EAGAIN;
12169 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12170 return phy_ethtool_ksettings_set(phydev, cmd);
12171 }
12172
12173 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12174 cmd->base.autoneg != AUTONEG_DISABLE)
12175 return -EINVAL;
12176
12177 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12178 cmd->base.duplex != DUPLEX_FULL &&
12179 cmd->base.duplex != DUPLEX_HALF)
12180 return -EINVAL;
12181
12182 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12183 cmd->link_modes.advertising);
12184
12185 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12186 u32 mask = ADVERTISED_Autoneg |
12187 ADVERTISED_Pause |
12188 ADVERTISED_Asym_Pause;
12189
12190 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12191 mask |= ADVERTISED_1000baseT_Half |
12192 ADVERTISED_1000baseT_Full;
12193
12194 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12195 mask |= ADVERTISED_100baseT_Half |
12196 ADVERTISED_100baseT_Full |
12197 ADVERTISED_10baseT_Half |
12198 ADVERTISED_10baseT_Full |
12199 ADVERTISED_TP;
12200 else
12201 mask |= ADVERTISED_FIBRE;
12202
12203 if (advertising & ~mask)
12204 return -EINVAL;
12205
12206 mask &= (ADVERTISED_1000baseT_Half |
12207 ADVERTISED_1000baseT_Full |
12208 ADVERTISED_100baseT_Half |
12209 ADVERTISED_100baseT_Full |
12210 ADVERTISED_10baseT_Half |
12211 ADVERTISED_10baseT_Full);
12212
12213 advertising &= mask;
12214 } else {
12215 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12216 if (speed != SPEED_1000)
12217 return -EINVAL;
12218
12219 if (cmd->base.duplex != DUPLEX_FULL)
12220 return -EINVAL;
12221 } else {
12222 if (speed != SPEED_100 &&
12223 speed != SPEED_10)
12224 return -EINVAL;
12225 }
12226 }
12227
12228 tg3_full_lock(tp, 0);
12229
12230 tp->link_config.autoneg = cmd->base.autoneg;
12231 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12232 tp->link_config.advertising = (advertising |
12233 ADVERTISED_Autoneg);
12234 tp->link_config.speed = SPEED_UNKNOWN;
12235 tp->link_config.duplex = DUPLEX_UNKNOWN;
12236 } else {
12237 tp->link_config.advertising = 0;
12238 tp->link_config.speed = speed;
12239 tp->link_config.duplex = cmd->base.duplex;
12240 }
12241
12242 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12243
12244 tg3_warn_mgmt_link_flap(tp);
12245
12246 if (netif_running(dev))
12247 tg3_setup_phy(tp, true);
12248
12249 tg3_full_unlock(tp);
12250
12251 return 0;
12252 }
12253
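/* From userspace the two handlers above back the usual ethtool link
 * controls, e.g. (sketch, with "eth0" standing in for the interface):
 *
 *	ethtool eth0				# get_link_ksettings
 *	ethtool -s eth0 speed 100 duplex full autoneg off
 */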
12254 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12255 {
12256 struct tg3 *tp = netdev_priv(dev);
12257
12258 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12259 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12260 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12261 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12262 }
12263
12264 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12265 {
12266 struct tg3 *tp = netdev_priv(dev);
12267
12268 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12269 wol->supported = WAKE_MAGIC;
12270 else
12271 wol->supported = 0;
12272 wol->wolopts = 0;
12273 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12274 wol->wolopts = WAKE_MAGIC;
12275 memset(&wol->sopass, 0, sizeof(wol->sopass));
12276 }
12277
12278 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12279 {
12280 struct tg3 *tp = netdev_priv(dev);
12281 struct device *dp = &tp->pdev->dev;
12282
12283 if (wol->wolopts & ~WAKE_MAGIC)
12284 return -EINVAL;
12285 if ((wol->wolopts & WAKE_MAGIC) &&
12286 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12287 return -EINVAL;
12288
12289 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12290
12291 if (device_may_wakeup(dp))
12292 tg3_flag_set(tp, WOL_ENABLE);
12293 else
12294 tg3_flag_clear(tp, WOL_ENABLE);
12295
12296 return 0;
12297 }
12298
12299 static u32 tg3_get_msglevel(struct net_device *dev)
12300 {
12301 struct tg3 *tp = netdev_priv(dev);
12302 return tp->msg_enable;
12303 }
12304
12305 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12306 {
12307 struct tg3 *tp = netdev_priv(dev);
12308 tp->msg_enable = value;
12309 }
12310
12311 static int tg3_nway_reset(struct net_device *dev)
12312 {
12313 struct tg3 *tp = netdev_priv(dev);
12314 int r;
12315
12316 if (!netif_running(dev))
12317 return -EAGAIN;
12318
12319 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12320 return -EINVAL;
12321
12322 tg3_warn_mgmt_link_flap(tp);
12323
12324 if (tg3_flag(tp, USE_PHYLIB)) {
12325 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12326 return -EAGAIN;
12327 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12328 } else {
12329 u32 bmcr;
12330
12331 spin_lock_bh(&tp->lock);
12332 r = -EINVAL;
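		/* The back-to-back BMCR reads below look intentional: the
		 * first read presumably flushes a latched value, and only
		 * the second, checked read is used.
		 */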
12333 tg3_readphy(tp, MII_BMCR, &bmcr);
12334 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12335 ((bmcr & BMCR_ANENABLE) ||
12336 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12337 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12338 BMCR_ANENABLE);
12339 r = 0;
12340 }
12341 spin_unlock_bh(&tp->lock);
12342 }
12343
12344 return r;
12345 }
12346
12347 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12348 {
12349 struct tg3 *tp = netdev_priv(dev);
12350
12351 ering->rx_max_pending = tp->rx_std_ring_mask;
12352 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12353 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12354 else
12355 ering->rx_jumbo_max_pending = 0;
12356
12357 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12358
12359 ering->rx_pending = tp->rx_pending;
12360 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12361 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12362 else
12363 ering->rx_jumbo_pending = 0;
12364
12365 ering->tx_pending = tp->napi[0].tx_pending;
12366 }
12367
12368 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12369 {
12370 struct tg3 *tp = netdev_priv(dev);
12371 int i, irq_sync = 0, err = 0;
12372
12373 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12374 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12375 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12376 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12377 (tg3_flag(tp, TSO_BUG) &&
12378 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12379 return -EINVAL;
12380
12381 if (netif_running(dev)) {
12382 tg3_phy_stop(tp);
12383 tg3_netif_stop(tp);
12384 irq_sync = 1;
12385 }
12386
12387 tg3_full_lock(tp, irq_sync);
12388
12389 tp->rx_pending = ering->rx_pending;
12390
12391 if (tg3_flag(tp, MAX_RXPEND_64) &&
12392 tp->rx_pending > 63)
12393 tp->rx_pending = 63;
12394
12395 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12396 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12397
12398 for (i = 0; i < tp->irq_max; i++)
12399 tp->napi[i].tx_pending = ering->tx_pending;
12400
12401 if (netif_running(dev)) {
12402 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12403 err = tg3_restart_hw(tp, false);
12404 if (!err)
12405 tg3_netif_start(tp);
12406 }
12407
12408 tg3_full_unlock(tp);
12409
12410 if (irq_sync && !err)
12411 tg3_phy_start(tp);
12412
12413 return err;
12414 }
12415
12416 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12417 {
12418 struct tg3 *tp = netdev_priv(dev);
12419
12420 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12421
12422 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12423 epause->rx_pause = 1;
12424 else
12425 epause->rx_pause = 0;
12426
12427 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12428 epause->tx_pause = 1;
12429 else
12430 epause->tx_pause = 0;
12431 }
12432
12433 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12434 {
12435 struct tg3 *tp = netdev_priv(dev);
12436 int err = 0;
12437
12438 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12439 tg3_warn_mgmt_link_flap(tp);
12440
12441 if (tg3_flag(tp, USE_PHYLIB)) {
12442 u32 newadv;
12443 struct phy_device *phydev;
12444
12445 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12446
12447 if (!(phydev->supported & SUPPORTED_Pause) ||
12448 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12449 (epause->rx_pause != epause->tx_pause)))
12450 return -EINVAL;
12451
12452 tp->link_config.flowctrl = 0;
12453 if (epause->rx_pause) {
12454 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12455
12456 if (epause->tx_pause) {
12457 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12458 newadv = ADVERTISED_Pause;
12459 } else
12460 newadv = ADVERTISED_Pause |
12461 ADVERTISED_Asym_Pause;
12462 } else if (epause->tx_pause) {
12463 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12464 newadv = ADVERTISED_Asym_Pause;
12465 } else
12466 newadv = 0;
12467
12468 if (epause->autoneg)
12469 tg3_flag_set(tp, PAUSE_AUTONEG);
12470 else
12471 tg3_flag_clear(tp, PAUSE_AUTONEG);
12472
12473 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12474 u32 oldadv = phydev->advertising &
12475 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12476 if (oldadv != newadv) {
12477 phydev->advertising &=
12478 ~(ADVERTISED_Pause |
12479 ADVERTISED_Asym_Pause);
12480 phydev->advertising |= newadv;
12481 if (phydev->autoneg) {
12482 /*
12483 * Always renegotiate the link to
12484 * inform our link partner of our
12485 * flow control settings, even if the
12486 * flow control is forced. Let
12487 * tg3_adjust_link() do the final
12488 * flow control setup.
12489 */
12490 return phy_start_aneg(phydev);
12491 }
12492 }
12493
12494 if (!epause->autoneg)
12495 tg3_setup_flow_control(tp, 0, 0);
12496 } else {
12497 tp->link_config.advertising &=
12498 ~(ADVERTISED_Pause |
12499 ADVERTISED_Asym_Pause);
12500 tp->link_config.advertising |= newadv;
12501 }
12502 } else {
12503 int irq_sync = 0;
12504
12505 if (netif_running(dev)) {
12506 tg3_netif_stop(tp);
12507 irq_sync = 1;
12508 }
12509
12510 tg3_full_lock(tp, irq_sync);
12511
12512 if (epause->autoneg)
12513 tg3_flag_set(tp, PAUSE_AUTONEG);
12514 else
12515 tg3_flag_clear(tp, PAUSE_AUTONEG);
12516 if (epause->rx_pause)
12517 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12518 else
12519 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12520 if (epause->tx_pause)
12521 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12522 else
12523 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12524
12525 if (netif_running(dev)) {
12526 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12527 err = tg3_restart_hw(tp, false);
12528 if (!err)
12529 tg3_netif_start(tp);
12530 }
12531
12532 tg3_full_unlock(tp);
12533 }
12534
12535 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12536
12537 return err;
12538 }
12539
12540 static int tg3_get_sset_count(struct net_device *dev, int sset)
12541 {
12542 switch (sset) {
12543 case ETH_SS_TEST:
12544 return TG3_NUM_TEST;
12545 case ETH_SS_STATS:
12546 return TG3_NUM_STATS;
12547 default:
12548 return -EOPNOTSUPP;
12549 }
12550 }
12551
12552 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12553 u32 *rules __always_unused)
12554 {
12555 struct tg3 *tp = netdev_priv(dev);
12556
12557 if (!tg3_flag(tp, SUPPORT_MSIX))
12558 return -EOPNOTSUPP;
12559
12560 switch (info->cmd) {
12561 case ETHTOOL_GRXRINGS:
12562 if (netif_running(tp->dev))
12563 info->data = tp->rxq_cnt;
12564 else {
12565 info->data = num_online_cpus();
12566 if (info->data > TG3_RSS_MAX_NUM_QS)
12567 info->data = TG3_RSS_MAX_NUM_QS;
12568 }
12569
12570 return 0;
12571
12572 default:
12573 return -EOPNOTSUPP;
12574 }
12575 }
12576
12577 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12578 {
12579 u32 size = 0;
12580 struct tg3 *tp = netdev_priv(dev);
12581
12582 if (tg3_flag(tp, SUPPORT_MSIX))
12583 size = TG3_RSS_INDIR_TBL_SIZE;
12584
12585 return size;
12586 }
12587
12588 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12589 {
12590 struct tg3 *tp = netdev_priv(dev);
12591 int i;
12592
12593 if (hfunc)
12594 *hfunc = ETH_RSS_HASH_TOP;
12595 if (!indir)
12596 return 0;
12597
12598 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12599 indir[i] = tp->rss_ind_tbl[i];
12600
12601 return 0;
12602 }
12603
12604 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12605 const u8 hfunc)
12606 {
12607 struct tg3 *tp = netdev_priv(dev);
12608 size_t i;
12609
12610 	/* Only the indirection table can be changed here; reject a new
12611 	 * hash key or any hash function other than the Toeplitz default.
12612 	 */
12613 if (key ||
12614 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12615 return -EOPNOTSUPP;
12616
12617 if (!indir)
12618 return 0;
12619
12620 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12621 tp->rss_ind_tbl[i] = indir[i];
12622
12623 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12624 return 0;
12625
12626 /* It is legal to write the indirection
12627 * table while the device is running.
12628 */
12629 tg3_full_lock(tp, 0);
12630 tg3_rss_write_indir_tbl(tp);
12631 tg3_full_unlock(tp);
12632
12633 return 0;
12634 }
12635
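/* The RSS indirection table can be inspected and rewritten at runtime
 * with ethtool, e.g. (sketch):
 *
 *	ethtool -x eth0			# dump the table (get_rxfh)
 *	ethtool -X eth0 equal 4		# spread rx over queues 0-3 (set_rxfh)
 *
 * Hash key and hash function changes are rejected with -EOPNOTSUPP.
 */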
12636 static void tg3_get_channels(struct net_device *dev,
12637 struct ethtool_channels *channel)
12638 {
12639 struct tg3 *tp = netdev_priv(dev);
12640 u32 deflt_qs = netif_get_num_default_rss_queues();
12641
12642 channel->max_rx = tp->rxq_max;
12643 channel->max_tx = tp->txq_max;
12644
12645 if (netif_running(dev)) {
12646 channel->rx_count = tp->rxq_cnt;
12647 channel->tx_count = tp->txq_cnt;
12648 } else {
12649 if (tp->rxq_req)
12650 channel->rx_count = tp->rxq_req;
12651 else
12652 channel->rx_count = min(deflt_qs, tp->rxq_max);
12653
12654 if (tp->txq_req)
12655 channel->tx_count = tp->txq_req;
12656 else
12657 channel->tx_count = min(deflt_qs, tp->txq_max);
12658 }
12659 }
12660
12661 static int tg3_set_channels(struct net_device *dev,
12662 struct ethtool_channels *channel)
12663 {
12664 struct tg3 *tp = netdev_priv(dev);
12665
12666 if (!tg3_flag(tp, SUPPORT_MSIX))
12667 return -EOPNOTSUPP;
12668
12669 if (channel->rx_count > tp->rxq_max ||
12670 channel->tx_count > tp->txq_max)
12671 return -EINVAL;
12672
12673 tp->rxq_req = channel->rx_count;
12674 tp->txq_req = channel->tx_count;
12675
12676 if (!netif_running(dev))
12677 return 0;
12678
12679 tg3_stop(tp);
12680
12681 tg3_carrier_off(tp);
12682
12683 tg3_start(tp, true, false, false);
12684
12685 return 0;
12686 }
12687
12688 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12689 {
12690 switch (stringset) {
12691 case ETH_SS_STATS:
12692 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12693 break;
12694 case ETH_SS_TEST:
12695 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12696 break;
12697 default:
12698 		WARN_ON(1);	/* should never happen: unknown string set */
12699 break;
12700 }
12701 }
12702
12703 static int tg3_set_phys_id(struct net_device *dev,
12704 enum ethtool_phys_id_state state)
12705 {
12706 struct tg3 *tp = netdev_priv(dev);
12707
12708 if (!netif_running(tp->dev))
12709 return -EAGAIN;
12710
12711 switch (state) {
12712 case ETHTOOL_ID_ACTIVE:
12713 return 1; /* cycle on/off once per second */
12714
12715 case ETHTOOL_ID_ON:
12716 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12717 LED_CTRL_1000MBPS_ON |
12718 LED_CTRL_100MBPS_ON |
12719 LED_CTRL_10MBPS_ON |
12720 LED_CTRL_TRAFFIC_OVERRIDE |
12721 LED_CTRL_TRAFFIC_BLINK |
12722 LED_CTRL_TRAFFIC_LED);
12723 break;
12724
12725 case ETHTOOL_ID_OFF:
12726 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12727 LED_CTRL_TRAFFIC_OVERRIDE);
12728 break;
12729
12730 case ETHTOOL_ID_INACTIVE:
12731 tw32(MAC_LED_CTRL, tp->led_ctrl);
12732 break;
12733 }
12734
12735 return 0;
12736 }
12737
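/* Returning 1 for ETHTOOL_ID_ACTIVE above asks the ethtool core to call
 * back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF once per second, so e.g.
 * "ethtool -p eth0 5" (sketch) blinks the port LED for five seconds
 * before ETHTOOL_ID_INACTIVE restores tp->led_ctrl.
 */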
12738 static void tg3_get_ethtool_stats(struct net_device *dev,
12739 struct ethtool_stats *estats, u64 *tmp_stats)
12740 {
12741 struct tg3 *tp = netdev_priv(dev);
12742
12743 if (tp->hw_stats)
12744 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12745 else
12746 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12747 }
12748
12749 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12750 {
12751 int i;
12752 __be32 *buf;
12753 u32 offset = 0, len = 0;
12754 u32 magic, val;
12755
12756 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12757 return NULL;
12758
12759 if (magic == TG3_EEPROM_MAGIC) {
12760 for (offset = TG3_NVM_DIR_START;
12761 offset < TG3_NVM_DIR_END;
12762 offset += TG3_NVM_DIRENT_SIZE) {
12763 if (tg3_nvram_read(tp, offset, &val))
12764 return NULL;
12765
12766 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12767 TG3_NVM_DIRTYPE_EXTVPD)
12768 break;
12769 }
12770
12771 if (offset != TG3_NVM_DIR_END) {
12772 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12773 if (tg3_nvram_read(tp, offset + 4, &offset))
12774 return NULL;
12775
12776 offset = tg3_nvram_logical_addr(tp, offset);
12777 }
12778 }
12779
12780 if (!offset || !len) {
12781 offset = TG3_NVM_VPD_OFF;
12782 len = TG3_NVM_VPD_LEN;
12783 }
12784
12785 buf = kmalloc(len, GFP_KERNEL);
12786 if (buf == NULL)
12787 return NULL;
12788
12789 if (magic == TG3_EEPROM_MAGIC) {
12790 for (i = 0; i < len; i += 4) {
12791 /* The data is in little-endian format in NVRAM.
12792 * Use the big-endian read routines to preserve
12793 * the byte order as it exists in NVRAM.
12794 */
12795 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12796 goto error;
12797 }
12798 } else {
12799 u8 *ptr;
12800 ssize_t cnt;
12801 unsigned int pos = 0;
12802
12803 ptr = (u8 *)&buf[0];
12804 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12805 cnt = pci_read_vpd(tp->pdev, pos,
12806 len - pos, ptr);
12807 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12808 cnt = 0;
12809 else if (cnt < 0)
12810 goto error;
12811 }
12812 if (pos != len)
12813 goto error;
12814 }
12815
12816 *vpdlen = len;
12817
12818 return buf;
12819
12820 error:
12821 kfree(buf);
12822 return NULL;
12823 }
12824
12825 #define NVRAM_TEST_SIZE 0x100
12826 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12827 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12828 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12829 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12830 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12831 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12832 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12833 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12834
12835 static int tg3_test_nvram(struct tg3 *tp)
12836 {
12837 u32 csum, magic, len;
12838 __be32 *buf;
12839 int i, j, k, err = 0, size;
12840
12841 if (tg3_flag(tp, NO_NVRAM))
12842 return 0;
12843
12844 if (tg3_nvram_read(tp, 0, &magic) != 0)
12845 return -EIO;
12846
12847 if (magic == TG3_EEPROM_MAGIC)
12848 size = NVRAM_TEST_SIZE;
12849 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12850 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12851 TG3_EEPROM_SB_FORMAT_1) {
12852 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12853 case TG3_EEPROM_SB_REVISION_0:
12854 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12855 break;
12856 case TG3_EEPROM_SB_REVISION_2:
12857 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12858 break;
12859 case TG3_EEPROM_SB_REVISION_3:
12860 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12861 break;
12862 case TG3_EEPROM_SB_REVISION_4:
12863 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12864 break;
12865 case TG3_EEPROM_SB_REVISION_5:
12866 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12867 break;
12868 case TG3_EEPROM_SB_REVISION_6:
12869 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12870 break;
12871 default:
12872 return -EIO;
12873 }
12874 } else
12875 return 0;
12876 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12877 size = NVRAM_SELFBOOT_HW_SIZE;
12878 else
12879 return -EIO;
12880
12881 buf = kmalloc(size, GFP_KERNEL);
12882 if (buf == NULL)
12883 return -ENOMEM;
12884
12885 err = -EIO;
12886 for (i = 0, j = 0; i < size; i += 4, j++) {
12887 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12888 if (err)
12889 break;
12890 }
12891 if (i < size)
12892 goto out;
12893
12894 /* Selfboot format */
12895 magic = be32_to_cpu(buf[0]);
12896 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12897 TG3_EEPROM_MAGIC_FW) {
12898 u8 *buf8 = (u8 *) buf, csum8 = 0;
12899
12900 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12901 TG3_EEPROM_SB_REVISION_2) {
12902 			/* For rev 2, the csum doesn't include the MBA (Multiple Boot Agent). */
12903 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12904 csum8 += buf8[i];
12905 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12906 csum8 += buf8[i];
12907 } else {
12908 for (i = 0; i < size; i++)
12909 csum8 += buf8[i];
12910 }
12911
12912 if (csum8 == 0) {
12913 err = 0;
12914 goto out;
12915 }
12916
12917 err = -EIO;
12918 goto out;
12919 }
12920
12921 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12922 TG3_EEPROM_MAGIC_HW) {
12923 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12924 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12925 u8 *buf8 = (u8 *) buf;
12926
12927 /* Separate the parity bits and the data bytes. */
12928 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12929 if ((i == 0) || (i == 8)) {
12930 int l;
12931 u8 msk;
12932
12933 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12934 parity[k++] = buf8[i] & msk;
12935 i++;
12936 } else if (i == 16) {
12937 int l;
12938 u8 msk;
12939
12940 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12941 parity[k++] = buf8[i] & msk;
12942 i++;
12943
12944 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12945 parity[k++] = buf8[i] & msk;
12946 i++;
12947 }
12948 data[j++] = buf8[i];
12949 }
12950
12951 err = -EIO;
12952 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12953 u8 hw8 = hweight8(data[i]);
12954
12955 if ((hw8 & 0x1) && parity[i])
12956 goto out;
12957 else if (!(hw8 & 0x1) && !parity[i])
12958 goto out;
12959 }
12960 err = 0;
12961 goto out;
12962 }
12963
12964 err = -EIO;
12965
12966 /* Bootstrap checksum at offset 0x10 */
12967 csum = calc_crc((unsigned char *) buf, 0x10);
12968 if (csum != le32_to_cpu(buf[0x10/4]))
12969 goto out;
12970
12971 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12972 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12973 if (csum != le32_to_cpu(buf[0xfc/4]))
12974 goto out;
12975
12976 kfree(buf);
12977
12978 buf = tg3_vpd_readblock(tp, &len);
12979 if (!buf)
12980 return -ENOMEM;
12981
12982 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12983 if (i > 0) {
12984 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12985 if (j < 0)
12986 goto out;
12987
12988 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12989 goto out;
12990
12991 i += PCI_VPD_LRDT_TAG_SIZE;
12992 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12993 PCI_VPD_RO_KEYWORD_CHKSUM);
12994 if (j > 0) {
12995 u8 csum8 = 0;
12996
12997 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12998
12999 for (i = 0; i <= j; i++)
13000 csum8 += ((u8 *)buf)[i];
13001
13002 if (csum8)
13003 goto out;
13004 }
13005 }
13006
13007 err = 0;
13008
13009 out:
13010 kfree(buf);
13011 return err;
13012 }
13013
13014 #define TG3_SERDES_TIMEOUT_SEC 2
13015 #define TG3_COPPER_TIMEOUT_SEC 6
13016
13017 static int tg3_test_link(struct tg3 *tp)
13018 {
13019 int i, max;
13020
13021 if (!netif_running(tp->dev))
13022 return -ENODEV;
13023
13024 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13025 max = TG3_SERDES_TIMEOUT_SEC;
13026 else
13027 max = TG3_COPPER_TIMEOUT_SEC;
13028
13029 for (i = 0; i < max; i++) {
13030 if (tp->link_up)
13031 return 0;
13032
13033 if (msleep_interruptible(1000))
13034 break;
13035 }
13036
13037 return -EIO;
13038 }
13039
13040 /* Only test the commonly used registers */
13041 static int tg3_test_registers(struct tg3 *tp)
13042 {
13043 int i, is_5705, is_5750;
13044 u32 offset, read_mask, write_mask, val, save_val, read_val;
13045 static struct {
13046 u16 offset;
13047 u16 flags;
13048 #define TG3_FL_5705 0x1
13049 #define TG3_FL_NOT_5705 0x2
13050 #define TG3_FL_NOT_5788 0x4
13051 #define TG3_FL_NOT_5750 0x8
13052 u32 read_mask;
13053 u32 write_mask;
13054 } reg_tbl[] = {
13055 /* MAC Control Registers */
13056 { MAC_MODE, TG3_FL_NOT_5705,
13057 0x00000000, 0x00ef6f8c },
13058 { MAC_MODE, TG3_FL_5705,
13059 0x00000000, 0x01ef6b8c },
13060 { MAC_STATUS, TG3_FL_NOT_5705,
13061 0x03800107, 0x00000000 },
13062 { MAC_STATUS, TG3_FL_5705,
13063 0x03800100, 0x00000000 },
13064 { MAC_ADDR_0_HIGH, 0x0000,
13065 0x00000000, 0x0000ffff },
13066 { MAC_ADDR_0_LOW, 0x0000,
13067 0x00000000, 0xffffffff },
13068 { MAC_RX_MTU_SIZE, 0x0000,
13069 0x00000000, 0x0000ffff },
13070 { MAC_TX_MODE, 0x0000,
13071 0x00000000, 0x00000070 },
13072 { MAC_TX_LENGTHS, 0x0000,
13073 0x00000000, 0x00003fff },
13074 { MAC_RX_MODE, TG3_FL_NOT_5705,
13075 0x00000000, 0x000007fc },
13076 { MAC_RX_MODE, TG3_FL_5705,
13077 0x00000000, 0x000007dc },
13078 { MAC_HASH_REG_0, 0x0000,
13079 0x00000000, 0xffffffff },
13080 { MAC_HASH_REG_1, 0x0000,
13081 0x00000000, 0xffffffff },
13082 { MAC_HASH_REG_2, 0x0000,
13083 0x00000000, 0xffffffff },
13084 { MAC_HASH_REG_3, 0x0000,
13085 0x00000000, 0xffffffff },
13086
13087 /* Receive Data and Receive BD Initiator Control Registers. */
13088 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13089 0x00000000, 0xffffffff },
13090 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13091 0x00000000, 0xffffffff },
13092 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13093 0x00000000, 0x00000003 },
13094 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13095 0x00000000, 0xffffffff },
13096 { RCVDBDI_STD_BD+0, 0x0000,
13097 0x00000000, 0xffffffff },
13098 { RCVDBDI_STD_BD+4, 0x0000,
13099 0x00000000, 0xffffffff },
13100 { RCVDBDI_STD_BD+8, 0x0000,
13101 0x00000000, 0xffff0002 },
13102 { RCVDBDI_STD_BD+0xc, 0x0000,
13103 0x00000000, 0xffffffff },
13104
13105 /* Receive BD Initiator Control Registers. */
13106 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13107 0x00000000, 0xffffffff },
13108 { RCVBDI_STD_THRESH, TG3_FL_5705,
13109 0x00000000, 0x000003ff },
13110 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13111 0x00000000, 0xffffffff },
13112
13113 /* Host Coalescing Control Registers. */
13114 { HOSTCC_MODE, TG3_FL_NOT_5705,
13115 0x00000000, 0x00000004 },
13116 { HOSTCC_MODE, TG3_FL_5705,
13117 0x00000000, 0x000000f6 },
13118 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13119 0x00000000, 0xffffffff },
13120 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13121 0x00000000, 0x000003ff },
13122 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13123 0x00000000, 0xffffffff },
13124 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13125 0x00000000, 0x000003ff },
13126 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13127 0x00000000, 0xffffffff },
13128 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13129 0x00000000, 0x000000ff },
13130 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13131 0x00000000, 0xffffffff },
13132 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13133 0x00000000, 0x000000ff },
13134 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13135 0x00000000, 0xffffffff },
13136 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13137 0x00000000, 0xffffffff },
13138 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13139 0x00000000, 0xffffffff },
13140 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13141 0x00000000, 0x000000ff },
13142 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13143 0x00000000, 0xffffffff },
13144 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13145 0x00000000, 0x000000ff },
13146 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13147 0x00000000, 0xffffffff },
13148 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13149 0x00000000, 0xffffffff },
13150 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13151 0x00000000, 0xffffffff },
13152 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13153 0x00000000, 0xffffffff },
13154 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13155 0x00000000, 0xffffffff },
13156 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13157 0xffffffff, 0x00000000 },
13158 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13159 0xffffffff, 0x00000000 },
13160
13161 /* Buffer Manager Control Registers. */
13162 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13163 0x00000000, 0x007fff80 },
13164 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13165 0x00000000, 0x007fffff },
13166 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13167 0x00000000, 0x0000003f },
13168 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13169 0x00000000, 0x000001ff },
13170 { BUFMGR_MB_HIGH_WATER, 0x0000,
13171 0x00000000, 0x000001ff },
13172 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13173 0xffffffff, 0x00000000 },
13174 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13175 0xffffffff, 0x00000000 },
13176
13177 /* Mailbox Registers */
13178 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13179 0x00000000, 0x000001ff },
13180 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13181 0x00000000, 0x000001ff },
13182 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13183 0x00000000, 0x000007ff },
13184 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13185 0x00000000, 0x000001ff },
13186
13187 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13188 };
13189
13190 is_5705 = is_5750 = 0;
13191 if (tg3_flag(tp, 5705_PLUS)) {
13192 is_5705 = 1;
13193 if (tg3_flag(tp, 5750_PLUS))
13194 is_5750 = 1;
13195 }
13196
13197 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13198 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13199 continue;
13200
13201 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13202 continue;
13203
13204 if (tg3_flag(tp, IS_5788) &&
13205 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13206 continue;
13207
13208 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13209 continue;
13210
13211 offset = (u32) reg_tbl[i].offset;
13212 read_mask = reg_tbl[i].read_mask;
13213 write_mask = reg_tbl[i].write_mask;
13214
13215 /* Save the original register content */
13216 save_val = tr32(offset);
13217
13218 /* Determine the read-only value. */
13219 read_val = save_val & read_mask;
13220
13221 /* Write zero to the register, then make sure the read-only bits
13222 * are not changed and the read/write bits are all zeros.
13223 */
13224 tw32(offset, 0);
13225
13226 val = tr32(offset);
13227
13228 /* Test the read-only and read/write bits. */
13229 if (((val & read_mask) != read_val) || (val & write_mask))
13230 goto out;
13231
13232 /* Write ones to all the bits defined by RdMask and WrMask, then
13233 * make sure the read-only bits are not changed and the
13234 * read/write bits are all ones.
13235 */
13236 tw32(offset, read_mask | write_mask);
13237
13238 val = tr32(offset);
13239
13240 /* Test the read-only bits. */
13241 if ((val & read_mask) != read_val)
13242 goto out;
13243
13244 /* Test the read/write bits. */
13245 if ((val & write_mask) != write_mask)
13246 goto out;
13247
13248 tw32(offset, save_val);
13249 }
13250
13251 return 0;
13252
13253 out:
13254 if (netif_msg_hw(tp))
13255 netdev_err(tp->dev,
13256 "Register test failed at offset %x\n", offset);
13257 tw32(offset, save_val);
13258 return -EIO;
13259 }
13260
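/* Worked example of the mask scheme above, using the MAC_ADDR_0_HIGH
 * entry (read_mask 0x00000000, write_mask 0x0000ffff): writing 0 must
 * read back as 0 in the writable low 16 bits, and writing
 * read_mask | write_mask == 0x0000ffff must read back with all of those
 * bits set; bits outside both masks are ignored. Entries such as
 * HOSTCC_STATS_BLK_NIC_ADDR invert this (read_mask 0xffffffff,
 * write_mask 0): the register is read-only and must keep its saved
 * value through both writes.
 */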
13261 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13262 {
13263 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13264 int i;
13265 u32 j;
13266
13267 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13268 for (j = 0; j < len; j += 4) {
13269 u32 val;
13270
13271 tg3_write_mem(tp, offset + j, test_pattern[i]);
13272 tg3_read_mem(tp, offset + j, &val);
13273 if (val != test_pattern[i])
13274 return -EIO;
13275 }
13276 }
13277 return 0;
13278 }
13279
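/* Each window is swept once per test pattern: all zeros, all ones, then
 * the mixed pattern 0xaa55a55a, which should catch both stuck-at-0 and
 * stuck-at-1 bits.
 */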
13280 static int tg3_test_memory(struct tg3 *tp)
13281 {
13282 static struct mem_entry {
13283 u32 offset;
13284 u32 len;
13285 } mem_tbl_570x[] = {
13286 { 0x00000000, 0x00b50},
13287 { 0x00002000, 0x1c000},
13288 { 0xffffffff, 0x00000}
13289 }, mem_tbl_5705[] = {
13290 { 0x00000100, 0x0000c},
13291 { 0x00000200, 0x00008},
13292 { 0x00004000, 0x00800},
13293 { 0x00006000, 0x01000},
13294 { 0x00008000, 0x02000},
13295 { 0x00010000, 0x0e000},
13296 { 0xffffffff, 0x00000}
13297 }, mem_tbl_5755[] = {
13298 { 0x00000200, 0x00008},
13299 { 0x00004000, 0x00800},
13300 { 0x00006000, 0x00800},
13301 { 0x00008000, 0x02000},
13302 { 0x00010000, 0x0c000},
13303 { 0xffffffff, 0x00000}
13304 }, mem_tbl_5906[] = {
13305 { 0x00000200, 0x00008},
13306 { 0x00004000, 0x00400},
13307 { 0x00006000, 0x00400},
13308 { 0x00008000, 0x01000},
13309 { 0x00010000, 0x01000},
13310 { 0xffffffff, 0x00000}
13311 }, mem_tbl_5717[] = {
13312 { 0x00000200, 0x00008},
13313 { 0x00010000, 0x0a000},
13314 { 0x00020000, 0x13c00},
13315 { 0xffffffff, 0x00000}
13316 }, mem_tbl_57765[] = {
13317 { 0x00000200, 0x00008},
13318 { 0x00004000, 0x00800},
13319 { 0x00006000, 0x09800},
13320 { 0x00010000, 0x0a000},
13321 { 0xffffffff, 0x00000}
13322 };
13323 struct mem_entry *mem_tbl;
13324 int err = 0;
13325 int i;
13326
13327 if (tg3_flag(tp, 5717_PLUS))
13328 mem_tbl = mem_tbl_5717;
13329 else if (tg3_flag(tp, 57765_CLASS) ||
13330 tg3_asic_rev(tp) == ASIC_REV_5762)
13331 mem_tbl = mem_tbl_57765;
13332 else if (tg3_flag(tp, 5755_PLUS))
13333 mem_tbl = mem_tbl_5755;
13334 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13335 mem_tbl = mem_tbl_5906;
13336 else if (tg3_flag(tp, 5705_PLUS))
13337 mem_tbl = mem_tbl_5705;
13338 else
13339 mem_tbl = mem_tbl_570x;
13340
13341 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13342 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13343 if (err)
13344 break;
13345 }
13346
13347 return err;
13348 }
13349
13350 #define TG3_TSO_MSS 500
13351
13352 #define TG3_TSO_IP_HDR_LEN 20
13353 #define TG3_TSO_TCP_HDR_LEN 20
13354 #define TG3_TSO_TCP_OPT_LEN 12
13355
13356 static const u8 tg3_tso_header[] = {
13357 0x08, 0x00,
13358 0x45, 0x00, 0x00, 0x00,
13359 0x00, 0x00, 0x40, 0x00,
13360 0x40, 0x06, 0x00, 0x00,
13361 0x0a, 0x00, 0x00, 0x01,
13362 0x0a, 0x00, 0x00, 0x02,
13363 0x0d, 0x00, 0xe0, 0x00,
13364 0x00, 0x00, 0x01, 0x00,
13365 0x00, 0x00, 0x02, 0x00,
13366 0x80, 0x10, 0x10, 0x00,
13367 0x14, 0x09, 0x00, 0x00,
13368 0x01, 0x01, 0x08, 0x0a,
13369 0x11, 0x11, 0x11, 0x11,
13370 0x11, 0x11, 0x11, 0x11,
13371 };
13372
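/* Decoding tg3_tso_header: 0x08 0x00 is the IPv4 ethertype, followed by
 * a 20-byte IP header (0x45 = version 4, IHL 5; protocol 0x06 = TCP;
 * src 10.0.0.1, dst 10.0.0.2; tot_len left 0 to be filled in later) and
 * a 32-byte TCP header (data offset 8, ACK set) whose last 12 bytes are
 * options (NOP, NOP, timestamp), matching TG3_TSO_TCP_OPT_LEN.
 */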
13373 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13374 {
13375 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13376 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13377 u32 budget;
13378 struct sk_buff *skb;
13379 u8 *tx_data, *rx_data;
13380 dma_addr_t map;
13381 int num_pkts, tx_len, rx_len, i, err;
13382 struct tg3_rx_buffer_desc *desc;
13383 struct tg3_napi *tnapi, *rnapi;
13384 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13385
13386 tnapi = &tp->napi[0];
13387 rnapi = &tp->napi[0];
13388 if (tp->irq_cnt > 1) {
13389 if (tg3_flag(tp, ENABLE_RSS))
13390 rnapi = &tp->napi[1];
13391 if (tg3_flag(tp, ENABLE_TSS))
13392 tnapi = &tp->napi[1];
13393 }
13394 coal_now = tnapi->coal_now | rnapi->coal_now;
13395
13396 err = -EIO;
13397
13398 tx_len = pktsz;
13399 skb = netdev_alloc_skb(tp->dev, tx_len);
13400 if (!skb)
13401 return -ENOMEM;
13402
13403 tx_data = skb_put(skb, tx_len);
13404 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13405 memset(tx_data + ETH_ALEN, 0x0, 8);
13406
13407 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13408
13409 if (tso_loopback) {
13410 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13411
13412 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13413 TG3_TSO_TCP_OPT_LEN;
13414
13415 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13416 sizeof(tg3_tso_header));
13417 mss = TG3_TSO_MSS;
13418
13419 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13420 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13421
13422 /* Set the total length field in the IP header */
13423 iph->tot_len = htons((u16)(mss + hdr_len));
13424
13425 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13426 TXD_FLAG_CPU_POST_DMA);
13427
13428 if (tg3_flag(tp, HW_TSO_1) ||
13429 tg3_flag(tp, HW_TSO_2) ||
13430 tg3_flag(tp, HW_TSO_3)) {
13431 struct tcphdr *th;
13432 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13433 th = (struct tcphdr *)&tx_data[val];
13434 th->check = 0;
13435 } else
13436 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13437
13438 if (tg3_flag(tp, HW_TSO_3)) {
13439 mss |= (hdr_len & 0xc) << 12;
13440 if (hdr_len & 0x10)
13441 base_flags |= 0x00000010;
13442 base_flags |= (hdr_len & 0x3e0) << 5;
13443 } else if (tg3_flag(tp, HW_TSO_2))
13444 mss |= hdr_len << 9;
13445 else if (tg3_flag(tp, HW_TSO_1) ||
13446 tg3_asic_rev(tp) == ASIC_REV_5705) {
13447 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13448 } else {
13449 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13450 }
13451
13452 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13453 } else {
13454 num_pkts = 1;
13455 data_off = ETH_HLEN;
13456
13457 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13458 tx_len > VLAN_ETH_FRAME_LEN)
13459 base_flags |= TXD_FLAG_JMB_PKT;
13460 }
13461
13462 for (i = data_off; i < tx_len; i++)
13463 tx_data[i] = (u8) (i & 0xff);
13464
13465 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13466 if (pci_dma_mapping_error(tp->pdev, map)) {
13467 dev_kfree_skb(skb);
13468 return -EIO;
13469 }
13470
13471 val = tnapi->tx_prod;
13472 tnapi->tx_buffers[val].skb = skb;
13473 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13474
13475 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13476 rnapi->coal_now);
13477
13478 udelay(10);
13479
13480 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13481
13482 budget = tg3_tx_avail(tnapi);
13483 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13484 base_flags | TXD_FLAG_END, mss, 0)) {
13485 tnapi->tx_buffers[val].skb = NULL;
13486 dev_kfree_skb(skb);
13487 return -EIO;
13488 }
13489
13490 tnapi->tx_prod++;
13491
13492 /* Sync BD data before updating mailbox */
13493 wmb();
13494
13495 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13496 tr32_mailbox(tnapi->prodmbox);
13497
13498 udelay(10);
13499
13500 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13501 for (i = 0; i < 35; i++) {
13502 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13503 coal_now);
13504
13505 udelay(10);
13506
13507 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13508 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13509 if ((tx_idx == tnapi->tx_prod) &&
13510 (rx_idx == (rx_start_idx + num_pkts)))
13511 break;
13512 }
13513
13514 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13515 dev_kfree_skb(skb);
13516
13517 if (tx_idx != tnapi->tx_prod)
13518 goto out;
13519
13520 if (rx_idx != rx_start_idx + num_pkts)
13521 goto out;
13522
13523 val = data_off;
13524 while (rx_idx != rx_start_idx) {
13525 desc = &rnapi->rx_rcb[rx_start_idx++];
13526 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13527 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13528
13529 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13530 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13531 goto out;
13532
13533 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13534 - ETH_FCS_LEN;
13535
13536 if (!tso_loopback) {
13537 if (rx_len != tx_len)
13538 goto out;
13539
13540 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13541 if (opaque_key != RXD_OPAQUE_RING_STD)
13542 goto out;
13543 } else {
13544 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13545 goto out;
13546 }
13547 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13548 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13549 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13550 goto out;
13551 }
13552
13553 if (opaque_key == RXD_OPAQUE_RING_STD) {
13554 rx_data = tpr->rx_std_buffers[desc_idx].data;
13555 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13556 mapping);
13557 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13558 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13559 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13560 mapping);
13561 } else
13562 goto out;
13563
13564 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13565 PCI_DMA_FROMDEVICE);
13566
13567 rx_data += TG3_RX_OFFSET(tp);
13568 for (i = data_off; i < rx_len; i++, val++) {
13569 if (*(rx_data + i) != (u8) (val & 0xff))
13570 goto out;
13571 }
13572 }
13573
13574 err = 0;
13575
13576 /* tg3_free_rings will unmap and free the rx_data */
13577 out:
13578 return err;
13579 }
13580
13581 #define TG3_STD_LOOPBACK_FAILED 1
13582 #define TG3_JMB_LOOPBACK_FAILED 2
13583 #define TG3_TSO_LOOPBACK_FAILED 4
13584 #define TG3_LOOPBACK_FAILED \
13585 (TG3_STD_LOOPBACK_FAILED | \
13586 TG3_JMB_LOOPBACK_FAILED | \
13587 TG3_TSO_LOOPBACK_FAILED)
13588
13589 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13590 {
13591 int err = -EIO;
13592 u32 eee_cap;
13593 u32 jmb_pkt_sz = 9000;
13594
13595 if (tp->dma_limit)
13596 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13597
13598 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13599 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13600
13601 if (!netif_running(tp->dev)) {
13602 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13603 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13604 if (do_extlpbk)
13605 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13606 goto done;
13607 }
13608
13609 err = tg3_reset_hw(tp, true);
13610 if (err) {
13611 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13612 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13613 if (do_extlpbk)
13614 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13615 goto done;
13616 }
13617
13618 if (tg3_flag(tp, ENABLE_RSS)) {
13619 int i;
13620
13621 /* Reroute all rx packets to the 1st queue */
13622 for (i = MAC_RSS_INDIR_TBL_0;
13623 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13624 tw32(i, 0x0);
13625 }
13626
13627 /* HW errata - mac loopback fails in some cases on 5780.
13628 * Normal traffic and PHY loopback are not affected by
13629 * errata. Also, the MAC loopback test is deprecated for
13630 * all newer ASIC revisions.
13631 */
13632 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13633 !tg3_flag(tp, CPMU_PRESENT)) {
13634 tg3_mac_loopback(tp, true);
13635
13636 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13637 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13638
13639 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13640 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13641 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13642
13643 tg3_mac_loopback(tp, false);
13644 }
13645
13646 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13647 !tg3_flag(tp, USE_PHYLIB)) {
13648 int i;
13649
13650 tg3_phy_lpbk_set(tp, 0, false);
13651
13652 /* Wait for link */
13653 for (i = 0; i < 100; i++) {
13654 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13655 break;
13656 mdelay(1);
13657 }
13658
13659 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13660 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13661 if (tg3_flag(tp, TSO_CAPABLE) &&
13662 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13663 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13664 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13665 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13666 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13667
13668 if (do_extlpbk) {
13669 tg3_phy_lpbk_set(tp, 0, true);
13670
13671 /* All link indications report up, but the hardware
13672 * isn't really ready for about 20 msec. Double it
13673 * to be sure.
13674 */
13675 mdelay(40);
13676
13677 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13678 data[TG3_EXT_LOOPB_TEST] |=
13679 TG3_STD_LOOPBACK_FAILED;
13680 if (tg3_flag(tp, TSO_CAPABLE) &&
13681 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13682 data[TG3_EXT_LOOPB_TEST] |=
13683 TG3_TSO_LOOPBACK_FAILED;
13684 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13685 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13686 data[TG3_EXT_LOOPB_TEST] |=
13687 TG3_JMB_LOOPBACK_FAILED;
13688 }
13689
13690 /* Re-enable gphy autopowerdown. */
13691 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13692 tg3_phy_toggle_apd(tp, true);
13693 }
13694
13695 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13696 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13697
13698 done:
13699 tp->phy_flags |= eee_cap;
13700
13701 return err;
13702 }
13703
13704 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13705 u64 *data)
13706 {
13707 struct tg3 *tp = netdev_priv(dev);
13708 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13709
13710 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13711 if (tg3_power_up(tp)) {
13712 etest->flags |= ETH_TEST_FL_FAILED;
13713 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13714 return;
13715 }
13716 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13717 }
13718
13719 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13720
13721 if (tg3_test_nvram(tp) != 0) {
13722 etest->flags |= ETH_TEST_FL_FAILED;
13723 data[TG3_NVRAM_TEST] = 1;
13724 }
13725 if (!doextlpbk && tg3_test_link(tp)) {
13726 etest->flags |= ETH_TEST_FL_FAILED;
13727 data[TG3_LINK_TEST] = 1;
13728 }
13729 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13730 int err, err2 = 0, irq_sync = 0;
13731
13732 if (netif_running(dev)) {
13733 tg3_phy_stop(tp);
13734 tg3_netif_stop(tp);
13735 irq_sync = 1;
13736 }
13737
13738 tg3_full_lock(tp, irq_sync);
13739 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13740 err = tg3_nvram_lock(tp);
13741 tg3_halt_cpu(tp, RX_CPU_BASE);
13742 if (!tg3_flag(tp, 5705_PLUS))
13743 tg3_halt_cpu(tp, TX_CPU_BASE);
13744 if (!err)
13745 tg3_nvram_unlock(tp);
13746
13747 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13748 tg3_phy_reset(tp);
13749
13750 if (tg3_test_registers(tp) != 0) {
13751 etest->flags |= ETH_TEST_FL_FAILED;
13752 data[TG3_REGISTER_TEST] = 1;
13753 }
13754
13755 if (tg3_test_memory(tp) != 0) {
13756 etest->flags |= ETH_TEST_FL_FAILED;
13757 data[TG3_MEMORY_TEST] = 1;
13758 }
13759
13760 if (doextlpbk)
13761 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13762
13763 if (tg3_test_loopback(tp, data, doextlpbk))
13764 etest->flags |= ETH_TEST_FL_FAILED;
13765
13766 tg3_full_unlock(tp);
13767
13768 if (tg3_test_interrupt(tp) != 0) {
13769 etest->flags |= ETH_TEST_FL_FAILED;
13770 data[TG3_INTERRUPT_TEST] = 1;
13771 }
13772
13773 tg3_full_lock(tp, 0);
13774
13775 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13776 if (netif_running(dev)) {
13777 tg3_flag_set(tp, INIT_COMPLETE);
13778 err2 = tg3_restart_hw(tp, true);
13779 if (!err2)
13780 tg3_netif_start(tp);
13781 }
13782
13783 tg3_full_unlock(tp);
13784
13785 if (irq_sync && !err2)
13786 tg3_phy_start(tp);
13787 }
13788 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13789 tg3_power_down_prepare(tp);
13790
13791 }
13792
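/* SIOCSHWTSTAMP handler: validate the hwtstamp_config passed down from
 * user space, map its rx_filter onto the TG3_RX_PTP_CTL bits, and
 * enable or disable TX timestamping. The flags field is reserved and
 * must be zero; unsupported tx_type or rx_filter values return -ERANGE.
 *
 * A minimal user-space sketch (interface name and socket fd assumed):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */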
13793 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13794 {
13795 struct tg3 *tp = netdev_priv(dev);
13796 struct hwtstamp_config stmpconf;
13797
13798 if (!tg3_flag(tp, PTP_CAPABLE))
13799 return -EOPNOTSUPP;
13800
13801 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13802 return -EFAULT;
13803
13804 if (stmpconf.flags)
13805 return -EINVAL;
13806
13807 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13808 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13809 return -ERANGE;
13810
13811 switch (stmpconf.rx_filter) {
13812 case HWTSTAMP_FILTER_NONE:
13813 tp->rxptpctl = 0;
13814 break;
13815 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13816 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13817 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13818 break;
13819 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13820 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13821 TG3_RX_PTP_CTL_SYNC_EVNT;
13822 break;
13823 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13824 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13825 TG3_RX_PTP_CTL_DELAY_REQ;
13826 break;
13827 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13828 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13829 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13830 break;
13831 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13832 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13833 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13834 break;
13835 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13836 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13837 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13838 break;
13839 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13840 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13841 TG3_RX_PTP_CTL_SYNC_EVNT;
13842 break;
13843 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13844 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13845 TG3_RX_PTP_CTL_SYNC_EVNT;
13846 break;
13847 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13848 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13849 TG3_RX_PTP_CTL_SYNC_EVNT;
13850 break;
13851 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13852 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13853 TG3_RX_PTP_CTL_DELAY_REQ;
13854 break;
13855 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13856 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13857 TG3_RX_PTP_CTL_DELAY_REQ;
13858 break;
13859 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13860 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13861 TG3_RX_PTP_CTL_DELAY_REQ;
13862 break;
13863 default:
13864 return -ERANGE;
13865 }
13866
13867 if (netif_running(dev) && tp->rxptpctl)
13868 tw32(TG3_RX_PTP_CTL,
13869 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13870
13871 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13872 tg3_flag_set(tp, TX_TSTAMP_EN);
13873 else
13874 tg3_flag_clear(tp, TX_TSTAMP_EN);
13875
13876 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13877 -EFAULT : 0;
13878 }
13879
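/* SIOCGHWTSTAMP handler: report the live timestamping state back to
 * user space by reversing the rx_filter-to-register mapping used in
 * tg3_hwtstamp_set() above.
 */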
13880 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13881 {
13882 struct tg3 *tp = netdev_priv(dev);
13883 struct hwtstamp_config stmpconf;
13884
13885 if (!tg3_flag(tp, PTP_CAPABLE))
13886 return -EOPNOTSUPP;
13887
13888 stmpconf.flags = 0;
13889 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13890 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13891
13892 switch (tp->rxptpctl) {
13893 case 0:
13894 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13895 break;
13896 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13897 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13898 break;
13899 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13900 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13901 break;
13902 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13903 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13904 break;
13905 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13906 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13907 break;
13908 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13909 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13910 break;
13911 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13912 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13913 break;
13914 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13915 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13916 break;
13917 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13918 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13919 break;
13920 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13921 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13922 break;
13923 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13924 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13925 break;
13926 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13927 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13928 break;
13929 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13930 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13931 break;
13932 default:
13933 WARN_ON_ONCE(1);
13934 return -ERANGE;
13935 }
13936
13937 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13938 -EFAULT : 0;
13939 }
13940
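/* Main ioctl dispatcher: standard MII register access plus the
 * hardware timestamping requests. When phylib manages the PHY, MII
 * requests are delegated wholesale to phy_mii_ioctl().
 */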
13941 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13942 {
13943 struct mii_ioctl_data *data = if_mii(ifr);
13944 struct tg3 *tp = netdev_priv(dev);
13945 int err;
13946
13947 if (tg3_flag(tp, USE_PHYLIB)) {
13948 struct phy_device *phydev;
13949 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13950 return -EAGAIN;
13951 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13952 return phy_mii_ioctl(phydev, ifr, cmd);
13953 }
13954
13955 switch (cmd) {
13956 case SIOCGMIIPHY:
13957 data->phy_id = tp->phy_addr;
13958
		/* fall through */
13960 case SIOCGMIIREG: {
13961 u32 mii_regval;
13962
13963 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13964 break; /* We have no PHY */
13965
13966 if (!netif_running(dev))
13967 return -EAGAIN;
13968
13969 spin_lock_bh(&tp->lock);
13970 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13971 data->reg_num & 0x1f, &mii_regval);
13972 spin_unlock_bh(&tp->lock);
13973
13974 data->val_out = mii_regval;
13975
13976 return err;
13977 }
13978
13979 case SIOCSMIIREG:
13980 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13981 break; /* We have no PHY */
13982
13983 if (!netif_running(dev))
13984 return -EAGAIN;
13985
13986 spin_lock_bh(&tp->lock);
13987 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13988 data->reg_num & 0x1f, data->val_in);
13989 spin_unlock_bh(&tp->lock);
13990
13991 return err;
13992
13993 case SIOCSHWTSTAMP:
13994 return tg3_hwtstamp_set(dev, ifr);
13995
13996 case SIOCGHWTSTAMP:
13997 return tg3_hwtstamp_get(dev, ifr);
13998
13999 default:
14000 /* do nothing */
14001 break;
14002 }
14003 return -EOPNOTSUPP;
14004 }
14005
14006 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14007 {
14008 struct tg3 *tp = netdev_priv(dev);
14009
14010 memcpy(ec, &tp->coal, sizeof(*ec));
14011 return 0;
14012 }
14013
14014 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14015 {
14016 struct tg3 *tp = netdev_priv(dev);
14017 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14018 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14019
14020 if (!tg3_flag(tp, 5705_PLUS)) {
14021 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14022 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14023 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14024 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14025 }
14026
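	/* Validate every requested value against the chip limits. On
	 * 5705+ parts the IRQ-context and statistics coalescing knobs
	 * are unsupported, so their limits above stay at zero and any
	 * nonzero request for them is rejected.
	 */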
14027 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14028 (!ec->rx_coalesce_usecs) ||
14029 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14030 (!ec->tx_coalesce_usecs) ||
14031 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14032 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14033 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14034 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14035 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14036 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14037 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14038 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14039 return -EINVAL;
14040
14041 /* Only copy relevant parameters, ignore all others. */
14042 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14043 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14044 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14045 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14046 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14047 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14048 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14049 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14050 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14051
14052 if (netif_running(dev)) {
14053 tg3_full_lock(tp, 0);
14054 __tg3_set_coalesce(tp, &tp->coal);
14055 tg3_full_unlock(tp);
14056 }
14057 return 0;
14058 }
14059
14060 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14061 {
14062 struct tg3 *tp = netdev_priv(dev);
14063
14064 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14065 netdev_warn(tp->dev, "Board does not support EEE!\n");
14066 return -EOPNOTSUPP;
14067 }
14068
14069 if (edata->advertised != tp->eee.advertised) {
14070 netdev_warn(tp->dev,
14071 "Direct manipulation of EEE advertisement is not supported\n");
14072 return -EINVAL;
14073 }
14074
14075 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14076 netdev_warn(tp->dev,
14077 "Maximal Tx Lpi timer supported is %#x(u)\n",
14078 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14079 return -EINVAL;
14080 }
14081
14082 tp->eee = *edata;
14083
14084 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14085 tg3_warn_mgmt_link_flap(tp);
14086
14087 if (netif_running(tp->dev)) {
14088 tg3_full_lock(tp, 0);
14089 tg3_setup_eee(tp);
14090 tg3_phy_reset(tp);
14091 tg3_full_unlock(tp);
14092 }
14093
14094 return 0;
14095 }
14096
14097 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14098 {
14099 struct tg3 *tp = netdev_priv(dev);
14100
14101 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14102 netdev_warn(tp->dev,
14103 "Board does not support EEE!\n");
14104 return -EOPNOTSUPP;
14105 }
14106
14107 *edata = tp->eee;
14108 return 0;
14109 }
14110
14111 static const struct ethtool_ops tg3_ethtool_ops = {
14112 .get_drvinfo = tg3_get_drvinfo,
14113 .get_regs_len = tg3_get_regs_len,
14114 .get_regs = tg3_get_regs,
14115 .get_wol = tg3_get_wol,
14116 .set_wol = tg3_set_wol,
14117 .get_msglevel = tg3_get_msglevel,
14118 .set_msglevel = tg3_set_msglevel,
14119 .nway_reset = tg3_nway_reset,
14120 .get_link = ethtool_op_get_link,
14121 .get_eeprom_len = tg3_get_eeprom_len,
14122 .get_eeprom = tg3_get_eeprom,
14123 .set_eeprom = tg3_set_eeprom,
14124 .get_ringparam = tg3_get_ringparam,
14125 .set_ringparam = tg3_set_ringparam,
14126 .get_pauseparam = tg3_get_pauseparam,
14127 .set_pauseparam = tg3_set_pauseparam,
14128 .self_test = tg3_self_test,
14129 .get_strings = tg3_get_strings,
14130 .set_phys_id = tg3_set_phys_id,
14131 .get_ethtool_stats = tg3_get_ethtool_stats,
14132 .get_coalesce = tg3_get_coalesce,
14133 .set_coalesce = tg3_set_coalesce,
14134 .get_sset_count = tg3_get_sset_count,
14135 .get_rxnfc = tg3_get_rxnfc,
14136 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14137 .get_rxfh = tg3_get_rxfh,
14138 .set_rxfh = tg3_set_rxfh,
14139 .get_channels = tg3_get_channels,
14140 .set_channels = tg3_set_channels,
14141 .get_ts_info = tg3_get_ts_info,
14142 .get_eee = tg3_get_eee,
14143 .set_eee = tg3_set_eee,
14144 .get_link_ksettings = tg3_get_link_ksettings,
14145 .set_link_ksettings = tg3_set_link_ksettings,
14146 };
14147
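/* 64-bit stats: computed from the live hardware counters under
 * tp->lock, or served from the snapshot saved at the last halt when
 * the device is down and hw_stats is gone.
 */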
14148 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
14149 struct rtnl_link_stats64 *stats)
14150 {
14151 struct tg3 *tp = netdev_priv(dev);
14152
14153 spin_lock_bh(&tp->lock);
14154 if (!tp->hw_stats) {
14155 *stats = tp->net_stats_prev;
14156 spin_unlock_bh(&tp->lock);
14157 return stats;
14158 }
14159
14160 tg3_get_nstats(tp, stats);
14161 spin_unlock_bh(&tp->lock);
14162
14163 return stats;
14164 }
14165
14166 static void tg3_set_rx_mode(struct net_device *dev)
14167 {
14168 struct tg3 *tp = netdev_priv(dev);
14169
14170 if (!netif_running(dev))
14171 return;
14172
14173 tg3_full_lock(tp, 0);
14174 __tg3_set_rx_mode(dev);
14175 tg3_full_unlock(tp);
14176 }
14177
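/* Apply the MTU-dependent flags: standard chips turn the jumbo ring
 * on or off, while 5780-class chips instead trade TSO capability for
 * jumbo frame support.
 */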
14178 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14179 int new_mtu)
14180 {
14181 dev->mtu = new_mtu;
14182
14183 if (new_mtu > ETH_DATA_LEN) {
14184 if (tg3_flag(tp, 5780_CLASS)) {
14185 netdev_update_features(dev);
14186 tg3_flag_clear(tp, TSO_CAPABLE);
14187 } else {
14188 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14189 }
14190 } else {
14191 if (tg3_flag(tp, 5780_CLASS)) {
14192 tg3_flag_set(tp, TSO_CAPABLE);
14193 netdev_update_features(dev);
14194 }
14195 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14196 }
14197 }
14198
14199 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14200 {
14201 struct tg3 *tp = netdev_priv(dev);
14202 int err;
14203 bool reset_phy = false;
14204
14205 if (!netif_running(dev)) {
		/* We'll just apply the new MTU later, when
		 * the device is brought up.
		 */
14209 tg3_set_mtu(dev, tp, new_mtu);
14210 return 0;
14211 }
14212
14213 tg3_phy_stop(tp);
14214
14215 tg3_netif_stop(tp);
14216
14217 tg3_set_mtu(dev, tp, new_mtu);
14218
14219 tg3_full_lock(tp, 1);
14220
14221 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14222
	/* Reset the PHY, otherwise the read DMA engine will be left in a
	 * mode that breaks all DMA requests up into 256-byte chunks.
	 */
14226 if (tg3_asic_rev(tp) == ASIC_REV_57766)
14227 reset_phy = true;
14228
14229 err = tg3_restart_hw(tp, reset_phy);
14230
14231 if (!err)
14232 tg3_netif_start(tp);
14233
14234 tg3_full_unlock(tp);
14235
14236 if (!err)
14237 tg3_phy_start(tp);
14238
14239 return err;
14240 }
14241
14242 static const struct net_device_ops tg3_netdev_ops = {
14243 .ndo_open = tg3_open,
14244 .ndo_stop = tg3_close,
14245 .ndo_start_xmit = tg3_start_xmit,
14246 .ndo_get_stats64 = tg3_get_stats64,
14247 .ndo_validate_addr = eth_validate_addr,
14248 .ndo_set_rx_mode = tg3_set_rx_mode,
14249 .ndo_set_mac_address = tg3_set_mac_addr,
14250 .ndo_do_ioctl = tg3_ioctl,
14251 .ndo_tx_timeout = tg3_tx_timeout,
14252 .ndo_change_mtu = tg3_change_mtu,
14253 .ndo_fix_features = tg3_fix_features,
14254 .ndo_set_features = tg3_set_features,
14255 #ifdef CONFIG_NET_POLL_CONTROLLER
14256 .ndo_poll_controller = tg3_poll_controller,
14257 #endif
14258 };
14259
14260 static void tg3_get_eeprom_size(struct tg3 *tp)
14261 {
14262 u32 cursize, val, magic;
14263
14264 tp->nvram_size = EEPROM_CHIP_SIZE;
14265
14266 if (tg3_nvram_read(tp, 0, &magic) != 0)
14267 return;
14268
14269 if ((magic != TG3_EEPROM_MAGIC) &&
14270 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14271 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14272 return;
14273
14274 /*
14275 * Size the chip by reading offsets at increasing powers of two.
14276 * When we encounter our validation signature, we know the addressing
14277 * has wrapped around, and thus have our chip size.
14278 */
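	/* Example: on a 512-byte part, reads at 0x10, 0x20, ... return
	 * ordinary data until the read at 0x200 wraps back to offset 0
	 * and returns the magic signature, giving nvram_size = 0x200.
	 */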
14279 cursize = 0x10;
14280
14281 while (cursize < tp->nvram_size) {
14282 if (tg3_nvram_read(tp, cursize, &val) != 0)
14283 return;
14284
14285 if (val == magic)
14286 break;
14287
14288 cursize <<= 1;
14289 }
14290
14291 tp->nvram_size = cursize;
14292 }
14293
14294 static void tg3_get_nvram_size(struct tg3 *tp)
14295 {
14296 u32 val;
14297
14298 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14299 return;
14300
14301 /* Selfboot format */
14302 if (val != TG3_EEPROM_MAGIC) {
14303 tg3_get_eeprom_size(tp);
14304 return;
14305 }
14306
14307 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14308 if (val != 0) {
			/* We want the 16-bit value at offset 0xf2. The
			 * tg3_nvram_read() call reads from NVRAM and
			 * byteswaps the data according to the byteswapping
			 * settings used for all other register accesses,
			 * which guarantees the value we want always lands
			 * in the lower 16 bits. However, NVRAM stores data
			 * in little-endian format, so those two bytes
			 * arrive in the opposite of the CPU's byte order;
			 * the swab16() below brings them back to CPU
			 * endianness.
			 */
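			/* Example: a stored size of 512 (in KB units)
			 * yields tp->nvram_size = 512 * 1024 bytes once
			 * the bytes are swapped back into CPU order.
			 */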
14320 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14321 return;
14322 }
14323 }
14324 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14325 }
14326
14327 static void tg3_get_nvram_info(struct tg3 *tp)
14328 {
14329 u32 nvcfg1;
14330
14331 nvcfg1 = tr32(NVRAM_CFG1);
14332 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14333 tg3_flag_set(tp, FLASH);
14334 } else {
14335 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14336 tw32(NVRAM_CFG1, nvcfg1);
14337 }
14338
14339 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14340 tg3_flag(tp, 5780_CLASS)) {
14341 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14342 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14343 tp->nvram_jedecnum = JEDEC_ATMEL;
14344 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14345 tg3_flag_set(tp, NVRAM_BUFFERED);
14346 break;
14347 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14348 tp->nvram_jedecnum = JEDEC_ATMEL;
14349 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14350 break;
14351 case FLASH_VENDOR_ATMEL_EEPROM:
14352 tp->nvram_jedecnum = JEDEC_ATMEL;
14353 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14354 tg3_flag_set(tp, NVRAM_BUFFERED);
14355 break;
14356 case FLASH_VENDOR_ST:
14357 tp->nvram_jedecnum = JEDEC_ST;
14358 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14359 tg3_flag_set(tp, NVRAM_BUFFERED);
14360 break;
14361 case FLASH_VENDOR_SAIFUN:
14362 tp->nvram_jedecnum = JEDEC_SAIFUN;
14363 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14364 break;
14365 case FLASH_VENDOR_SST_SMALL:
14366 case FLASH_VENDOR_SST_LARGE:
14367 tp->nvram_jedecnum = JEDEC_SST;
14368 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14369 break;
14370 }
14371 } else {
14372 tp->nvram_jedecnum = JEDEC_ATMEL;
14373 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14374 tg3_flag_set(tp, NVRAM_BUFFERED);
14375 }
14376 }
14377
14378 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14379 {
14380 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14381 case FLASH_5752PAGE_SIZE_256:
14382 tp->nvram_pagesize = 256;
14383 break;
14384 case FLASH_5752PAGE_SIZE_512:
14385 tp->nvram_pagesize = 512;
14386 break;
14387 case FLASH_5752PAGE_SIZE_1K:
14388 tp->nvram_pagesize = 1024;
14389 break;
14390 case FLASH_5752PAGE_SIZE_2K:
14391 tp->nvram_pagesize = 2048;
14392 break;
14393 case FLASH_5752PAGE_SIZE_4K:
14394 tp->nvram_pagesize = 4096;
14395 break;
14396 case FLASH_5752PAGE_SIZE_264:
14397 tp->nvram_pagesize = 264;
14398 break;
14399 case FLASH_5752PAGE_SIZE_528:
14400 tp->nvram_pagesize = 528;
14401 break;
14402 }
14403 }
14404
14405 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14406 {
14407 u32 nvcfg1;
14408
14409 nvcfg1 = tr32(NVRAM_CFG1);
14410
14411 /* NVRAM protection for TPM */
14412 if (nvcfg1 & (1 << 27))
14413 tg3_flag_set(tp, PROTECTED_NVRAM);
14414
14415 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14416 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14417 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14418 tp->nvram_jedecnum = JEDEC_ATMEL;
14419 tg3_flag_set(tp, NVRAM_BUFFERED);
14420 break;
14421 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14422 tp->nvram_jedecnum = JEDEC_ATMEL;
14423 tg3_flag_set(tp, NVRAM_BUFFERED);
14424 tg3_flag_set(tp, FLASH);
14425 break;
14426 case FLASH_5752VENDOR_ST_M45PE10:
14427 case FLASH_5752VENDOR_ST_M45PE20:
14428 case FLASH_5752VENDOR_ST_M45PE40:
14429 tp->nvram_jedecnum = JEDEC_ST;
14430 tg3_flag_set(tp, NVRAM_BUFFERED);
14431 tg3_flag_set(tp, FLASH);
14432 break;
14433 }
14434
14435 if (tg3_flag(tp, FLASH)) {
14436 tg3_nvram_get_pagesize(tp, nvcfg1);
14437 } else {
14438 /* For eeprom, set pagesize to maximum eeprom size */
14439 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14440
14441 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14442 tw32(NVRAM_CFG1, nvcfg1);
14443 }
14444 }
14445
14446 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14447 {
14448 u32 nvcfg1, protect = 0;
14449
14450 nvcfg1 = tr32(NVRAM_CFG1);
14451
14452 /* NVRAM protection for TPM */
14453 if (nvcfg1 & (1 << 27)) {
14454 tg3_flag_set(tp, PROTECTED_NVRAM);
14455 protect = 1;
14456 }
14457
14458 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14459 switch (nvcfg1) {
14460 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14461 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14462 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14463 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14464 tp->nvram_jedecnum = JEDEC_ATMEL;
14465 tg3_flag_set(tp, NVRAM_BUFFERED);
14466 tg3_flag_set(tp, FLASH);
14467 tp->nvram_pagesize = 264;
14468 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14469 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14470 tp->nvram_size = (protect ? 0x3e200 :
14471 TG3_NVRAM_SIZE_512KB);
14472 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14473 tp->nvram_size = (protect ? 0x1f200 :
14474 TG3_NVRAM_SIZE_256KB);
14475 else
14476 tp->nvram_size = (protect ? 0x1f200 :
14477 TG3_NVRAM_SIZE_128KB);
14478 break;
14479 case FLASH_5752VENDOR_ST_M45PE10:
14480 case FLASH_5752VENDOR_ST_M45PE20:
14481 case FLASH_5752VENDOR_ST_M45PE40:
14482 tp->nvram_jedecnum = JEDEC_ST;
14483 tg3_flag_set(tp, NVRAM_BUFFERED);
14484 tg3_flag_set(tp, FLASH);
14485 tp->nvram_pagesize = 256;
14486 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14487 tp->nvram_size = (protect ?
14488 TG3_NVRAM_SIZE_64KB :
14489 TG3_NVRAM_SIZE_128KB);
14490 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14491 tp->nvram_size = (protect ?
14492 TG3_NVRAM_SIZE_64KB :
14493 TG3_NVRAM_SIZE_256KB);
14494 else
14495 tp->nvram_size = (protect ?
14496 TG3_NVRAM_SIZE_128KB :
14497 TG3_NVRAM_SIZE_512KB);
14498 break;
14499 }
14500 }
14501
14502 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14503 {
14504 u32 nvcfg1;
14505
14506 nvcfg1 = tr32(NVRAM_CFG1);
14507
14508 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14509 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14510 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14511 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14512 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14513 tp->nvram_jedecnum = JEDEC_ATMEL;
14514 tg3_flag_set(tp, NVRAM_BUFFERED);
14515 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14516
14517 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14518 tw32(NVRAM_CFG1, nvcfg1);
14519 break;
14520 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14521 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14522 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14523 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14524 tp->nvram_jedecnum = JEDEC_ATMEL;
14525 tg3_flag_set(tp, NVRAM_BUFFERED);
14526 tg3_flag_set(tp, FLASH);
14527 tp->nvram_pagesize = 264;
14528 break;
14529 case FLASH_5752VENDOR_ST_M45PE10:
14530 case FLASH_5752VENDOR_ST_M45PE20:
14531 case FLASH_5752VENDOR_ST_M45PE40:
14532 tp->nvram_jedecnum = JEDEC_ST;
14533 tg3_flag_set(tp, NVRAM_BUFFERED);
14534 tg3_flag_set(tp, FLASH);
14535 tp->nvram_pagesize = 256;
14536 break;
14537 }
14538 }
14539
14540 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14541 {
14542 u32 nvcfg1, protect = 0;
14543
14544 nvcfg1 = tr32(NVRAM_CFG1);
14545
14546 /* NVRAM protection for TPM */
14547 if (nvcfg1 & (1 << 27)) {
14548 tg3_flag_set(tp, PROTECTED_NVRAM);
14549 protect = 1;
14550 }
14551
14552 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14553 switch (nvcfg1) {
14554 case FLASH_5761VENDOR_ATMEL_ADB021D:
14555 case FLASH_5761VENDOR_ATMEL_ADB041D:
14556 case FLASH_5761VENDOR_ATMEL_ADB081D:
14557 case FLASH_5761VENDOR_ATMEL_ADB161D:
14558 case FLASH_5761VENDOR_ATMEL_MDB021D:
14559 case FLASH_5761VENDOR_ATMEL_MDB041D:
14560 case FLASH_5761VENDOR_ATMEL_MDB081D:
14561 case FLASH_5761VENDOR_ATMEL_MDB161D:
14562 tp->nvram_jedecnum = JEDEC_ATMEL;
14563 tg3_flag_set(tp, NVRAM_BUFFERED);
14564 tg3_flag_set(tp, FLASH);
14565 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14566 tp->nvram_pagesize = 256;
14567 break;
14568 case FLASH_5761VENDOR_ST_A_M45PE20:
14569 case FLASH_5761VENDOR_ST_A_M45PE40:
14570 case FLASH_5761VENDOR_ST_A_M45PE80:
14571 case FLASH_5761VENDOR_ST_A_M45PE16:
14572 case FLASH_5761VENDOR_ST_M_M45PE20:
14573 case FLASH_5761VENDOR_ST_M_M45PE40:
14574 case FLASH_5761VENDOR_ST_M_M45PE80:
14575 case FLASH_5761VENDOR_ST_M_M45PE16:
14576 tp->nvram_jedecnum = JEDEC_ST;
14577 tg3_flag_set(tp, NVRAM_BUFFERED);
14578 tg3_flag_set(tp, FLASH);
14579 tp->nvram_pagesize = 256;
14580 break;
14581 }
14582
14583 if (protect) {
14584 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14585 } else {
14586 switch (nvcfg1) {
14587 case FLASH_5761VENDOR_ATMEL_ADB161D:
14588 case FLASH_5761VENDOR_ATMEL_MDB161D:
14589 case FLASH_5761VENDOR_ST_A_M45PE16:
14590 case FLASH_5761VENDOR_ST_M_M45PE16:
14591 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14592 break;
14593 case FLASH_5761VENDOR_ATMEL_ADB081D:
14594 case FLASH_5761VENDOR_ATMEL_MDB081D:
14595 case FLASH_5761VENDOR_ST_A_M45PE80:
14596 case FLASH_5761VENDOR_ST_M_M45PE80:
14597 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14598 break;
14599 case FLASH_5761VENDOR_ATMEL_ADB041D:
14600 case FLASH_5761VENDOR_ATMEL_MDB041D:
14601 case FLASH_5761VENDOR_ST_A_M45PE40:
14602 case FLASH_5761VENDOR_ST_M_M45PE40:
14603 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14604 break;
14605 case FLASH_5761VENDOR_ATMEL_ADB021D:
14606 case FLASH_5761VENDOR_ATMEL_MDB021D:
14607 case FLASH_5761VENDOR_ST_A_M45PE20:
14608 case FLASH_5761VENDOR_ST_M_M45PE20:
14609 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14610 break;
14611 }
14612 }
14613 }
14614
14615 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14616 {
14617 tp->nvram_jedecnum = JEDEC_ATMEL;
14618 tg3_flag_set(tp, NVRAM_BUFFERED);
14619 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14620 }
14621
14622 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14623 {
14624 u32 nvcfg1;
14625
14626 nvcfg1 = tr32(NVRAM_CFG1);
14627
14628 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14629 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14630 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14631 tp->nvram_jedecnum = JEDEC_ATMEL;
14632 tg3_flag_set(tp, NVRAM_BUFFERED);
14633 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14634
14635 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14636 tw32(NVRAM_CFG1, nvcfg1);
14637 return;
14638 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14639 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14640 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14641 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14642 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14643 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14644 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14645 tp->nvram_jedecnum = JEDEC_ATMEL;
14646 tg3_flag_set(tp, NVRAM_BUFFERED);
14647 tg3_flag_set(tp, FLASH);
14648
14649 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14650 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14651 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14652 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14653 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14654 break;
14655 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14656 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14657 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14658 break;
14659 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14660 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14661 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14662 break;
14663 }
14664 break;
14665 case FLASH_5752VENDOR_ST_M45PE10:
14666 case FLASH_5752VENDOR_ST_M45PE20:
14667 case FLASH_5752VENDOR_ST_M45PE40:
14668 tp->nvram_jedecnum = JEDEC_ST;
14669 tg3_flag_set(tp, NVRAM_BUFFERED);
14670 tg3_flag_set(tp, FLASH);
14671
14672 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14673 case FLASH_5752VENDOR_ST_M45PE10:
14674 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14675 break;
14676 case FLASH_5752VENDOR_ST_M45PE20:
14677 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14678 break;
14679 case FLASH_5752VENDOR_ST_M45PE40:
14680 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14681 break;
14682 }
14683 break;
14684 default:
14685 tg3_flag_set(tp, NO_NVRAM);
14686 return;
14687 }
14688
14689 tg3_nvram_get_pagesize(tp, nvcfg1);
14690 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14691 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14692 }
14693
14695 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14696 {
14697 u32 nvcfg1;
14698
14699 nvcfg1 = tr32(NVRAM_CFG1);
14700
14701 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14702 case FLASH_5717VENDOR_ATMEL_EEPROM:
14703 case FLASH_5717VENDOR_MICRO_EEPROM:
14704 tp->nvram_jedecnum = JEDEC_ATMEL;
14705 tg3_flag_set(tp, NVRAM_BUFFERED);
14706 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14707
14708 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14709 tw32(NVRAM_CFG1, nvcfg1);
14710 return;
14711 case FLASH_5717VENDOR_ATMEL_MDB011D:
14712 case FLASH_5717VENDOR_ATMEL_ADB011B:
14713 case FLASH_5717VENDOR_ATMEL_ADB011D:
14714 case FLASH_5717VENDOR_ATMEL_MDB021D:
14715 case FLASH_5717VENDOR_ATMEL_ADB021B:
14716 case FLASH_5717VENDOR_ATMEL_ADB021D:
14717 case FLASH_5717VENDOR_ATMEL_45USPT:
14718 tp->nvram_jedecnum = JEDEC_ATMEL;
14719 tg3_flag_set(tp, NVRAM_BUFFERED);
14720 tg3_flag_set(tp, FLASH);
14721
14722 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14723 case FLASH_5717VENDOR_ATMEL_MDB021D:
		/* Detect size with tg3_get_nvram_size() */
14725 break;
14726 case FLASH_5717VENDOR_ATMEL_ADB021B:
14727 case FLASH_5717VENDOR_ATMEL_ADB021D:
14728 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14729 break;
14730 default:
14731 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14732 break;
14733 }
14734 break;
14735 case FLASH_5717VENDOR_ST_M_M25PE10:
14736 case FLASH_5717VENDOR_ST_A_M25PE10:
14737 case FLASH_5717VENDOR_ST_M_M45PE10:
14738 case FLASH_5717VENDOR_ST_A_M45PE10:
14739 case FLASH_5717VENDOR_ST_M_M25PE20:
14740 case FLASH_5717VENDOR_ST_A_M25PE20:
14741 case FLASH_5717VENDOR_ST_M_M45PE20:
14742 case FLASH_5717VENDOR_ST_A_M45PE20:
14743 case FLASH_5717VENDOR_ST_25USPT:
14744 case FLASH_5717VENDOR_ST_45USPT:
14745 tp->nvram_jedecnum = JEDEC_ST;
14746 tg3_flag_set(tp, NVRAM_BUFFERED);
14747 tg3_flag_set(tp, FLASH);
14748
14749 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14750 case FLASH_5717VENDOR_ST_M_M25PE20:
14751 case FLASH_5717VENDOR_ST_M_M45PE20:
		/* Detect size with tg3_get_nvram_size() */
14753 break;
14754 case FLASH_5717VENDOR_ST_A_M25PE20:
14755 case FLASH_5717VENDOR_ST_A_M45PE20:
14756 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14757 break;
14758 default:
14759 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14760 break;
14761 }
14762 break;
14763 default:
14764 tg3_flag_set(tp, NO_NVRAM);
14765 return;
14766 }
14767
14768 tg3_nvram_get_pagesize(tp, nvcfg1);
14769 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14770 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14771 }
14772
14773 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14774 {
14775 u32 nvcfg1, nvmpinstrp;
14776
14777 nvcfg1 = tr32(NVRAM_CFG1);
14778 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14779
14780 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14781 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14782 tg3_flag_set(tp, NO_NVRAM);
14783 return;
14784 }
14785
14786 switch (nvmpinstrp) {
14787 case FLASH_5762_EEPROM_HD:
14788 nvmpinstrp = FLASH_5720_EEPROM_HD;
14789 break;
14790 case FLASH_5762_EEPROM_LD:
14791 nvmpinstrp = FLASH_5720_EEPROM_LD;
14792 break;
14793 case FLASH_5720VENDOR_M_ST_M45PE20:
14794 /* This pinstrap supports multiple sizes, so force it
14795 * to read the actual size from location 0xf0.
14796 */
14797 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14798 break;
14799 }
14800 }
14801
14802 switch (nvmpinstrp) {
14803 case FLASH_5720_EEPROM_HD:
14804 case FLASH_5720_EEPROM_LD:
14805 tp->nvram_jedecnum = JEDEC_ATMEL;
14806 tg3_flag_set(tp, NVRAM_BUFFERED);
14807
14808 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14809 tw32(NVRAM_CFG1, nvcfg1);
14810 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14811 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14812 else
14813 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14814 return;
14815 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14816 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14817 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14818 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14819 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14820 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14821 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14822 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14823 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14824 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14825 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14826 case FLASH_5720VENDOR_ATMEL_45USPT:
14827 tp->nvram_jedecnum = JEDEC_ATMEL;
14828 tg3_flag_set(tp, NVRAM_BUFFERED);
14829 tg3_flag_set(tp, FLASH);
14830
14831 switch (nvmpinstrp) {
14832 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14833 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14834 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14835 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14836 break;
14837 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14838 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14839 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14840 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14841 break;
14842 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14843 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14844 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14845 break;
14846 default:
14847 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14848 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14849 break;
14850 }
14851 break;
14852 case FLASH_5720VENDOR_M_ST_M25PE10:
14853 case FLASH_5720VENDOR_M_ST_M45PE10:
14854 case FLASH_5720VENDOR_A_ST_M25PE10:
14855 case FLASH_5720VENDOR_A_ST_M45PE10:
14856 case FLASH_5720VENDOR_M_ST_M25PE20:
14857 case FLASH_5720VENDOR_M_ST_M45PE20:
14858 case FLASH_5720VENDOR_A_ST_M25PE20:
14859 case FLASH_5720VENDOR_A_ST_M45PE20:
14860 case FLASH_5720VENDOR_M_ST_M25PE40:
14861 case FLASH_5720VENDOR_M_ST_M45PE40:
14862 case FLASH_5720VENDOR_A_ST_M25PE40:
14863 case FLASH_5720VENDOR_A_ST_M45PE40:
14864 case FLASH_5720VENDOR_M_ST_M25PE80:
14865 case FLASH_5720VENDOR_M_ST_M45PE80:
14866 case FLASH_5720VENDOR_A_ST_M25PE80:
14867 case FLASH_5720VENDOR_A_ST_M45PE80:
14868 case FLASH_5720VENDOR_ST_25USPT:
14869 case FLASH_5720VENDOR_ST_45USPT:
14870 tp->nvram_jedecnum = JEDEC_ST;
14871 tg3_flag_set(tp, NVRAM_BUFFERED);
14872 tg3_flag_set(tp, FLASH);
14873
14874 switch (nvmpinstrp) {
14875 case FLASH_5720VENDOR_M_ST_M25PE20:
14876 case FLASH_5720VENDOR_M_ST_M45PE20:
14877 case FLASH_5720VENDOR_A_ST_M25PE20:
14878 case FLASH_5720VENDOR_A_ST_M45PE20:
14879 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14880 break;
14881 case FLASH_5720VENDOR_M_ST_M25PE40:
14882 case FLASH_5720VENDOR_M_ST_M45PE40:
14883 case FLASH_5720VENDOR_A_ST_M25PE40:
14884 case FLASH_5720VENDOR_A_ST_M45PE40:
14885 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14886 break;
14887 case FLASH_5720VENDOR_M_ST_M25PE80:
14888 case FLASH_5720VENDOR_M_ST_M45PE80:
14889 case FLASH_5720VENDOR_A_ST_M25PE80:
14890 case FLASH_5720VENDOR_A_ST_M45PE80:
14891 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14892 break;
14893 default:
14894 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14895 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14896 break;
14897 }
14898 break;
14899 default:
14900 tg3_flag_set(tp, NO_NVRAM);
14901 return;
14902 }
14903
14904 tg3_nvram_get_pagesize(tp, nvcfg1);
14905 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14906 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14907
14908 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14909 u32 val;
14910
14911 if (tg3_nvram_read(tp, 0, &val))
14912 return;
14913
14914 if (val != TG3_EEPROM_MAGIC &&
14915 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14916 tg3_flag_set(tp, NO_NVRAM);
14917 }
14918 }
14919
14920 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14921 static void tg3_nvram_init(struct tg3 *tp)
14922 {
14923 if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14925 tg3_flag_clear(tp, NVRAM);
14926 tg3_flag_clear(tp, NVRAM_BUFFERED);
14927 tg3_flag_set(tp, NO_NVRAM);
14928 return;
14929 }
14930
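	/* Reset the EEPROM access state machine and program the
	 * default serial clock period before touching the part.
	 */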
14931 tw32_f(GRC_EEPROM_ADDR,
14932 (EEPROM_ADDR_FSM_RESET |
14933 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14934 EEPROM_ADDR_CLKPERD_SHIFT)));
14935
14936 msleep(1);
14937
14938 /* Enable seeprom accesses. */
14939 tw32_f(GRC_LOCAL_CTRL,
14940 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14941 udelay(100);
14942
14943 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14944 tg3_asic_rev(tp) != ASIC_REV_5701) {
14945 tg3_flag_set(tp, NVRAM);
14946
14947 if (tg3_nvram_lock(tp)) {
14948 netdev_warn(tp->dev,
14949 "Cannot get nvram lock, %s failed\n",
14950 __func__);
14951 return;
14952 }
14953 tg3_enable_nvram_access(tp);
14954
14955 tp->nvram_size = 0;
14956
14957 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14958 tg3_get_5752_nvram_info(tp);
14959 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14960 tg3_get_5755_nvram_info(tp);
14961 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14962 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14963 tg3_asic_rev(tp) == ASIC_REV_5785)
14964 tg3_get_5787_nvram_info(tp);
14965 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14966 tg3_get_5761_nvram_info(tp);
14967 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14968 tg3_get_5906_nvram_info(tp);
14969 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14970 tg3_flag(tp, 57765_CLASS))
14971 tg3_get_57780_nvram_info(tp);
14972 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14973 tg3_asic_rev(tp) == ASIC_REV_5719)
14974 tg3_get_5717_nvram_info(tp);
14975 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14976 tg3_asic_rev(tp) == ASIC_REV_5762)
14977 tg3_get_5720_nvram_info(tp);
14978 else
14979 tg3_get_nvram_info(tp);
14980
14981 if (tp->nvram_size == 0)
14982 tg3_get_nvram_size(tp);
14983
14984 tg3_disable_nvram_access(tp);
14985 tg3_nvram_unlock(tp);
14986
14987 } else {
14988 tg3_flag_clear(tp, NVRAM);
14989 tg3_flag_clear(tp, NVRAM_BUFFERED);
14990
14991 tg3_get_eeprom_size(tp);
14992 }
14993 }
14994
14995 struct subsys_tbl_ent {
14996 u16 subsys_vendor, subsys_devid;
14997 u32 phy_id;
14998 };
14999
15000 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15001 /* Broadcom boards. */
15002 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15003 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15004 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15005 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15006 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15007 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15008 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15009 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15010 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15011 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15012 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15013 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15014 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15015 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15016 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15017 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15018 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15019 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15020 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15021 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15022 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15023 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15024
15025 /* 3com boards. */
15026 { TG3PCI_SUBVENDOR_ID_3COM,
15027 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15028 { TG3PCI_SUBVENDOR_ID_3COM,
15029 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15030 { TG3PCI_SUBVENDOR_ID_3COM,
15031 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15032 { TG3PCI_SUBVENDOR_ID_3COM,
15033 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15034 { TG3PCI_SUBVENDOR_ID_3COM,
15035 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15036
15037 /* DELL boards. */
15038 { TG3PCI_SUBVENDOR_ID_DELL,
15039 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15040 { TG3PCI_SUBVENDOR_ID_DELL,
15041 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15042 { TG3PCI_SUBVENDOR_ID_DELL,
15043 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15044 { TG3PCI_SUBVENDOR_ID_DELL,
15045 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15046
15047 /* Compaq boards. */
15048 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15049 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15050 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15051 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15052 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15053 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15054 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15055 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15056 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15057 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15058
15059 /* IBM boards. */
15060 { TG3PCI_SUBVENDOR_ID_IBM,
15061 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15062 };
15063
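/* Match the PCI subsystem vendor/device IDs against the table above
 * to recover the PHY ID on boards whose NVRAM carries no usable
 * signature.
 */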
15064 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15065 {
15066 int i;
15067
15068 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15069 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15070 tp->pdev->subsystem_vendor) &&
15071 (subsys_id_to_phy_id[i].subsys_devid ==
15072 tp->pdev->subsystem_device))
15073 return &subsys_id_to_phy_id[i];
15074 }
15075 return NULL;
15076 }
15077
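/* Pull the factory configuration that the bootcode has staged in NIC
 * SRAM: PHY ID and type, LED mode, WOL/ASF/APE capabilities and the
 * various workaround flags. When the SRAM signature is absent, the
 * defaults set at the top of the function stand.
 */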
15078 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15079 {
15080 u32 val;
15081
15082 tp->phy_id = TG3_PHY_ID_INVALID;
15083 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15084
15085 /* Assume an onboard device and WOL capable by default. */
15086 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15087 tg3_flag_set(tp, WOL_CAP);
15088
15089 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15090 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15091 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15092 tg3_flag_set(tp, IS_NIC);
15093 }
15094 val = tr32(VCPU_CFGSHDW);
15095 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15096 tg3_flag_set(tp, ASPM_WORKAROUND);
15097 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15098 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15099 tg3_flag_set(tp, WOL_ENABLE);
15100 device_set_wakeup_enable(&tp->pdev->dev, true);
15101 }
15102 goto done;
15103 }
15104
15105 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15106 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15107 u32 nic_cfg, led_cfg;
15108 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15109 u32 nic_phy_id, ver, eeprom_phy_id;
15110 int eeprom_phy_serdes = 0;
15111
15112 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15113 tp->nic_sram_data_cfg = nic_cfg;
15114
15115 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15116 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15117 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15118 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15119 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15120 (ver > 0) && (ver < 0x100))
15121 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15122
15123 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15124 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15125
15126 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15127 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15128 tg3_asic_rev(tp) == ASIC_REV_5720)
15129 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15130
15131 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15132 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15133 eeprom_phy_serdes = 1;
15134
15135 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15136 if (nic_phy_id != 0) {
15137 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15138 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15139
15140 eeprom_phy_id = (id1 >> 16) << 10;
15141 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15142 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15143 } else
15144 eeprom_phy_id = 0;
15145
15146 tp->phy_id = eeprom_phy_id;
15147 if (eeprom_phy_serdes) {
15148 if (!tg3_flag(tp, 5705_PLUS))
15149 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15150 else
15151 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15152 }
15153
15154 if (tg3_flag(tp, 5750_PLUS))
15155 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15156 SHASTA_EXT_LED_MODE_MASK);
15157 else
15158 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15159
15160 switch (led_cfg) {
15161 default:
15162 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15163 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15164 break;
15165
15166 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15167 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15168 break;
15169
15170 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15171 tp->led_ctrl = LED_CTRL_MODE_MAC;
15172
			/* Some older 5700/5701 bootcode reports 0
			 * (MAC_MODE) here; default to PHY_1_MODE in
			 * that case.
			 */
15176 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15177 tg3_asic_rev(tp) == ASIC_REV_5701)
15178 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15179
15180 break;
15181
15182 case SHASTA_EXT_LED_SHARED:
15183 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15184 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15185 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15186 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15187 LED_CTRL_MODE_PHY_2);
15188
15189 if (tg3_flag(tp, 5717_PLUS) ||
15190 tg3_asic_rev(tp) == ASIC_REV_5762)
15191 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15192 LED_CTRL_BLINK_RATE_MASK;
15193
15194 break;
15195
15196 case SHASTA_EXT_LED_MAC:
15197 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15198 break;
15199
15200 case SHASTA_EXT_LED_COMBO:
15201 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15202 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15203 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15204 LED_CTRL_MODE_PHY_2);
15205 break;
15206
15207 }
15208
15209 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15210 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15211 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15212 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15213
15214 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15215 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15216
15217 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15218 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15219 if ((tp->pdev->subsystem_vendor ==
15220 PCI_VENDOR_ID_ARIMA) &&
15221 (tp->pdev->subsystem_device == 0x205a ||
15222 tp->pdev->subsystem_device == 0x2063))
15223 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15224 } else {
15225 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15226 tg3_flag_set(tp, IS_NIC);
15227 }
15228
15229 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15230 tg3_flag_set(tp, ENABLE_ASF);
15231 if (tg3_flag(tp, 5750_PLUS))
15232 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15233 }
15234
15235 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15236 tg3_flag(tp, 5750_PLUS))
15237 tg3_flag_set(tp, ENABLE_APE);
15238
15239 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15240 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15241 tg3_flag_clear(tp, WOL_CAP);
15242
15243 if (tg3_flag(tp, WOL_CAP) &&
15244 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15245 tg3_flag_set(tp, WOL_ENABLE);
15246 device_set_wakeup_enable(&tp->pdev->dev, true);
15247 }
15248
15249 if (cfg2 & (1 << 17))
15250 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15251
		/* SerDes signal pre-emphasis in register 0x590 is set
		 * by the bootcode if bit 18 is set.
		 */
15254 if (cfg2 & (1 << 18))
15255 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15256
15257 if ((tg3_flag(tp, 57765_PLUS) ||
15258 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15259 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15260 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15261 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15262
15263 if (tg3_flag(tp, PCI_EXPRESS)) {
15264 u32 cfg3;
15265
15266 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15267 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15268 !tg3_flag(tp, 57765_PLUS) &&
15269 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15270 tg3_flag_set(tp, ASPM_WORKAROUND);
15271 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15272 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15273 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15274 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15275 }
15276
15277 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15278 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15279 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15280 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15281 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15282 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15283
15284 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15285 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15286 }
15287 done:
15288 if (tg3_flag(tp, WOL_CAP))
15289 device_set_wakeup_enable(&tp->pdev->dev,
15290 tg3_flag(tp, WOL_ENABLE));
15291 else
15292 device_set_wakeup_capable(&tp->pdev->dev, false);
15293 }
15294
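/* Read one OTP word by way of the APE: issue a read command through
 * the APE OTP registers and poll for completion for up to 1 ms
 * (100 polls of 10 us). The NVRAM arbitration lock is held across
 * the access.
 */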
15295 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15296 {
15297 int i, err;
15298 u32 val2, off = offset * 8;
15299
15300 err = tg3_nvram_lock(tp);
15301 if (err)
15302 return err;
15303
15304 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15305 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15306 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15307 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15308 udelay(10);
15309
15310 for (i = 0; i < 100; i++) {
15311 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15312 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15313 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15314 break;
15315 }
15316 udelay(10);
15317 }
15318
15319 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15320
15321 tg3_nvram_unlock(tp);
15322 if (val2 & APE_OTP_STATUS_CMD_DONE)
15323 return 0;
15324
15325 return -EBUSY;
15326 }
15327
15328 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15329 {
15330 int i;
15331 u32 val;
15332
15333 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15334 tw32(OTP_CTRL, cmd);
15335
15336 /* Wait for up to 1 ms for command to execute. */
15337 for (i = 0; i < 100; i++) {
15338 val = tr32(OTP_STATUS);
15339 if (val & OTP_STATUS_CMD_DONE)
15340 break;
15341 udelay(10);
15342 }
15343
15344 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15345 }
15346
15347 /* Read the gphy configuration from the OTP region of the chip. The gphy
15348 * configuration is a 32-bit value that straddles the alignment boundary.
15349 * We do two 32-bit reads and then shift and merge the results.
15350 */
15351 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15352 {
15353 u32 bhalf_otp, thalf_otp;
15354
15355 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15356
15357 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15358 return 0;
15359
15360 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15361
15362 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15363 return 0;
15364
15365 thalf_otp = tr32(OTP_READ_DATA);
15366
15367 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15368
15369 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15370 return 0;
15371
15372 bhalf_otp = tr32(OTP_READ_DATA);
15373
15374 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15375 }
15376
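/* Seed link_config with everything this PHY can advertise; actual
 * speed and duplex stay UNKNOWN until autonegotiation completes.
 */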
15377 static void tg3_phy_init_link_config(struct tg3 *tp)
15378 {
15379 u32 adv = ADVERTISED_Autoneg;
15380
15381 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15382 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15383 adv |= ADVERTISED_1000baseT_Half;
15384 adv |= ADVERTISED_1000baseT_Full;
15385 }
15386
15387 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15388 adv |= ADVERTISED_100baseT_Half |
15389 ADVERTISED_100baseT_Full |
15390 ADVERTISED_10baseT_Half |
15391 ADVERTISED_10baseT_Full |
15392 ADVERTISED_TP;
15393 else
15394 adv |= ADVERTISED_FIBRE;
15395
15396 tp->link_config.advertising = adv;
15397 tp->link_config.speed = SPEED_UNKNOWN;
15398 tp->link_config.duplex = DUPLEX_UNKNOWN;
15399 tp->link_config.autoneg = AUTONEG_ENABLE;
15400 tp->link_config.active_speed = SPEED_UNKNOWN;
15401 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15402
15403 tp->old_link = -1;
15404 }
15405
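/* Identify the PHY. The ID is read from MII_PHYSID1/2 unless ASF or
 * APE firmware owns the PHY; if the hardware ID is not recognized we
 * fall back to the ID recorded in NVRAM, then to the subsystem ID
 * table. EEE capability and the initial link configuration are set
 * up here as well.
 */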
15406 static int tg3_phy_probe(struct tg3 *tp)
15407 {
15408 u32 hw_phy_id_1, hw_phy_id_2;
15409 u32 hw_phy_id, hw_phy_id_masked;
15410 int err;
15411
15412 /* flow control autonegotiation is default behavior */
15413 tg3_flag_set(tp, PAUSE_AUTONEG);
15414 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15415
15416 if (tg3_flag(tp, ENABLE_APE)) {
15417 switch (tp->pci_fn) {
15418 case 0:
15419 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15420 break;
15421 case 1:
15422 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15423 break;
15424 case 2:
15425 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15426 break;
15427 case 3:
15428 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15429 break;
15430 }
15431 }
15432
15433 if (!tg3_flag(tp, ENABLE_ASF) &&
15434 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15435 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15436 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15437 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15438
15439 if (tg3_flag(tp, USE_PHYLIB))
15440 return tg3_phy_init(tp);
15441
15442 /* Reading the PHY ID register can conflict with ASF
15443 * firmware access to the PHY hardware.
15444 */
15445 err = 0;
15446 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15447 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15448 } else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane. If it doesn't look good, we fall back
		 * to the PHY_ID found in the EEPROM area and, failing
		 * that, to the hard-coded subsystem ID table.
		 */
15454 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15455 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15456
15457 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15458 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15459 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15460
15461 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15462 }
15463
15464 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15465 tp->phy_id = hw_phy_id;
15466 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15467 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15468 else
15469 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15470 } else {
15471 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15472 /* Do nothing, phy ID already set up in
15473 * tg3_get_eeprom_hw_cfg().
15474 */
15475 } else {
15476 struct subsys_tbl_ent *p;
15477
15478 /* No eeprom signature? Try the hardcoded
15479 * subsys device table.
15480 */
15481 p = tg3_lookup_by_subsys(tp);
15482 if (p) {
15483 tp->phy_id = p->phy_id;
15484 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* We have seen the PHY IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785, and there are
				 * probably more. For now, just assume the
				 * PHY is supported whenever it is connected
				 * to an SSB core.
				 */
15492 return -ENODEV;
15493 }
15494
15495 if (!tp->phy_id ||
15496 tp->phy_id == TG3_PHY_ID_BCM8002)
15497 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15498 }
15499 }
15500
15501 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15502 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15503 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15504 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15505 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15506 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15507 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15508 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15509 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15510 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15511
15512 tp->eee.supported = SUPPORTED_100baseT_Full |
15513 SUPPORTED_1000baseT_Full;
15514 tp->eee.advertised = ADVERTISED_100baseT_Full |
15515 ADVERTISED_1000baseT_Full;
15516 tp->eee.eee_enabled = 1;
15517 tp->eee.tx_lpi_enabled = 1;
15518 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15519 }
15520
15521 tg3_phy_init_link_config(tp);
15522
15523 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15524 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15525 !tg3_flag(tp, ENABLE_APE) &&
15526 !tg3_flag(tp, ENABLE_ASF)) {
15527 u32 bmsr, dummy;
15528
15529 tg3_readphy(tp, MII_BMSR, &bmsr);
15530 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15531 (bmsr & BMSR_LSTATUS))
15532 goto skip_phy_reset;
15533
15534 err = tg3_phy_reset(tp);
15535 if (err)
15536 return err;
15537
15538 tg3_phy_set_wirespeed(tp);
15539
15540 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15541 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15542 tp->link_config.flowctrl);
15543
15544 tg3_writephy(tp, MII_BMCR,
15545 BMCR_ANENABLE | BMCR_ANRESTART);
15546 }
15547 }
15548
15549 skip_phy_reset:
15550 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15551 err = tg3_init_5401phy_dsp(tp);
15552 if (err)
15553 return err;
15554
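		/* Program the 5401 DSP a second time; presumably a
		 * single pass does not always take on this PHY.
		 */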
15555 err = tg3_init_5401phy_dsp(tp);
15556 }
15557
15558 return err;
15559 }
15560
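/* Extract the board part number, and on Dell boards (VPD MFR_ID
 * "1028") a bootcode version hint, from the PCI VPD read-only
 * section. When no usable VPD exists, fall back to naming the board
 * after its PCI device ID.
 */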
15561 static void tg3_read_vpd(struct tg3 *tp)
15562 {
15563 u8 *vpd_data;
15564 unsigned int block_end, rosize, len;
15565 u32 vpdlen;
15566 int j, i = 0;
15567
15568 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15569 if (!vpd_data)
15570 goto out_no_vpd;
15571
15572 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15573 if (i < 0)
15574 goto out_not_found;
15575
15576 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15577 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15578 i += PCI_VPD_LRDT_TAG_SIZE;
15579
15580 if (block_end > vpdlen)
15581 goto out_not_found;
15582
15583 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15584 PCI_VPD_RO_KEYWORD_MFR_ID);
15585 if (j > 0) {
15586 len = pci_vpd_info_field_size(&vpd_data[j]);
15587
15588 j += PCI_VPD_INFO_FLD_HDR_SIZE;
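/* "1028" is Dell's PCI vendor ID rendered in ASCII; only on
 * such boards does the code go on to read the VENDOR0
 * firmware-version keyword below.
 */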
15589 if (j + len > block_end || len != 4 ||
15590 memcmp(&vpd_data[j], "1028", 4))
15591 goto partno;
15592
15593 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15594 PCI_VPD_RO_KEYWORD_VENDOR0);
15595 if (j < 0)
15596 goto partno;
15597
15598 len = pci_vpd_info_field_size(&vpd_data[j]);
15599
15600 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15601 if (j + len > block_end)
15602 goto partno;
15603
15604 if (len >= sizeof(tp->fw_ver))
15605 len = sizeof(tp->fw_ver) - 1;
15606 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15607 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15608 &vpd_data[j]);
15609 }
15610
15611 partno:
15612 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15613 PCI_VPD_RO_KEYWORD_PARTNO);
15614 if (i < 0)
15615 goto out_not_found;
15616
15617 len = pci_vpd_info_field_size(&vpd_data[i]);
15618
15619 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15620 if (len > TG3_BPN_SIZE ||
15621 (len + i) > vpdlen)
15622 goto out_not_found;
15623
15624 memcpy(tp->board_part_number, &vpd_data[i], len);
15625
15626 out_not_found:
15627 kfree(vpd_data);
15628 if (tp->board_part_number[0])
15629 return;
15630
15631 out_no_vpd:
15632 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15633 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15634 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15635 strcpy(tp->board_part_number, "BCM5717");
15636 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15637 strcpy(tp->board_part_number, "BCM5718");
15638 else
15639 goto nomatch;
15640 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15641 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15642 strcpy(tp->board_part_number, "BCM57780");
15643 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15644 strcpy(tp->board_part_number, "BCM57760");
15645 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15646 strcpy(tp->board_part_number, "BCM57790");
15647 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15648 strcpy(tp->board_part_number, "BCM57788");
15649 else
15650 goto nomatch;
15651 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15652 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15653 strcpy(tp->board_part_number, "BCM57761");
15654 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15655 strcpy(tp->board_part_number, "BCM57765");
15656 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15657 strcpy(tp->board_part_number, "BCM57781");
15658 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15659 strcpy(tp->board_part_number, "BCM57785");
15660 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15661 strcpy(tp->board_part_number, "BCM57791");
15662 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15663 strcpy(tp->board_part_number, "BCM57795");
15664 else
15665 goto nomatch;
15666 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15667 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15668 strcpy(tp->board_part_number, "BCM57762");
15669 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15670 strcpy(tp->board_part_number, "BCM57766");
15671 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15672 strcpy(tp->board_part_number, "BCM57782");
15673 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15674 strcpy(tp->board_part_number, "BCM57786");
15675 else
15676 goto nomatch;
15677 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15678 strcpy(tp->board_part_number, "BCM95906");
15679 } else {
15680 nomatch:
15681 strcpy(tp->board_part_number, "none");
15682 }
15683 }
15684
15685 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15686 {
15687 u32 val;
15688
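/* A valid firmware image starts with a word whose top bits
 * (masked with 0xfc000000) read 0x0c000000, followed by a
 * zero word.
 */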
15689 if (tg3_nvram_read(tp, offset, &val) ||
15690 (val & 0xfc000000) != 0x0c000000 ||
15691 tg3_nvram_read(tp, offset + 4, &val) ||
15692 val != 0)
15693 return 0;
15694
15695 return 1;
15696 }
15697
15698 static void tg3_read_bc_ver(struct tg3 *tp)
15699 {
15700 u32 val, offset, start, ver_offset;
15701 int i, dst_off;
15702 bool newver = false;
15703
15704 if (tg3_nvram_read(tp, 0xc, &offset) ||
15705 tg3_nvram_read(tp, 0x4, &start))
15706 return;
15707
15708 offset = tg3_nvram_logical_addr(tp, offset);
15709
15710 if (tg3_nvram_read(tp, offset, &val))
15711 return;
15712
15713 if ((val & 0xfc000000) == 0x0c000000) {
15714 if (tg3_nvram_read(tp, offset + 4, &val))
15715 return;
15716
15717 if (val == 0)
15718 newver = true;
15719 }
15720
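/* Append the bootcode version after whatever tg3_read_vpd()
 * already placed in fw_ver.
 */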
15721 dst_off = strlen(tp->fw_ver);
15722
15723 if (newver) {
15724 if (TG3_VER_SIZE - dst_off < 16 ||
15725 tg3_nvram_read(tp, offset + 8, &ver_offset))
15726 return;
15727
15728 offset = offset + ver_offset - start;
15729 for (i = 0; i < 16; i += 4) {
15730 __be32 v;
15731 if (tg3_nvram_read_be32(tp, offset + i, &v))
15732 return;
15733
15734 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15735 }
15736 } else {
15737 u32 major, minor;
15738
15739 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15740 return;
15741
15742 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15743 TG3_NVM_BCVER_MAJSFT;
15744 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15745 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15746 "v%d.%02d", major, minor);
15747 }
15748 }
15749
15750 static void tg3_read_hwsb_ver(struct tg3 *tp)
15751 {
15752 u32 val, major, minor;
15753
15754 /* Use native endian representation */
15755 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15756 return;
15757
15758 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15759 TG3_NVM_HWSB_CFG1_MAJSFT;
15760 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15761 TG3_NVM_HWSB_CFG1_MINSFT;
15762
15763 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15764 }
15765
15766 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15767 {
15768 u32 offset, major, minor, build;
15769
15770 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15771
15772 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15773 return;
15774
15775 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15776 case TG3_EEPROM_SB_REVISION_0:
15777 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15778 break;
15779 case TG3_EEPROM_SB_REVISION_2:
15780 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15781 break;
15782 case TG3_EEPROM_SB_REVISION_3:
15783 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15784 break;
15785 case TG3_EEPROM_SB_REVISION_4:
15786 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15787 break;
15788 case TG3_EEPROM_SB_REVISION_5:
15789 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15790 break;
15791 case TG3_EEPROM_SB_REVISION_6:
15792 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15793 break;
15794 default:
15795 return;
15796 }
15797
15798 if (tg3_nvram_read(tp, offset, &val))
15799 return;
15800
15801 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15802 TG3_EEPROM_SB_EDH_BLD_SHFT;
15803 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15804 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15805 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15806
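/* minor is printed with two digits and build maps to a single
 * suffix letter 'a'-'z', so reject anything larger.
 */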
15807 if (minor > 99 || build > 26)
15808 return;
15809
15810 offset = strlen(tp->fw_ver);
15811 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15812 " v%d.%02d", major, minor);
15813
15814 if (build > 0) {
15815 offset = strlen(tp->fw_ver);
15816 if (offset < TG3_VER_SIZE - 1)
15817 tp->fw_ver[offset] = 'a' + build - 1;
15818 }
15819 }
15820
15821 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15822 {
15823 u32 val, offset, start;
15824 int i, vlen;
15825
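/* Walk the NVRAM directory looking for the ASF init-code
 * entry.
 */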
15826 for (offset = TG3_NVM_DIR_START;
15827 offset < TG3_NVM_DIR_END;
15828 offset += TG3_NVM_DIRENT_SIZE) {
15829 if (tg3_nvram_read(tp, offset, &val))
15830 return;
15831
15832 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15833 break;
15834 }
15835
15836 if (offset == TG3_NVM_DIR_END)
15837 return;
15838
15839 if (!tg3_flag(tp, 5705_PLUS))
15840 start = 0x08000000;
15841 else if (tg3_nvram_read(tp, offset - 4, &start))
15842 return;
15843
15844 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15845 !tg3_fw_img_is_valid(tp, offset) ||
15846 tg3_nvram_read(tp, offset + 8, &val))
15847 return;
15848
15849 offset += val - start;
15850
15851 vlen = strlen(tp->fw_ver);
15852
15853 tp->fw_ver[vlen++] = ',';
15854 tp->fw_ver[vlen++] = ' ';
15855
15856 for (i = 0; i < 4; i++) {
15857 __be32 v;
15858 if (tg3_nvram_read_be32(tp, offset, &v))
15859 return;
15860
15861 offset += sizeof(v);
15862
15863 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15864 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15865 break;
15866 }
15867
15868 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15869 vlen += sizeof(v);
15870 }
15871 }
15872
15873 static void tg3_probe_ncsi(struct tg3 *tp)
15874 {
15875 u32 apedata;
15876
15877 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15878 if (apedata != APE_SEG_SIG_MAGIC)
15879 return;
15880
15881 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15882 if (!(apedata & APE_FW_STATUS_READY))
15883 return;
15884
15885 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15886 tg3_flag_set(tp, APE_HAS_NCSI);
15887 }
15888
15889 static void tg3_read_dash_ver(struct tg3 *tp)
15890 {
15891 int vlen;
15892 u32 apedata;
15893 char *fwtype;
15894
15895 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15896
15897 if (tg3_flag(tp, APE_HAS_NCSI))
15898 fwtype = "NCSI";
15899 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15900 fwtype = "SMASH";
15901 else
15902 fwtype = "DASH";
15903
15904 vlen = strlen(tp->fw_ver);
15905
15906 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15907 fwtype,
15908 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15909 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15910 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15911 (apedata & APE_FW_VERSION_BLDMSK));
15912 }
15913
15914 static void tg3_read_otp_ver(struct tg3 *tp)
15915 {
15916 u32 val, val2;
15917
15918 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15919 return;
15920
15921 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15922 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15923 TG3_OTP_MAGIC0_VALID(val)) {
15924 u64 val64 = (u64) val << 32 | val2;
15925 u32 ver = 0;
15926 int i, vlen;
15927
15928 for (i = 0; i < 7; i++) {
15929 if ((val64 & 0xff) == 0)
15930 break;
15931 ver = val64 & 0xff;
15932 val64 >>= 8;
15933 }
15934 vlen = strlen(tp->fw_ver);
15935 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15936 }
15937 }
15938
15939 static void tg3_read_fw_ver(struct tg3 *tp)
15940 {
15941 u32 val;
15942 bool vpd_vers = false;
15943
15944 if (tp->fw_ver[0] != 0)
15945 vpd_vers = true;
15946
15947 if (tg3_flag(tp, NO_NVRAM)) {
15948 strcat(tp->fw_ver, "sb");
15949 tg3_read_otp_ver(tp);
15950 return;
15951 }
15952
15953 if (tg3_nvram_read(tp, 0, &val))
15954 return;
15955
15956 if (val == TG3_EEPROM_MAGIC)
15957 tg3_read_bc_ver(tp);
15958 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15959 tg3_read_sb_ver(tp, val);
15960 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15961 tg3_read_hwsb_ver(tp);
15962
15963 if (tg3_flag(tp, ENABLE_ASF)) {
15964 if (tg3_flag(tp, ENABLE_APE)) {
15965 tg3_probe_ncsi(tp);
15966 if (!vpd_vers)
15967 tg3_read_dash_ver(tp);
15968 } else if (!vpd_vers) {
15969 tg3_read_mgmtfw_ver(tp);
15970 }
15971 }
15972
15973 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15974 }
15975
15976 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15977 {
15978 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15979 return TG3_RX_RET_MAX_SIZE_5717;
15980 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15981 return TG3_RX_RET_MAX_SIZE_5700;
15982 else
15983 return TG3_RX_RET_MAX_SIZE_5705;
15984 }
15985
15986 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15987 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15988 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15989 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15990 { },
15991 };
15992
15993 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15994 {
15995 struct pci_dev *peer;
15996 unsigned int func, devnr = tp->pdev->devfn & ~7;
15997
15998 for (func = 0; func < 8; func++) {
15999 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16000 if (peer && peer != tp->pdev)
16001 break;
16002 pci_dev_put(peer);
16003 }
16004 /* 5704 can be configured in single-port mode; set peer to
16005 * tp->pdev in that case.
16006 */
16007 if (!peer) {
16008 peer = tp->pdev;
16009 return peer;
16010 }
16011
16012 /*
16013 * We don't need to keep the refcount elevated; there's no way
16014 * to remove one half of this device without removing the other
16015 */
16016 pci_dev_put(peer);
16017
16018 return peer;
16019 }
16020
16021 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16022 {
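/* The chip revision normally lives in the upper bits of
 * MISC_HOST_CTRL; newer devices report a placeholder value
 * there and expose the real revision in a product-ID config
 * register, selected below.
 */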
16023 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16024 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16025 u32 reg;
16026
16027 /* All devices that use the alternate
16028 * ASIC REV location have a CPMU.
16029 */
16030 tg3_flag_set(tp, CPMU_PRESENT);
16031
16032 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16033 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16034 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16035 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16036 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16037 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16038 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16039 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16040 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16041 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16042 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16043 reg = TG3PCI_GEN2_PRODID_ASICREV;
16044 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16045 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16046 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16047 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16048 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16049 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16050 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16051 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16052 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16053 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16054 reg = TG3PCI_GEN15_PRODID_ASICREV;
16055 else
16056 reg = TG3PCI_PRODID_ASICREV;
16057
16058 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16059 }
16060
16061 /* Wrong chip ID in 5752 A0. This code can be removed later
16062 * as A0 is not in production.
16063 */
16064 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16065 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16066
16067 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16068 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16069
16070 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16071 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16072 tg3_asic_rev(tp) == ASIC_REV_5720)
16073 tg3_flag_set(tp, 5717_PLUS);
16074
16075 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16076 tg3_asic_rev(tp) == ASIC_REV_57766)
16077 tg3_flag_set(tp, 57765_CLASS);
16078
16079 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16080 tg3_asic_rev(tp) == ASIC_REV_5762)
16081 tg3_flag_set(tp, 57765_PLUS);
16082
16083 /* Intentionally exclude ASIC_REV_5906 */
16084 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16085 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16086 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16087 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16088 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16089 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16090 tg3_flag(tp, 57765_PLUS))
16091 tg3_flag_set(tp, 5755_PLUS);
16092
16093 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16094 tg3_asic_rev(tp) == ASIC_REV_5714)
16095 tg3_flag_set(tp, 5780_CLASS);
16096
16097 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16098 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16099 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16100 tg3_flag(tp, 5755_PLUS) ||
16101 tg3_flag(tp, 5780_CLASS))
16102 tg3_flag_set(tp, 5750_PLUS);
16103
16104 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16105 tg3_flag(tp, 5750_PLUS))
16106 tg3_flag_set(tp, 5705_PLUS);
16107 }
16108
16109 static bool tg3_10_100_only_device(struct tg3 *tp,
16110 const struct pci_device_id *ent)
16111 {
16112 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16113
16114 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16115 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16116 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16117 return true;
16118
16119 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16120 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16121 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16122 return true;
16123 } else {
16124 return true;
16125 }
16126 }
16127
16128 return false;
16129 }
16130
16131 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16132 {
16133 u32 misc_ctrl_reg;
16134 u32 pci_state_reg, grc_misc_cfg;
16135 u32 val;
16136 u16 pci_cmd;
16137 int err;
16138
16139 /* Force memory write invalidate off. If we leave it on,
16140 * then on 5700_BX chips we have to enable a workaround.
16141 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16142 * to match the cacheline size. The Broadcom driver has this
16143 * workaround but turns MWI off all the time, so it never uses
16144 * it. This seems to suggest that the workaround is insufficient.
16145 */
16146 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16147 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16148 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16149
16150 /* Important! -- Make sure register accesses are byteswapped
16151 * correctly. Also, for those chips that require it, make
16152 * sure that indirect register accesses are enabled before
16153 * the first operation.
16154 */
16155 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16156 &misc_ctrl_reg);
16157 tp->misc_host_ctrl |= (misc_ctrl_reg &
16158 MISC_HOST_CTRL_CHIPREV);
16159 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16160 tp->misc_host_ctrl);
16161
16162 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16163
16164 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16165 * we need to disable memory and use config. cycles
16166 * only to access all registers. The 5702/03 chips
16167 * can mistakenly decode the special cycles from the
16168 * ICH chipsets as memory write cycles, causing corruption
16169 * of register and memory space. Only certain ICH bridges
16170 * will drive special cycles with non-zero data during the
16171 * address phase which can fall within the 5703's address
16172 * range. This is not an ICH bug as the PCI spec allows
16173 * non-zero address during special cycles. However, only
16174 * these ICH bridges are known to drive non-zero addresses
16175 * during special cycles.
16176 *
16177 * Since special cycles do not cross PCI bridges, we only
16178 * enable this workaround if the 5703 is on the secondary
16179 * bus of these ICH bridges.
16180 */
16181 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16182 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16183 static struct tg3_dev_id {
16184 u32 vendor;
16185 u32 device;
16186 u32 rev;
16187 } ich_chipsets[] = {
16188 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16189 PCI_ANY_ID },
16190 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16191 PCI_ANY_ID },
16192 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16193 0xa },
16194 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16195 PCI_ANY_ID },
16196 { },
16197 };
16198 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16199 struct pci_dev *bridge = NULL;
16200
16201 while (pci_id->vendor != 0) {
16202 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16203 bridge);
16204 if (!bridge) {
16205 pci_id++;
16206 continue;
16207 }
16208 if (pci_id->rev != PCI_ANY_ID) {
16209 if (bridge->revision > pci_id->rev)
16210 continue;
16211 }
16212 if (bridge->subordinate &&
16213 (bridge->subordinate->number ==
16214 tp->pdev->bus->number)) {
16215 tg3_flag_set(tp, ICH_WORKAROUND);
16216 pci_dev_put(bridge);
16217 break;
16218 }
16219 }
16220 }
16221
16222 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16223 static struct tg3_dev_id {
16224 u32 vendor;
16225 u32 device;
16226 } bridge_chipsets[] = {
16227 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16228 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16229 { },
16230 };
16231 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16232 struct pci_dev *bridge = NULL;
16233
16234 while (pci_id->vendor != 0) {
16235 bridge = pci_get_device(pci_id->vendor,
16236 pci_id->device,
16237 bridge);
16238 if (!bridge) {
16239 pci_id++;
16240 continue;
16241 }
16242 if (bridge->subordinate &&
16243 (bridge->subordinate->number <=
16244 tp->pdev->bus->number) &&
16245 (bridge->subordinate->busn_res.end >=
16246 tp->pdev->bus->number)) {
16247 tg3_flag_set(tp, 5701_DMA_BUG);
16248 pci_dev_put(bridge);
16249 break;
16250 }
16251 }
16252 }
16253
16254 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16255 * DMA addresses > 40-bit. This bridge may have additional
16256 * 57xx devices behind it in some 4-port NIC designs, for example.
16257 * Any tg3 device found behind the bridge will also need the 40-bit
16258 * DMA workaround.
16259 */
16260 if (tg3_flag(tp, 5780_CLASS)) {
16261 tg3_flag_set(tp, 40BIT_DMA_BUG);
16262 tp->msi_cap = tp->pdev->msi_cap;
16263 } else {
16264 struct pci_dev *bridge = NULL;
16265
16266 do {
16267 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16268 PCI_DEVICE_ID_SERVERWORKS_EPB,
16269 bridge);
16270 if (bridge && bridge->subordinate &&
16271 (bridge->subordinate->number <=
16272 tp->pdev->bus->number) &&
16273 (bridge->subordinate->busn_res.end >=
16274 tp->pdev->bus->number)) {
16275 tg3_flag_set(tp, 40BIT_DMA_BUG);
16276 pci_dev_put(bridge);
16277 break;
16278 }
16279 } while (bridge);
16280 }
16281
16282 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16283 tg3_asic_rev(tp) == ASIC_REV_5714)
16284 tp->pdev_peer = tg3_find_peer(tp);
16285
16286 /* Determine TSO capabilities */
16287 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16288 ; /* Do nothing. HW bug. */
16289 else if (tg3_flag(tp, 57765_PLUS))
16290 tg3_flag_set(tp, HW_TSO_3);
16291 else if (tg3_flag(tp, 5755_PLUS) ||
16292 tg3_asic_rev(tp) == ASIC_REV_5906)
16293 tg3_flag_set(tp, HW_TSO_2);
16294 else if (tg3_flag(tp, 5750_PLUS)) {
16295 tg3_flag_set(tp, HW_TSO_1);
16296 tg3_flag_set(tp, TSO_BUG);
16297 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16298 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16299 tg3_flag_clear(tp, TSO_BUG);
16300 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16301 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16302 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16303 tg3_flag_set(tp, FW_TSO);
16304 tg3_flag_set(tp, TSO_BUG);
16305 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16306 tp->fw_needed = FIRMWARE_TG3TSO5;
16307 else
16308 tp->fw_needed = FIRMWARE_TG3TSO;
16309 }
16310
16311 /* Selectively allow TSO based on operating conditions */
16312 if (tg3_flag(tp, HW_TSO_1) ||
16313 tg3_flag(tp, HW_TSO_2) ||
16314 tg3_flag(tp, HW_TSO_3) ||
16315 tg3_flag(tp, FW_TSO)) {
16316 /* For firmware TSO, assume ASF is disabled.
16317 * We'll disable TSO later if we discover ASF
16318 * is enabled in tg3_get_eeprom_hw_cfg().
16319 */
16320 tg3_flag_set(tp, TSO_CAPABLE);
16321 } else {
16322 tg3_flag_clear(tp, TSO_CAPABLE);
16323 tg3_flag_clear(tp, TSO_BUG);
16324 tp->fw_needed = NULL;
16325 }
16326
16327 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16328 tp->fw_needed = FIRMWARE_TG3;
16329
16330 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16331 tp->fw_needed = FIRMWARE_TG357766;
16332
16333 tp->irq_max = 1;
16334
16335 if (tg3_flag(tp, 5750_PLUS)) {
16336 tg3_flag_set(tp, SUPPORT_MSI);
16337 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16338 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16339 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16340 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16341 tp->pdev_peer == tp->pdev))
16342 tg3_flag_clear(tp, SUPPORT_MSI);
16343
16344 if (tg3_flag(tp, 5755_PLUS) ||
16345 tg3_asic_rev(tp) == ASIC_REV_5906) {
16346 tg3_flag_set(tp, 1SHOT_MSI);
16347 }
16348
16349 if (tg3_flag(tp, 57765_PLUS)) {
16350 tg3_flag_set(tp, SUPPORT_MSIX);
16351 tp->irq_max = TG3_IRQ_MAX_VECS;
16352 }
16353 }
16354
16355 tp->txq_max = 1;
16356 tp->rxq_max = 1;
16357 if (tp->irq_max > 1) {
16358 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16359 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16360
16361 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16362 tg3_asic_rev(tp) == ASIC_REV_5720)
16363 tp->txq_max = tp->irq_max - 1;
16364 }
16365
16366 if (tg3_flag(tp, 5755_PLUS) ||
16367 tg3_asic_rev(tp) == ASIC_REV_5906)
16368 tg3_flag_set(tp, SHORT_DMA_BUG);
16369
16370 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16371 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16372
16373 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16374 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16375 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16376 tg3_asic_rev(tp) == ASIC_REV_5762)
16377 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16378
16379 if (tg3_flag(tp, 57765_PLUS) &&
16380 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16381 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16382
16383 if (!tg3_flag(tp, 5705_PLUS) ||
16384 tg3_flag(tp, 5780_CLASS) ||
16385 tg3_flag(tp, USE_JUMBO_BDFLAG))
16386 tg3_flag_set(tp, JUMBO_CAPABLE);
16387
16388 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16389 &pci_state_reg);
16390
16391 if (pci_is_pcie(tp->pdev)) {
16392 u16 lnkctl;
16393
16394 tg3_flag_set(tp, PCI_EXPRESS);
16395
16396 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16397 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16398 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16399 tg3_flag_clear(tp, HW_TSO_2);
16400 tg3_flag_clear(tp, TSO_CAPABLE);
16401 }
16402 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16403 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16404 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16405 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16406 tg3_flag_set(tp, CLKREQ_BUG);
16407 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16408 tg3_flag_set(tp, L1PLLPD_EN);
16409 }
16410 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16411 /* BCM5785 devices are effectively PCIe devices, and should
16412 * follow PCIe codepaths, but do not have a PCIe capabilities
16413 * section.
16414 */
16415 tg3_flag_set(tp, PCI_EXPRESS);
16416 } else if (!tg3_flag(tp, 5705_PLUS) ||
16417 tg3_flag(tp, 5780_CLASS)) {
16418 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16419 if (!tp->pcix_cap) {
16420 dev_err(&tp->pdev->dev,
16421 "Cannot find PCI-X capability, aborting\n");
16422 return -EIO;
16423 }
16424
16425 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16426 tg3_flag_set(tp, PCIX_MODE);
16427 }
16428
16429 /* If we have an AMD 762 or VIA K8T800 chipset, write
16430 * reordering to the mailbox registers done by the host
16431 * controller can cause major trouble. We read back from
16432 * every mailbox register write to force the writes to be
16433 * posted to the chip in order.
16434 */
16435 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16436 !tg3_flag(tp, PCI_EXPRESS))
16437 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16438
16439 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16440 &tp->pci_cacheline_sz);
16441 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16442 &tp->pci_lat_timer);
16443 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16444 tp->pci_lat_timer < 64) {
16445 tp->pci_lat_timer = 64;
16446 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16447 tp->pci_lat_timer);
16448 }
16449
16450 /* Important! -- It is critical that the PCI-X hw workaround
16451 * situation is decided before the first MMIO register access.
16452 */
16453 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16454 /* 5700 BX chips need to have their TX producer index
16455 * mailboxes written twice to work around a bug.
16456 */
16457 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16458
16459 /* If we are in PCI-X mode, enable register write workaround.
16460 *
16461 * The workaround is to use indirect register accesses
16462 * for all chip writes not to mailbox registers.
16463 */
16464 if (tg3_flag(tp, PCIX_MODE)) {
16465 u32 pm_reg;
16466
16467 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16468
16469 /* The chip can have its power management PCI config
16470 * space registers clobbered due to this bug.
16471 * So explicitly force the chip into D0 here.
16472 */
16473 pci_read_config_dword(tp->pdev,
16474 tp->pdev->pm_cap + PCI_PM_CTRL,
16475 &pm_reg);
16476 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16477 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16478 pci_write_config_dword(tp->pdev,
16479 tp->pdev->pm_cap + PCI_PM_CTRL,
16480 pm_reg);
16481
16482 /* Also, force SERR#/PERR# in PCI command. */
16483 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16484 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16485 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16486 }
16487 }
16488
16489 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16490 tg3_flag_set(tp, PCI_HIGH_SPEED);
16491 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16492 tg3_flag_set(tp, PCI_32BIT);
16493
16494 /* Chip-specific fixup from Broadcom driver */
16495 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16496 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16497 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16498 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16499 }
16500
16501 /* Default fast path register access methods */
16502 tp->read32 = tg3_read32;
16503 tp->write32 = tg3_write32;
16504 tp->read32_mbox = tg3_read32;
16505 tp->write32_mbox = tg3_write32;
16506 tp->write32_tx_mbox = tg3_write32;
16507 tp->write32_rx_mbox = tg3_write32;
16508
16509 /* Various workaround register access methods */
16510 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16511 tp->write32 = tg3_write_indirect_reg32;
16512 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16513 (tg3_flag(tp, PCI_EXPRESS) &&
16514 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16515 /*
16516 * Back-to-back register writes can cause problems on these
16517 * chips; the workaround is to read back all reg writes
16518 * except those to mailbox regs.
16519 *
16520 * See tg3_write_indirect_reg32().
16521 */
16522 tp->write32 = tg3_write_flush_reg32;
16523 }
16524
16525 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16526 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16527 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16528 tp->write32_rx_mbox = tg3_write_flush_reg32;
16529 }
16530
16531 if (tg3_flag(tp, ICH_WORKAROUND)) {
16532 tp->read32 = tg3_read_indirect_reg32;
16533 tp->write32 = tg3_write_indirect_reg32;
16534 tp->read32_mbox = tg3_read_indirect_mbox;
16535 tp->write32_mbox = tg3_write_indirect_mbox;
16536 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16537 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16538
16539 iounmap(tp->regs);
16540 tp->regs = NULL;
16541
16542 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16543 pci_cmd &= ~PCI_COMMAND_MEMORY;
16544 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16545 }
16546 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16547 tp->read32_mbox = tg3_read32_mbox_5906;
16548 tp->write32_mbox = tg3_write32_mbox_5906;
16549 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16550 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16551 }
16552
16553 if (tp->write32 == tg3_write_indirect_reg32 ||
16554 (tg3_flag(tp, PCIX_MODE) &&
16555 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16556 tg3_asic_rev(tp) == ASIC_REV_5701)))
16557 tg3_flag_set(tp, SRAM_USE_CONFIG);
16558
16559 /* The memory arbiter has to be enabled in order for SRAM accesses
16560 * to succeed. Normally on powerup the tg3 chip firmware will make
16561 * sure it is enabled, but other entities such as system netboot
16562 * code might disable it.
16563 */
16564 val = tr32(MEMARB_MODE);
16565 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16566
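/* Work out which PCI function this port is; multi-function
 * devices use it later to locate per-port data such as the
 * MAC address in NVRAM.
 */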
16567 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16568 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16569 tg3_flag(tp, 5780_CLASS)) {
16570 if (tg3_flag(tp, PCIX_MODE)) {
16571 pci_read_config_dword(tp->pdev,
16572 tp->pcix_cap + PCI_X_STATUS,
16573 &val);
16574 tp->pci_fn = val & 0x7;
16575 }
16576 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16577 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16578 tg3_asic_rev(tp) == ASIC_REV_5720) {
16579 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16580 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16581 val = tr32(TG3_CPMU_STATUS);
16582
16583 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16584 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16585 else
16586 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16587 TG3_CPMU_STATUS_FSHFT_5719;
16588 }
16589
16590 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16591 tp->write32_tx_mbox = tg3_write_flush_reg32;
16592 tp->write32_rx_mbox = tg3_write_flush_reg32;
16593 }
16594
16595 /* Get eeprom hw config before calling tg3_set_power_state().
16596 * In particular, the TG3_FLAG_IS_NIC flag must be
16597 * determined before calling tg3_set_power_state() so that
16598 * we know whether or not to switch out of Vaux power.
16599 * When the flag is set, it means that GPIO1 is used for eeprom
16600 * write protect and also implies that it is a LOM where GPIOs
16601 * are not used to switch power.
16602 */
16603 tg3_get_eeprom_hw_cfg(tp);
16604
16605 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16606 tg3_flag_clear(tp, TSO_CAPABLE);
16607 tg3_flag_clear(tp, TSO_BUG);
16608 tp->fw_needed = NULL;
16609 }
16610
16611 if (tg3_flag(tp, ENABLE_APE)) {
16612 /* Allow reads and writes to the
16613 * APE register and memory space.
16614 */
16615 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16616 PCISTATE_ALLOW_APE_SHMEM_WR |
16617 PCISTATE_ALLOW_APE_PSPACE_WR;
16618 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16619 pci_state_reg);
16620
16621 tg3_ape_lock_init(tp);
16622 }
16623
16624 /* Set up tp->grc_local_ctrl before calling
16625 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16626 * will bring 5700's external PHY out of reset.
16627 * It is also used as eeprom write protect on LOMs.
16628 */
16629 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16630 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16631 tg3_flag(tp, EEPROM_WRITE_PROT))
16632 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16633 GRC_LCLCTRL_GPIO_OUTPUT1);
16634 /* Unused GPIO3 must be driven as output on 5752 because there
16635 * are no pull-up resistors on unused GPIO pins.
16636 */
16637 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16638 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16639
16640 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16641 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16642 tg3_flag(tp, 57765_CLASS))
16643 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16644
16645 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16646 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16647 /* Turn off the debug UART. */
16648 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16649 if (tg3_flag(tp, IS_NIC))
16650 /* Keep VMain power. */
16651 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16652 GRC_LCLCTRL_GPIO_OUTPUT0;
16653 }
16654
16655 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16656 tp->grc_local_ctrl |=
16657 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16658
16659 /* Switch out of Vaux if it is a NIC */
16660 tg3_pwrsrc_switch_to_vmain(tp);
16661
16662 /* Derive initial jumbo mode from MTU assigned in
16663 * ether_setup() via the alloc_etherdev() call
16664 */
16665 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16666 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16667
16668 /* Determine WakeOnLan speed to use. */
16669 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16670 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16671 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16672 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16673 tg3_flag_clear(tp, WOL_SPEED_100MB);
16674 } else {
16675 tg3_flag_set(tp, WOL_SPEED_100MB);
16676 }
16677
16678 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16679 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16680
16681 /* A few boards don't want the Ethernet@WireSpeed phy feature */
16682 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16683 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16684 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16685 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16686 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16687 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16688 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16689
16690 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16691 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16692 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16693 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16694 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16695
16696 if (tg3_flag(tp, 5705_PLUS) &&
16697 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16698 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16699 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16700 !tg3_flag(tp, 57765_PLUS)) {
16701 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16702 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16703 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16704 tg3_asic_rev(tp) == ASIC_REV_5761) {
16705 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16706 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16707 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16708 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16709 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16710 } else
16711 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16712 }
16713
16714 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16715 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16716 tp->phy_otp = tg3_read_otp_phycfg(tp);
16717 if (tp->phy_otp == 0)
16718 tp->phy_otp = TG3_OTP_DEFAULT;
16719 }
16720
16721 if (tg3_flag(tp, CPMU_PRESENT))
16722 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16723 else
16724 tp->mi_mode = MAC_MI_MODE_BASE;
16725
16726 tp->coalesce_mode = 0;
16727 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16728 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16729 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16730
16731 /* Set these bits to enable statistics workaround. */
16732 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16733 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16734 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16735 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16736 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16737 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16738 }
16739
16740 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16741 tg3_asic_rev(tp) == ASIC_REV_57780)
16742 tg3_flag_set(tp, USE_PHYLIB);
16743
16744 err = tg3_mdio_init(tp);
16745 if (err)
16746 return err;
16747
16748 /* Initialize data/descriptor byte/word swapping. */
16749 val = tr32(GRC_MODE);
16750 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16751 tg3_asic_rev(tp) == ASIC_REV_5762)
16752 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16753 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16754 GRC_MODE_B2HRX_ENABLE |
16755 GRC_MODE_HTX2B_ENABLE |
16756 GRC_MODE_HOST_STACKUP);
16757 else
16758 val &= GRC_MODE_HOST_STACKUP;
16759
16760 tw32(GRC_MODE, val | tp->grc_mode);
16761
16762 tg3_switch_clocks(tp);
16763
16764 /* Clear this out for sanity. */
16765 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16766
16767 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16768 tw32(TG3PCI_REG_BASE_ADDR, 0);
16769
16770 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16771 &pci_state_reg);
16772 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16773 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16774 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16775 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16776 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16777 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16778 void __iomem *sram_base;
16779
16780 /* Write some dummy words into the SRAM status block
16781 * area and see if they read back correctly. If the value
16782 * read back is bad, force-enable the PCIX workaround.
16783 */
16784 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16785
16786 writel(0x00000000, sram_base);
16787 writel(0x00000000, sram_base + 4);
16788 writel(0xffffffff, sram_base + 4);
16789 if (readl(sram_base) != 0x00000000)
16790 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16791 }
16792 }
16793
16794 udelay(50);
16795 tg3_nvram_init(tp);
16796
16797 /* If the device has an NVRAM, no need to load patch firmware */
16798 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16799 !tg3_flag(tp, NO_NVRAM))
16800 tp->fw_needed = NULL;
16801
16802 grc_misc_cfg = tr32(GRC_MISC_CFG);
16803 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16804
16805 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16806 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16807 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16808 tg3_flag_set(tp, IS_5788);
16809
16810 if (!tg3_flag(tp, IS_5788) &&
16811 tg3_asic_rev(tp) != ASIC_REV_5700)
16812 tg3_flag_set(tp, TAGGED_STATUS);
16813 if (tg3_flag(tp, TAGGED_STATUS)) {
16814 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16815 HOSTCC_MODE_CLRTICK_TXBD);
16816
16817 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16818 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16819 tp->misc_host_ctrl);
16820 }
16821
16822 /* Preserve the APE MAC_MODE bits */
16823 if (tg3_flag(tp, ENABLE_APE))
16824 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16825 else
16826 tp->mac_mode = 0;
16827
16828 if (tg3_10_100_only_device(tp, ent))
16829 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16830
16831 err = tg3_phy_probe(tp);
16832 if (err) {
16833 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16834 /* ... but do not return immediately ... */
16835 tg3_mdio_fini(tp);
16836 }
16837
16838 tg3_read_vpd(tp);
16839 tg3_read_fw_ver(tp);
16840
16841 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16842 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16843 } else {
16844 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16845 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16846 else
16847 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16848 }
16849
16850 /* 5700 {AX,BX} chips have a broken status block link
16851 * change bit implementation, so we must use the
16852 * status register in those cases.
16853 */
16854 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16855 tg3_flag_set(tp, USE_LINKCHG_REG);
16856 else
16857 tg3_flag_clear(tp, USE_LINKCHG_REG);
16858
16859 /* The led_ctrl is set during tg3_phy_probe; here we might
16860 * have to force the link status polling mechanism based
16861 * upon subsystem IDs.
16862 */
16863 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16864 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16865 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16866 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16867 tg3_flag_set(tp, USE_LINKCHG_REG);
16868 }
16869
16870 /* For all SERDES we poll the MAC status register. */
16871 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16872 tg3_flag_set(tp, POLL_SERDES);
16873 else
16874 tg3_flag_clear(tp, POLL_SERDES);
16875
16876 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16877 tg3_flag_set(tp, POLL_CPMU_LINK);
16878
16879 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16880 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16881 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16882 tg3_flag(tp, PCIX_MODE)) {
16883 tp->rx_offset = NET_SKB_PAD;
16884 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16885 tp->rx_copy_thresh = ~(u16)0;
16886 #endif
16887 }
16888
16889 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16890 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16891 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16892
16893 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16894
16895 /* Increment the rx prod index on the rx std ring by at most
16896 * 8 for these chips to work around hw errata.
16897 */
16898 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16899 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16900 tg3_asic_rev(tp) == ASIC_REV_5755)
16901 tp->rx_std_max_post = 8;
16902
16903 if (tg3_flag(tp, ASPM_WORKAROUND))
16904 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16905 PCIE_PWR_MGMT_L1_THRESH_MSK;
16906
16907 return err;
16908 }
16909
16910 #ifdef CONFIG_SPARC
16911 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16912 {
16913 struct net_device *dev = tp->dev;
16914 struct pci_dev *pdev = tp->pdev;
16915 struct device_node *dp = pci_device_to_OF_node(pdev);
16916 const unsigned char *addr;
16917 int len;
16918
16919 addr = of_get_property(dp, "local-mac-address", &len);
16920 if (addr && len == ETH_ALEN) {
16921 memcpy(dev->dev_addr, addr, ETH_ALEN);
16922 return 0;
16923 }
16924 return -ENODEV;
16925 }
16926
16927 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16928 {
16929 struct net_device *dev = tp->dev;
16930
16931 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16932 return 0;
16933 }
16934 #endif
16935
16936 static int tg3_get_device_address(struct tg3 *tp)
16937 {
16938 struct net_device *dev = tp->dev;
16939 u32 hi, lo, mac_offset;
16940 int addr_ok = 0;
16941 int err;
16942
16943 #ifdef CONFIG_SPARC
16944 if (!tg3_get_macaddr_sparc(tp))
16945 return 0;
16946 #endif
16947
16948 if (tg3_flag(tp, IS_SSB_CORE)) {
16949 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16950 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16951 return 0;
16952 }
16953
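/* 0x7c is the default NVRAM offset of the MAC address;
 * it is adjusted below for dual-MAC, multi-function, and
 * 5906 devices, which keep per-port copies elsewhere.
 */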
16954 mac_offset = 0x7c;
16955 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16956 tg3_flag(tp, 5780_CLASS)) {
16957 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16958 mac_offset = 0xcc;
16959 if (tg3_nvram_lock(tp))
16960 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16961 else
16962 tg3_nvram_unlock(tp);
16963 } else if (tg3_flag(tp, 5717_PLUS)) {
16964 if (tp->pci_fn & 1)
16965 mac_offset = 0xcc;
16966 if (tp->pci_fn > 1)
16967 mac_offset += 0x18c;
16968 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16969 mac_offset = 0x10;
16970
16971 /* First try to get it from MAC address mailbox. */
16972 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
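/* Bootcode stores a 0x484b signature (ASCII "HK") in the
 * upper half of the high mailbox word when a valid MAC
 * address follows.
 */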
16973 if ((hi >> 16) == 0x484b) {
16974 dev->dev_addr[0] = (hi >> 8) & 0xff;
16975 dev->dev_addr[1] = (hi >> 0) & 0xff;
16976
16977 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16978 dev->dev_addr[2] = (lo >> 24) & 0xff;
16979 dev->dev_addr[3] = (lo >> 16) & 0xff;
16980 dev->dev_addr[4] = (lo >> 8) & 0xff;
16981 dev->dev_addr[5] = (lo >> 0) & 0xff;
16982
16983 /* Some old bootcode may report a 0 MAC address in SRAM */
16984 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16985 }
16986 if (!addr_ok) {
16987 /* Next, try NVRAM. */
16988 if (!tg3_flag(tp, NO_NVRAM) &&
16989 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16990 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16991 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16992 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16993 }
16994 /* Finally just fetch it out of the MAC control regs. */
16995 else {
16996 hi = tr32(MAC_ADDR_0_HIGH);
16997 lo = tr32(MAC_ADDR_0_LOW);
16998
16999 dev->dev_addr[5] = lo & 0xff;
17000 dev->dev_addr[4] = (lo >> 8) & 0xff;
17001 dev->dev_addr[3] = (lo >> 16) & 0xff;
17002 dev->dev_addr[2] = (lo >> 24) & 0xff;
17003 dev->dev_addr[1] = hi & 0xff;
17004 dev->dev_addr[0] = (hi >> 8) & 0xff;
17005 }
17006 }
17007
17008 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17009 #ifdef CONFIG_SPARC
17010 if (!tg3_get_default_macaddr_sparc(tp))
17011 return 0;
17012 #endif
17013 return -EINVAL;
17014 }
17015 return 0;
17016 }
17017
17018 #define BOUNDARY_SINGLE_CACHELINE 1
17019 #define BOUNDARY_MULTI_CACHELINE 2
17020
17021 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17022 {
17023 int cacheline_size;
17024 u8 byte;
17025 int goal;
17026
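/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words; a value
 * of zero means it was never programmed and is treated as
 * 1024 bytes.
 */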
17027 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17028 if (byte == 0)
17029 cacheline_size = 1024;
17030 else
17031 cacheline_size = (int) byte * 4;
17032
17033 /* On 5703 and later chips, the boundary bits have no
17034 * effect.
17035 */
17036 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17037 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17038 !tg3_flag(tp, PCI_EXPRESS))
17039 goto out;
17040
17041 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17042 goal = BOUNDARY_MULTI_CACHELINE;
17043 #else
17044 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17045 goal = BOUNDARY_SINGLE_CACHELINE;
17046 #else
17047 goal = 0;
17048 #endif
17049 #endif
17050
17051 if (tg3_flag(tp, 57765_PLUS)) {
17052 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17053 goto out;
17054 }
17055
17056 if (!goal)
17057 goto out;
17058
17059 /* PCI controllers on most RISC systems tend to disconnect
17060 * when a device tries to burst across a cache-line boundary.
17061 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17062 *
17063 * Unfortunately, for PCI-E there are only limited
17064 * write-side controls for this, and thus for reads
17065 * we will still get the disconnects. We'll also waste
17066 * these PCI cycles for both read and write for chips
17067 * other than 5700 and 5701 which do not implement the
17068 * boundary bits.
17069 */
17070 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17071 switch (cacheline_size) {
17072 case 16:
17073 case 32:
17074 case 64:
17075 case 128:
17076 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17077 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17078 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17079 } else {
17080 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17081 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17082 }
17083 break;
17084
17085 case 256:
17086 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17087 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17088 break;
17089
17090 default:
17091 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17092 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17093 break;
17094 }
17095 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17096 switch (cacheline_size) {
17097 case 16:
17098 case 32:
17099 case 64:
17100 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17101 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17102 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17103 break;
17104 }
17105 /* fallthrough */
17106 case 128:
17107 default:
17108 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17109 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17110 break;
17111 }
17112 } else {
17113 switch (cacheline_size) {
17114 case 16:
17115 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17116 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17117 DMA_RWCTRL_WRITE_BNDRY_16);
17118 break;
17119 }
17120 /* fallthrough */
17121 case 32:
17122 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17123 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17124 DMA_RWCTRL_WRITE_BNDRY_32);
17125 break;
17126 }
17127 /* fallthrough */
17128 case 64:
17129 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17130 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17131 DMA_RWCTRL_WRITE_BNDRY_64);
17132 break;
17133 }
17134 /* fallthrough */
17135 case 128:
17136 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17137 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17138 DMA_RWCTRL_WRITE_BNDRY_128);
17139 break;
17140 }
17141 /* fallthrough */
17142 case 256:
17143 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17144 DMA_RWCTRL_WRITE_BNDRY_256);
17145 break;
17146 case 512:
17147 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17148 DMA_RWCTRL_WRITE_BNDRY_512);
17149 break;
17150 case 1024:
17151 default:
17152 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17153 DMA_RWCTRL_WRITE_BNDRY_1024);
17154 break;
17155 }
17156 }
17157
17158 out:
17159 return val;
17160 }
17161
17162 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17163 int size, bool to_device)
17164 {
17165 struct tg3_internal_buffer_desc test_desc;
17166 u32 sram_dma_descs;
17167 int i, ret;
17168
17169 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17170
17171 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17172 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17173 tw32(RDMAC_STATUS, 0);
17174 tw32(WDMAC_STATUS, 0);
17175
17176 tw32(BUFMGR_MODE, 0);
17177 tw32(FTQ_RESET, 0);
17178
17179 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17180 test_desc.addr_lo = buf_dma & 0xffffffff;
17181 test_desc.nic_mbuf = 0x00002100;
17182 test_desc.len = size;
17183
17184 /*
17185 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17186 * the *second* time the tg3 driver was getting loaded after an
17187 * initial scan.
17188 *
17189 * Broadcom tells me:
17190 * ...the DMA engine is connected to the GRC block and a DMA
17191 * reset may affect the GRC block in some unpredictable way...
17192 * The behavior of resets to individual blocks has not been tested.
17193 *
17194 * Broadcom noted the GRC reset will also reset all sub-components.
17195 */
17196 if (to_device) {
17197 test_desc.cqid_sqid = (13 << 8) | 2;
17198
17199 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17200 udelay(40);
17201 } else {
17202 test_desc.cqid_sqid = (16 << 8) | 7;
17203
17204 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17205 udelay(40);
17206 }
17207 test_desc.flags = 0x00000005;
17208
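/* Copy the test descriptor into NIC SRAM one word at a time
 * through the PCI memory window config registers.
 */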
17209 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17210 u32 val;
17211
17212 val = *(((u32 *)&test_desc) + i);
17213 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17214 sram_dma_descs + (i * sizeof(u32)));
17215 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17216 }
17217 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17218
17219 if (to_device)
17220 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17221 else
17222 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17223
17224 ret = -ENODEV;
17225 for (i = 0; i < 40; i++) {
17226 u32 val;
17227
17228 if (to_device)
17229 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17230 else
17231 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17232 if ((val & 0xffff) == sram_dma_descs) {
17233 ret = 0;
17234 break;
17235 }
17236
17237 udelay(100);
17238 }
17239
17240 return ret;
17241 }
17242
17243 #define TEST_BUFFER_SIZE 0x2000
17244
17245 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17246 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17247 { },
17248 };
17249
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

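	/* Fill the buffer with an incrementing pattern, DMA it to the
	 * chip, DMA it back, and compare.  On a mismatch, retry once
	 * with a 16-byte write boundary before giving up on the device.
	 */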
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

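/* Choose mbuf and DMA buffer-manager watermarks appropriate to the
 * chip generation, for both standard and jumbo frame configurations.
 */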
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

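/* Map the PHY ID to a human-readable name for the probe banner. */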
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:				return "serdes";
	default:			return "unknown";
	}
}

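/* Format a description of the bus type (PCI/PCI-X/PCI Express), clock
 * speed and width into the caller-supplied buffer.
 */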
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

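/* Seed the ethtool coalescing parameters with the driver defaults,
 * honoring the chip's clear-ticks mode and 5705+ limitations.
 */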
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

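/* PCI probe routine: map the BARs, read the chip invariants, size the
 * DMA masks, run the DMA engine test and register the net device.
 */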
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;
	tp->pcierr_recovery = false;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev)) {
			tg3_flag_set(tp, USE_PHYLIB);
			tg3_flag_set(tp, ROBOSWITCH);
		}
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9000 or 1500, depending on hardware */
	dev->min_mtu = TG3_MIN_MTU;
	dev->max_mtu = TG3_MAX_MTU(tp);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

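	/* Set up the per-vector interrupt, rx consumer and tx producer
	 * mailbox addresses for each NAPI context, starting from the
	 * vector-0 registers.
	 */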
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	/*
	 * Reset the chip in case an UNDI or EFI driver did not shut it
	 * down cleanly; otherwise the DMA self test will enable WDMAC
	 * and we'll see (spurious) pending DMA on the PCI bus at that
	 * point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}

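/* PCI remove routine: undo everything tg3_init_one() set up. */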
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
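/* System-suspend callback: quiesce the device and, if preparing for
 * power-down fails, restart the hardware so the interface keeps
 * working.
 */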
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

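/* System-resume callback: reinitialize the hardware and restart the
 * interface if it was running at suspend time.
 */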
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

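/* Wire the suspend/resume handlers into a dev_pm_ops structure; the
 * macro expands to an empty set of ops when CONFIG_PM_SLEEP is off.
 */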
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

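/* Shutdown callback: detach and close the interface, and power the
 * chip down if the system is being powered off.
 */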
static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	/* Only mark recovery as in progress for a frozen (recoverable)
	 * channel; a permanent failure needs no recovery and is handled
	 * in the done path below.
	 */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);