]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blob - drivers/net/ethernet/broadcom/tg3.c
jfs: fix error path in ialloc
[mirror_ubuntu-eoan-kernel.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2013 Broadcom Corporation.
8 *
9 * Firmware is:
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
16 */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50
51 #include <net/checksum.h>
52 #include <net/ip.h>
53
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65
66 #define BAR_0 0
67 #define BAR_2 2
68
69 #include "tg3.h"
70
71 /* Functions & macros to verify TG3_FLAGS types */
72
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75 return test_bit(flag, bits);
76 }
77
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80 set_bit(flag, bits);
81 }
82
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
84 {
85 clear_bit(flag, bits);
86 }
87
88 #define tg3_flag(tp, flag) \
89 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag) \
91 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag) \
93 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94
95 #define DRV_MODULE_NAME "tg3"
96 #define TG3_MAJ_NUM 3
97 #define TG3_MIN_NUM 133
98 #define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE "Jul 29, 2013"
101
102 #define RESET_KIND_SHUTDOWN 0
103 #define RESET_KIND_INIT 1
104 #define RESET_KIND_SUSPEND 2
105
106 #define TG3_DEF_RX_MODE 0
107 #define TG3_DEF_TX_MODE 0
108 #define TG3_DEF_MSG_ENABLE \
109 (NETIF_MSG_DRV | \
110 NETIF_MSG_PROBE | \
111 NETIF_MSG_LINK | \
112 NETIF_MSG_TIMER | \
113 NETIF_MSG_IFDOWN | \
114 NETIF_MSG_IFUP | \
115 NETIF_MSG_RX_ERR | \
116 NETIF_MSG_TX_ERR)
117
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
119
120 /* length of time before we decide the hardware is borked,
121 * and dev->tx_timeout() should be called to fix the problem
122 */
123
124 #define TG3_TX_TIMEOUT (5 * HZ)
125
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU 60
128 #define TG3_MAX_MTU(tp) \
129 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132 * You can't change the ring sizes, but you can change where you place
133 * them in the NIC onboard memory.
134 */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING 200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
143
144 /* Do not place this n-ring entries value into the tp struct itself,
145 * we really want to expose these constants to GCC so that modulo et
146 * al. operations are done with shifts and masks instead of with
147 * hw multiply/modulo instructions. Another solution would be to
148 * replace things like '% foo' with '& (foo - 1)'.
149 */
150
151 #define TG3_TX_RING_SIZE 512
152 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
153
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
161 TG3_TX_RING_SIZE)
162 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
163
164 #define TG3_DMA_BYTE_ENAB 64
165
166 #define TG3_RX_STD_DMA_SZ 1536
167 #define TG3_RX_JMB_DMA_SZ 9046
168
169 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
170
171 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181 * that are at least dword aligned when used in PCIX mode. The driver
182 * works around this bug by double copying the packet. This workaround
183 * is built into the normal double copy length check for efficiency.
184 *
185 * However, the double copy is only necessary on those architectures
186 * where unaligned memory accesses are inefficient. For those architectures
187 * where unaligned memory accesses incur little penalty, we can reintegrate
188 * the 5701 in the normal rx path. Doing so saves a device structure
189 * dereference by hardcoding the double copy threshold in place.
190 */
191 #define TG3_RX_COPY_THRESHOLD 256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
194 #else
195 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
196 #endif
197
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
202 #endif
203
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K 2048
207 #define TG3_TX_BD_DMA_MAX_4K 4096
208
209 #define TG3_RAW_IP_ALIGN 2
210
211 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
212 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213
214 #define FIRMWARE_TG3 "tigon/tg3.bin"
215 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
218
219 static char version[] =
220 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
221
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229
230 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
236
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 TG3_DRV_DATA_FLAG_5705_10_100},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 TG3_DRV_DATA_FLAG_5705_10_100},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 TG3_DRV_DATA_FLAG_5705_10_100},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 PCI_VENDOR_ID_LENOVO,
288 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
348 {}
349 };
350
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352
353 static const struct {
354 const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
356 { "rx_octets" },
357 { "rx_fragments" },
358 { "rx_ucast_packets" },
359 { "rx_mcast_packets" },
360 { "rx_bcast_packets" },
361 { "rx_fcs_errors" },
362 { "rx_align_errors" },
363 { "rx_xon_pause_rcvd" },
364 { "rx_xoff_pause_rcvd" },
365 { "rx_mac_ctrl_rcvd" },
366 { "rx_xoff_entered" },
367 { "rx_frame_too_long_errors" },
368 { "rx_jabbers" },
369 { "rx_undersize_packets" },
370 { "rx_in_length_errors" },
371 { "rx_out_length_errors" },
372 { "rx_64_or_less_octet_packets" },
373 { "rx_65_to_127_octet_packets" },
374 { "rx_128_to_255_octet_packets" },
375 { "rx_256_to_511_octet_packets" },
376 { "rx_512_to_1023_octet_packets" },
377 { "rx_1024_to_1522_octet_packets" },
378 { "rx_1523_to_2047_octet_packets" },
379 { "rx_2048_to_4095_octet_packets" },
380 { "rx_4096_to_8191_octet_packets" },
381 { "rx_8192_to_9022_octet_packets" },
382
383 { "tx_octets" },
384 { "tx_collisions" },
385
386 { "tx_xon_sent" },
387 { "tx_xoff_sent" },
388 { "tx_flow_control" },
389 { "tx_mac_errors" },
390 { "tx_single_collisions" },
391 { "tx_mult_collisions" },
392 { "tx_deferred" },
393 { "tx_excessive_collisions" },
394 { "tx_late_collisions" },
395 { "tx_collide_2times" },
396 { "tx_collide_3times" },
397 { "tx_collide_4times" },
398 { "tx_collide_5times" },
399 { "tx_collide_6times" },
400 { "tx_collide_7times" },
401 { "tx_collide_8times" },
402 { "tx_collide_9times" },
403 { "tx_collide_10times" },
404 { "tx_collide_11times" },
405 { "tx_collide_12times" },
406 { "tx_collide_13times" },
407 { "tx_collide_14times" },
408 { "tx_collide_15times" },
409 { "tx_ucast_packets" },
410 { "tx_mcast_packets" },
411 { "tx_bcast_packets" },
412 { "tx_carrier_sense_errors" },
413 { "tx_discards" },
414 { "tx_errors" },
415
416 { "dma_writeq_full" },
417 { "dma_write_prioq_full" },
418 { "rxbds_empty" },
419 { "rx_discards" },
420 { "rx_errors" },
421 { "rx_threshold_hit" },
422
423 { "dma_readq_full" },
424 { "dma_read_prioq_full" },
425 { "tx_comp_queue_full" },
426
427 { "ring_set_send_prod_index" },
428 { "ring_status_update" },
429 { "nic_irqs" },
430 { "nic_avoided_irqs" },
431 { "nic_tx_threshold_hit" },
432
433 { "mbuf_lwm_thresh_hit" },
434 };
435
436 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST 0
438 #define TG3_LINK_TEST 1
439 #define TG3_REGISTER_TEST 2
440 #define TG3_MEMORY_TEST 3
441 #define TG3_MAC_LOOPB_TEST 4
442 #define TG3_PHY_LOOPB_TEST 5
443 #define TG3_EXT_LOOPB_TEST 6
444 #define TG3_INTERRUPT_TEST 7
445
446
447 static const struct {
448 const char string[ETH_GSTRING_LEN];
449 } ethtool_test_keys[] = {
450 [TG3_NVRAM_TEST] = { "nvram test (online) " },
451 [TG3_LINK_TEST] = { "link test (online) " },
452 [TG3_REGISTER_TEST] = { "register test (offline)" },
453 [TG3_MEMORY_TEST] = { "memory test (offline)" },
454 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
455 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
456 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
457 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
458 };
459
460 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
461
462
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
464 {
465 writel(val, tp->regs + off);
466 }
467
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
469 {
470 return readl(tp->regs + off);
471 }
472
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
474 {
475 writel(val, tp->aperegs + off);
476 }
477
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
479 {
480 return readl(tp->aperegs + off);
481 }
482
483 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
484 {
485 unsigned long flags;
486
487 spin_lock_irqsave(&tp->indirect_lock, flags);
488 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
489 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
490 spin_unlock_irqrestore(&tp->indirect_lock, flags);
491 }
492
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
494 {
495 writel(val, tp->regs + off);
496 readl(tp->regs + off);
497 }
498
499 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
500 {
501 unsigned long flags;
502 u32 val;
503
504 spin_lock_irqsave(&tp->indirect_lock, flags);
505 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
506 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507 spin_unlock_irqrestore(&tp->indirect_lock, flags);
508 return val;
509 }
510
511 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
512 {
513 unsigned long flags;
514
515 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
516 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
517 TG3_64BIT_REG_LOW, val);
518 return;
519 }
520 if (off == TG3_RX_STD_PROD_IDX_REG) {
521 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
522 TG3_64BIT_REG_LOW, val);
523 return;
524 }
525
526 spin_lock_irqsave(&tp->indirect_lock, flags);
527 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
528 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
529 spin_unlock_irqrestore(&tp->indirect_lock, flags);
530
531 /* In indirect mode when disabling interrupts, we also need
532 * to clear the interrupt bit in the GRC local ctrl register.
533 */
534 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
535 (val == 0x1)) {
536 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
537 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
538 }
539 }
540
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
542 {
543 unsigned long flags;
544 u32 val;
545
546 spin_lock_irqsave(&tp->indirect_lock, flags);
547 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549 spin_unlock_irqrestore(&tp->indirect_lock, flags);
550 return val;
551 }
552
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554 * where it is unsafe to read back the register without some delay.
555 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
557 */
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
559 {
560 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561 /* Non-posted methods */
562 tp->write32(tp, off, val);
563 else {
564 /* Posted method */
565 tg3_write32(tp, off, val);
566 if (usec_wait)
567 udelay(usec_wait);
568 tp->read32(tp, off);
569 }
570 /* Wait again after the read for the posted method to guarantee that
571 * the wait time is met.
572 */
573 if (usec_wait)
574 udelay(usec_wait);
575 }
576
577 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
578 {
579 tp->write32_mbox(tp, off, val);
580 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
581 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
582 !tg3_flag(tp, ICH_WORKAROUND)))
583 tp->read32_mbox(tp, off);
584 }
585
586 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
587 {
588 void __iomem *mbox = tp->regs + off;
589 writel(val, mbox);
590 if (tg3_flag(tp, TXD_MBOX_HWBUG))
591 writel(val, mbox);
592 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
593 tg3_flag(tp, FLUSH_POSTED_WRITES))
594 readl(mbox);
595 }
596
597 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
598 {
599 return readl(tp->regs + off + GRCMBOX_BASE);
600 }
601
602 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
603 {
604 writel(val, tp->regs + off + GRCMBOX_BASE);
605 }
606
607 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
612
613 #define tw32(reg, val) tp->write32(tp, reg, val)
614 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg) tp->read32(tp, reg)
617
618 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
619 {
620 unsigned long flags;
621
622 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
623 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
624 return;
625
626 spin_lock_irqsave(&tp->indirect_lock, flags);
627 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
628 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
629 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
630
631 /* Always leave this as zero. */
632 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
633 } else {
634 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
635 tw32_f(TG3PCI_MEM_WIN_DATA, val);
636
637 /* Always leave this as zero. */
638 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
639 }
640 spin_unlock_irqrestore(&tp->indirect_lock, flags);
641 }
642
643 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
644 {
645 unsigned long flags;
646
647 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
648 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
649 *val = 0;
650 return;
651 }
652
653 spin_lock_irqsave(&tp->indirect_lock, flags);
654 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
655 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
656 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
657
658 /* Always leave this as zero. */
659 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
660 } else {
661 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
662 *val = tr32(TG3PCI_MEM_WIN_DATA);
663
664 /* Always leave this as zero. */
665 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
666 }
667 spin_unlock_irqrestore(&tp->indirect_lock, flags);
668 }
669
670 static void tg3_ape_lock_init(struct tg3 *tp)
671 {
672 int i;
673 u32 regbase, bit;
674
675 if (tg3_asic_rev(tp) == ASIC_REV_5761)
676 regbase = TG3_APE_LOCK_GRANT;
677 else
678 regbase = TG3_APE_PER_LOCK_GRANT;
679
680 /* Make sure the driver hasn't any stale locks. */
681 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
682 switch (i) {
683 case TG3_APE_LOCK_PHY0:
684 case TG3_APE_LOCK_PHY1:
685 case TG3_APE_LOCK_PHY2:
686 case TG3_APE_LOCK_PHY3:
687 bit = APE_LOCK_GRANT_DRIVER;
688 break;
689 default:
690 if (!tp->pci_fn)
691 bit = APE_LOCK_GRANT_DRIVER;
692 else
693 bit = 1 << tp->pci_fn;
694 }
695 tg3_ape_write32(tp, regbase + 4 * i, bit);
696 }
697
698 }
699
700 static int tg3_ape_lock(struct tg3 *tp, int locknum)
701 {
702 int i, off;
703 int ret = 0;
704 u32 status, req, gnt, bit;
705
706 if (!tg3_flag(tp, ENABLE_APE))
707 return 0;
708
709 switch (locknum) {
710 case TG3_APE_LOCK_GPIO:
711 if (tg3_asic_rev(tp) == ASIC_REV_5761)
712 return 0;
713 case TG3_APE_LOCK_GRC:
714 case TG3_APE_LOCK_MEM:
715 if (!tp->pci_fn)
716 bit = APE_LOCK_REQ_DRIVER;
717 else
718 bit = 1 << tp->pci_fn;
719 break;
720 case TG3_APE_LOCK_PHY0:
721 case TG3_APE_LOCK_PHY1:
722 case TG3_APE_LOCK_PHY2:
723 case TG3_APE_LOCK_PHY3:
724 bit = APE_LOCK_REQ_DRIVER;
725 break;
726 default:
727 return -EINVAL;
728 }
729
730 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
731 req = TG3_APE_LOCK_REQ;
732 gnt = TG3_APE_LOCK_GRANT;
733 } else {
734 req = TG3_APE_PER_LOCK_REQ;
735 gnt = TG3_APE_PER_LOCK_GRANT;
736 }
737
738 off = 4 * locknum;
739
740 tg3_ape_write32(tp, req + off, bit);
741
742 /* Wait for up to 1 millisecond to acquire lock. */
743 for (i = 0; i < 100; i++) {
744 status = tg3_ape_read32(tp, gnt + off);
745 if (status == bit)
746 break;
747 if (pci_channel_offline(tp->pdev))
748 break;
749
750 udelay(10);
751 }
752
753 if (status != bit) {
754 /* Revoke the lock request. */
755 tg3_ape_write32(tp, gnt + off, bit);
756 ret = -EBUSY;
757 }
758
759 return ret;
760 }
761
762 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
763 {
764 u32 gnt, bit;
765
766 if (!tg3_flag(tp, ENABLE_APE))
767 return;
768
769 switch (locknum) {
770 case TG3_APE_LOCK_GPIO:
771 if (tg3_asic_rev(tp) == ASIC_REV_5761)
772 return;
773 case TG3_APE_LOCK_GRC:
774 case TG3_APE_LOCK_MEM:
775 if (!tp->pci_fn)
776 bit = APE_LOCK_GRANT_DRIVER;
777 else
778 bit = 1 << tp->pci_fn;
779 break;
780 case TG3_APE_LOCK_PHY0:
781 case TG3_APE_LOCK_PHY1:
782 case TG3_APE_LOCK_PHY2:
783 case TG3_APE_LOCK_PHY3:
784 bit = APE_LOCK_GRANT_DRIVER;
785 break;
786 default:
787 return;
788 }
789
790 if (tg3_asic_rev(tp) == ASIC_REV_5761)
791 gnt = TG3_APE_LOCK_GRANT;
792 else
793 gnt = TG3_APE_PER_LOCK_GRANT;
794
795 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
796 }
797
798 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
799 {
800 u32 apedata;
801
802 while (timeout_us) {
803 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
804 return -EBUSY;
805
806 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
807 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
808 break;
809
810 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
811
812 udelay(10);
813 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
814 }
815
816 return timeout_us ? 0 : -EBUSY;
817 }
818
819 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
820 {
821 u32 i, apedata;
822
823 for (i = 0; i < timeout_us / 10; i++) {
824 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
825
826 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
827 break;
828
829 udelay(10);
830 }
831
832 return i == timeout_us / 10;
833 }
834
835 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
836 u32 len)
837 {
838 int err;
839 u32 i, bufoff, msgoff, maxlen, apedata;
840
841 if (!tg3_flag(tp, APE_HAS_NCSI))
842 return 0;
843
844 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
845 if (apedata != APE_SEG_SIG_MAGIC)
846 return -ENODEV;
847
848 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
849 if (!(apedata & APE_FW_STATUS_READY))
850 return -EAGAIN;
851
852 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
853 TG3_APE_SHMEM_BASE;
854 msgoff = bufoff + 2 * sizeof(u32);
855 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
856
857 while (len) {
858 u32 length;
859
860 /* Cap xfer sizes to scratchpad limits. */
861 length = (len > maxlen) ? maxlen : len;
862 len -= length;
863
864 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
865 if (!(apedata & APE_FW_STATUS_READY))
866 return -EAGAIN;
867
868 /* Wait for up to 1 msec for APE to service previous event. */
869 err = tg3_ape_event_lock(tp, 1000);
870 if (err)
871 return err;
872
873 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
874 APE_EVENT_STATUS_SCRTCHPD_READ |
875 APE_EVENT_STATUS_EVENT_PENDING;
876 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
877
878 tg3_ape_write32(tp, bufoff, base_off);
879 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
880
881 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
882 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
883
884 base_off += length;
885
886 if (tg3_ape_wait_for_event(tp, 30000))
887 return -EAGAIN;
888
889 for (i = 0; length; i += 4, length -= 4) {
890 u32 val = tg3_ape_read32(tp, msgoff + i);
891 memcpy(data, &val, sizeof(u32));
892 data++;
893 }
894 }
895
896 return 0;
897 }
898
899 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
900 {
901 int err;
902 u32 apedata;
903
904 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
905 if (apedata != APE_SEG_SIG_MAGIC)
906 return -EAGAIN;
907
908 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
909 if (!(apedata & APE_FW_STATUS_READY))
910 return -EAGAIN;
911
912 /* Wait for up to 1 millisecond for APE to service previous event. */
913 err = tg3_ape_event_lock(tp, 1000);
914 if (err)
915 return err;
916
917 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
918 event | APE_EVENT_STATUS_EVENT_PENDING);
919
920 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
921 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
922
923 return 0;
924 }
925
926 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
927 {
928 u32 event;
929 u32 apedata;
930
931 if (!tg3_flag(tp, ENABLE_APE))
932 return;
933
934 switch (kind) {
935 case RESET_KIND_INIT:
936 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
937 APE_HOST_SEG_SIG_MAGIC);
938 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
939 APE_HOST_SEG_LEN_MAGIC);
940 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
941 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
942 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
943 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
944 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
945 APE_HOST_BEHAV_NO_PHYLOCK);
946 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
947 TG3_APE_HOST_DRVR_STATE_START);
948
949 event = APE_EVENT_STATUS_STATE_START;
950 break;
951 case RESET_KIND_SHUTDOWN:
952 /* With the interface we are currently using,
953 * APE does not track driver state. Wiping
954 * out the HOST SEGMENT SIGNATURE forces
955 * the APE to assume OS absent status.
956 */
957 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
958
959 if (device_may_wakeup(&tp->pdev->dev) &&
960 tg3_flag(tp, WOL_ENABLE)) {
961 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
962 TG3_APE_HOST_WOL_SPEED_AUTO);
963 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
964 } else
965 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
966
967 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
968
969 event = APE_EVENT_STATUS_STATE_UNLOAD;
970 break;
971 default:
972 return;
973 }
974
975 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
976
977 tg3_ape_send_event(tp, event);
978 }
979
980 static void tg3_disable_ints(struct tg3 *tp)
981 {
982 int i;
983
984 tw32(TG3PCI_MISC_HOST_CTRL,
985 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986 for (i = 0; i < tp->irq_max; i++)
987 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
988 }
989
990 static void tg3_enable_ints(struct tg3 *tp)
991 {
992 int i;
993
994 tp->irq_sync = 0;
995 wmb();
996
997 tw32(TG3PCI_MISC_HOST_CTRL,
998 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
999
1000 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1001 for (i = 0; i < tp->irq_cnt; i++) {
1002 struct tg3_napi *tnapi = &tp->napi[i];
1003
1004 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1005 if (tg3_flag(tp, 1SHOT_MSI))
1006 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1007
1008 tp->coal_now |= tnapi->coal_now;
1009 }
1010
1011 /* Force an initial interrupt */
1012 if (!tg3_flag(tp, TAGGED_STATUS) &&
1013 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1014 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1015 else
1016 tw32(HOSTCC_MODE, tp->coal_now);
1017
1018 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1019 }
1020
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1022 {
1023 struct tg3 *tp = tnapi->tp;
1024 struct tg3_hw_status *sblk = tnapi->hw_status;
1025 unsigned int work_exists = 0;
1026
1027 /* check for phy events */
1028 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029 if (sblk->status & SD_STATUS_LINK_CHG)
1030 work_exists = 1;
1031 }
1032
1033 /* check for TX work to do */
1034 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1035 work_exists = 1;
1036
1037 /* check for RX work to do */
1038 if (tnapi->rx_rcb_prod_idx &&
1039 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1040 work_exists = 1;
1041
1042 return work_exists;
1043 }
1044
1045 /* tg3_int_reenable
1046 * similar to tg3_enable_ints, but it accurately determines whether there
1047 * is new work pending and can return without flushing the PIO write
1048 * which reenables interrupts
1049 */
1050 static void tg3_int_reenable(struct tg3_napi *tnapi)
1051 {
1052 struct tg3 *tp = tnapi->tp;
1053
1054 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1055 mmiowb();
1056
1057 /* When doing tagged status, this work check is unnecessary.
1058 * The last_tag we write above tells the chip which piece of
1059 * work we've completed.
1060 */
1061 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1062 tw32(HOSTCC_MODE, tp->coalesce_mode |
1063 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1064 }
1065
1066 static void tg3_switch_clocks(struct tg3 *tp)
1067 {
1068 u32 clock_ctrl;
1069 u32 orig_clock_ctrl;
1070
1071 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1072 return;
1073
1074 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1075
1076 orig_clock_ctrl = clock_ctrl;
1077 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1078 CLOCK_CTRL_CLKRUN_OENABLE |
1079 0x1f);
1080 tp->pci_clock_ctrl = clock_ctrl;
1081
1082 if (tg3_flag(tp, 5705_PLUS)) {
1083 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1084 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1085 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1086 }
1087 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1088 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1089 clock_ctrl |
1090 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1091 40);
1092 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1093 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1094 40);
1095 }
1096 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1097 }
1098
1099 #define PHY_BUSY_LOOPS 5000
1100
1101 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1102 u32 *val)
1103 {
1104 u32 frame_val;
1105 unsigned int loops;
1106 int ret;
1107
1108 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1109 tw32_f(MAC_MI_MODE,
1110 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1111 udelay(80);
1112 }
1113
1114 tg3_ape_lock(tp, tp->phy_ape_lock);
1115
1116 *val = 0x0;
1117
1118 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1119 MI_COM_PHY_ADDR_MASK);
1120 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1121 MI_COM_REG_ADDR_MASK);
1122 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1123
1124 tw32_f(MAC_MI_COM, frame_val);
1125
1126 loops = PHY_BUSY_LOOPS;
1127 while (loops != 0) {
1128 udelay(10);
1129 frame_val = tr32(MAC_MI_COM);
1130
1131 if ((frame_val & MI_COM_BUSY) == 0) {
1132 udelay(5);
1133 frame_val = tr32(MAC_MI_COM);
1134 break;
1135 }
1136 loops -= 1;
1137 }
1138
1139 ret = -EBUSY;
1140 if (loops != 0) {
1141 *val = frame_val & MI_COM_DATA_MASK;
1142 ret = 0;
1143 }
1144
1145 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1146 tw32_f(MAC_MI_MODE, tp->mi_mode);
1147 udelay(80);
1148 }
1149
1150 tg3_ape_unlock(tp, tp->phy_ape_lock);
1151
1152 return ret;
1153 }
1154
1155 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1156 {
1157 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1158 }
1159
1160 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1161 u32 val)
1162 {
1163 u32 frame_val;
1164 unsigned int loops;
1165 int ret;
1166
1167 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1168 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1169 return 0;
1170
1171 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1172 tw32_f(MAC_MI_MODE,
1173 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1174 udelay(80);
1175 }
1176
1177 tg3_ape_lock(tp, tp->phy_ape_lock);
1178
1179 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1180 MI_COM_PHY_ADDR_MASK);
1181 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1182 MI_COM_REG_ADDR_MASK);
1183 frame_val |= (val & MI_COM_DATA_MASK);
1184 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1185
1186 tw32_f(MAC_MI_COM, frame_val);
1187
1188 loops = PHY_BUSY_LOOPS;
1189 while (loops != 0) {
1190 udelay(10);
1191 frame_val = tr32(MAC_MI_COM);
1192 if ((frame_val & MI_COM_BUSY) == 0) {
1193 udelay(5);
1194 frame_val = tr32(MAC_MI_COM);
1195 break;
1196 }
1197 loops -= 1;
1198 }
1199
1200 ret = -EBUSY;
1201 if (loops != 0)
1202 ret = 0;
1203
1204 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1205 tw32_f(MAC_MI_MODE, tp->mi_mode);
1206 udelay(80);
1207 }
1208
1209 tg3_ape_unlock(tp, tp->phy_ape_lock);
1210
1211 return ret;
1212 }
1213
1214 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1215 {
1216 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1217 }
1218
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1220 {
1221 int err;
1222
1223 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1224 if (err)
1225 goto done;
1226
1227 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1228 if (err)
1229 goto done;
1230
1231 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1233 if (err)
1234 goto done;
1235
1236 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1237
1238 done:
1239 return err;
1240 }
1241
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1243 {
1244 int err;
1245
1246 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247 if (err)
1248 goto done;
1249
1250 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251 if (err)
1252 goto done;
1253
1254 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256 if (err)
1257 goto done;
1258
1259 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1260
1261 done:
1262 return err;
1263 }
1264
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1266 {
1267 int err;
1268
1269 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1270 if (!err)
1271 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1272
1273 return err;
1274 }
1275
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1277 {
1278 int err;
1279
1280 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1281 if (!err)
1282 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1283
1284 return err;
1285 }
1286
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1288 {
1289 int err;
1290
1291 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293 MII_TG3_AUXCTL_SHDWSEL_MISC);
1294 if (!err)
1295 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1296
1297 return err;
1298 }
1299
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1301 {
1302 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303 set |= MII_TG3_AUXCTL_MISC_WREN;
1304
1305 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1306 }
1307
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1309 {
1310 u32 val;
1311 int err;
1312
1313 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1314
1315 if (err)
1316 return err;
1317
1318 if (enable)
1319 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320 else
1321 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1322
1323 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1325
1326 return err;
1327 }
1328
1329 static int tg3_bmcr_reset(struct tg3 *tp)
1330 {
1331 u32 phy_control;
1332 int limit, err;
1333
1334 /* OK, reset it, and poll the BMCR_RESET bit until it
1335 * clears or we time out.
1336 */
1337 phy_control = BMCR_RESET;
1338 err = tg3_writephy(tp, MII_BMCR, phy_control);
1339 if (err != 0)
1340 return -EBUSY;
1341
1342 limit = 5000;
1343 while (limit--) {
1344 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1345 if (err != 0)
1346 return -EBUSY;
1347
1348 if ((phy_control & BMCR_RESET) == 0) {
1349 udelay(40);
1350 break;
1351 }
1352 udelay(10);
1353 }
1354 if (limit < 0)
1355 return -EBUSY;
1356
1357 return 0;
1358 }
1359
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1361 {
1362 struct tg3 *tp = bp->priv;
1363 u32 val;
1364
1365 spin_lock_bh(&tp->lock);
1366
1367 if (tg3_readphy(tp, reg, &val))
1368 val = -EIO;
1369
1370 spin_unlock_bh(&tp->lock);
1371
1372 return val;
1373 }
1374
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1376 {
1377 struct tg3 *tp = bp->priv;
1378 u32 ret = 0;
1379
1380 spin_lock_bh(&tp->lock);
1381
1382 if (tg3_writephy(tp, reg, val))
1383 ret = -EIO;
1384
1385 spin_unlock_bh(&tp->lock);
1386
1387 return ret;
1388 }
1389
1390 static int tg3_mdio_reset(struct mii_bus *bp)
1391 {
1392 return 0;
1393 }
1394
1395 static void tg3_mdio_config_5785(struct tg3 *tp)
1396 {
1397 u32 val;
1398 struct phy_device *phydev;
1399
1400 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1401 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1402 case PHY_ID_BCM50610:
1403 case PHY_ID_BCM50610M:
1404 val = MAC_PHYCFG2_50610_LED_MODES;
1405 break;
1406 case PHY_ID_BCMAC131:
1407 val = MAC_PHYCFG2_AC131_LED_MODES;
1408 break;
1409 case PHY_ID_RTL8211C:
1410 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1411 break;
1412 case PHY_ID_RTL8201E:
1413 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1414 break;
1415 default:
1416 return;
1417 }
1418
1419 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1420 tw32(MAC_PHYCFG2, val);
1421
1422 val = tr32(MAC_PHYCFG1);
1423 val &= ~(MAC_PHYCFG1_RGMII_INT |
1424 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1425 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1426 tw32(MAC_PHYCFG1, val);
1427
1428 return;
1429 }
1430
1431 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1432 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1433 MAC_PHYCFG2_FMODE_MASK_MASK |
1434 MAC_PHYCFG2_GMODE_MASK_MASK |
1435 MAC_PHYCFG2_ACT_MASK_MASK |
1436 MAC_PHYCFG2_QUAL_MASK_MASK |
1437 MAC_PHYCFG2_INBAND_ENABLE;
1438
1439 tw32(MAC_PHYCFG2, val);
1440
1441 val = tr32(MAC_PHYCFG1);
1442 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1443 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1444 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1445 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1446 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1447 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1448 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1449 }
1450 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1451 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1452 tw32(MAC_PHYCFG1, val);
1453
1454 val = tr32(MAC_EXT_RGMII_MODE);
1455 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1456 MAC_RGMII_MODE_RX_QUALITY |
1457 MAC_RGMII_MODE_RX_ACTIVITY |
1458 MAC_RGMII_MODE_RX_ENG_DET |
1459 MAC_RGMII_MODE_TX_ENABLE |
1460 MAC_RGMII_MODE_TX_LOWPWR |
1461 MAC_RGMII_MODE_TX_RESET);
1462 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1463 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1464 val |= MAC_RGMII_MODE_RX_INT_B |
1465 MAC_RGMII_MODE_RX_QUALITY |
1466 MAC_RGMII_MODE_RX_ACTIVITY |
1467 MAC_RGMII_MODE_RX_ENG_DET;
1468 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1469 val |= MAC_RGMII_MODE_TX_ENABLE |
1470 MAC_RGMII_MODE_TX_LOWPWR |
1471 MAC_RGMII_MODE_TX_RESET;
1472 }
1473 tw32(MAC_EXT_RGMII_MODE, val);
1474 }
1475
1476 static void tg3_mdio_start(struct tg3 *tp)
1477 {
1478 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1479 tw32_f(MAC_MI_MODE, tp->mi_mode);
1480 udelay(80);
1481
1482 if (tg3_flag(tp, MDIOBUS_INITED) &&
1483 tg3_asic_rev(tp) == ASIC_REV_5785)
1484 tg3_mdio_config_5785(tp);
1485 }
1486
1487 static int tg3_mdio_init(struct tg3 *tp)
1488 {
1489 int i;
1490 u32 reg;
1491 struct phy_device *phydev;
1492
1493 if (tg3_flag(tp, 5717_PLUS)) {
1494 u32 is_serdes;
1495
1496 tp->phy_addr = tp->pci_fn + 1;
1497
1498 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1499 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1500 else
1501 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1502 TG3_CPMU_PHY_STRAP_IS_SERDES;
1503 if (is_serdes)
1504 tp->phy_addr += 7;
1505 } else
1506 tp->phy_addr = TG3_PHY_MII_ADDR;
1507
1508 tg3_mdio_start(tp);
1509
1510 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1511 return 0;
1512
1513 tp->mdio_bus = mdiobus_alloc();
1514 if (tp->mdio_bus == NULL)
1515 return -ENOMEM;
1516
1517 tp->mdio_bus->name = "tg3 mdio bus";
1518 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1519 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1520 tp->mdio_bus->priv = tp;
1521 tp->mdio_bus->parent = &tp->pdev->dev;
1522 tp->mdio_bus->read = &tg3_mdio_read;
1523 tp->mdio_bus->write = &tg3_mdio_write;
1524 tp->mdio_bus->reset = &tg3_mdio_reset;
1525 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1526 tp->mdio_bus->irq = &tp->mdio_irq[0];
1527
1528 for (i = 0; i < PHY_MAX_ADDR; i++)
1529 tp->mdio_bus->irq[i] = PHY_POLL;
1530
1531 /* The bus registration will look for all the PHYs on the mdio bus.
1532 * Unfortunately, it does not ensure the PHY is powered up before
1533 * accessing the PHY ID registers. A chip reset is the
1534 * quickest way to bring the device back to an operational state..
1535 */
1536 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1537 tg3_bmcr_reset(tp);
1538
1539 i = mdiobus_register(tp->mdio_bus);
1540 if (i) {
1541 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1542 mdiobus_free(tp->mdio_bus);
1543 return i;
1544 }
1545
1546 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1547
1548 if (!phydev || !phydev->drv) {
1549 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1550 mdiobus_unregister(tp->mdio_bus);
1551 mdiobus_free(tp->mdio_bus);
1552 return -ENODEV;
1553 }
1554
1555 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1556 case PHY_ID_BCM57780:
1557 phydev->interface = PHY_INTERFACE_MODE_GMII;
1558 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1559 break;
1560 case PHY_ID_BCM50610:
1561 case PHY_ID_BCM50610M:
1562 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1563 PHY_BRCM_RX_REFCLK_UNUSED |
1564 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1565 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1566 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1567 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1568 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1569 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1570 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1571 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1572 /* fallthru */
1573 case PHY_ID_RTL8211C:
1574 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1575 break;
1576 case PHY_ID_RTL8201E:
1577 case PHY_ID_BCMAC131:
1578 phydev->interface = PHY_INTERFACE_MODE_MII;
1579 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1580 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1581 break;
1582 }
1583
1584 tg3_flag_set(tp, MDIOBUS_INITED);
1585
1586 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1587 tg3_mdio_config_5785(tp);
1588
1589 return 0;
1590 }
1591
1592 static void tg3_mdio_fini(struct tg3 *tp)
1593 {
1594 if (tg3_flag(tp, MDIOBUS_INITED)) {
1595 tg3_flag_clear(tp, MDIOBUS_INITED);
1596 mdiobus_unregister(tp->mdio_bus);
1597 mdiobus_free(tp->mdio_bus);
1598 }
1599 }
1600
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1603 {
1604 u32 val;
1605
1606 val = tr32(GRC_RX_CPU_EVENT);
1607 val |= GRC_RX_CPU_DRIVER_EVENT;
1608 tw32_f(GRC_RX_CPU_EVENT, val);
1609
1610 tp->last_event_jiffies = jiffies;
1611 }
1612
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614
1615 /* tp->lock is held. */
1616 static void tg3_wait_for_event_ack(struct tg3 *tp)
1617 {
1618 int i;
1619 unsigned int delay_cnt;
1620 long time_remain;
1621
1622 /* If enough time has passed, no wait is necessary. */
1623 time_remain = (long)(tp->last_event_jiffies + 1 +
1624 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1625 (long)jiffies;
1626 if (time_remain < 0)
1627 return;
1628
1629 /* Check if we can shorten the wait time. */
1630 delay_cnt = jiffies_to_usecs(time_remain);
1631 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1632 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1633 delay_cnt = (delay_cnt >> 3) + 1;
1634
1635 for (i = 0; i < delay_cnt; i++) {
1636 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1637 break;
1638 if (pci_channel_offline(tp->pdev))
1639 break;
1640
1641 udelay(8);
1642 }
1643 }
1644
1645 /* tp->lock is held. */
1646 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1647 {
1648 u32 reg, val;
1649
1650 val = 0;
1651 if (!tg3_readphy(tp, MII_BMCR, &reg))
1652 val = reg << 16;
1653 if (!tg3_readphy(tp, MII_BMSR, &reg))
1654 val |= (reg & 0xffff);
1655 *data++ = val;
1656
1657 val = 0;
1658 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1659 val = reg << 16;
1660 if (!tg3_readphy(tp, MII_LPA, &reg))
1661 val |= (reg & 0xffff);
1662 *data++ = val;
1663
1664 val = 0;
1665 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1666 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1667 val = reg << 16;
1668 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1669 val |= (reg & 0xffff);
1670 }
1671 *data++ = val;
1672
1673 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1674 val = reg << 16;
1675 else
1676 val = 0;
1677 *data++ = val;
1678 }
1679
1680 /* tp->lock is held. */
1681 static void tg3_ump_link_report(struct tg3 *tp)
1682 {
1683 u32 data[4];
1684
1685 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1686 return;
1687
1688 tg3_phy_gather_ump_data(tp, data);
1689
1690 tg3_wait_for_event_ack(tp);
1691
1692 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1694 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1695 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1696 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1697 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1698
1699 tg3_generate_fw_event(tp);
1700 }
1701
1702 /* tp->lock is held. */
1703 static void tg3_stop_fw(struct tg3 *tp)
1704 {
1705 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1706 /* Wait for RX cpu to ACK the previous event. */
1707 tg3_wait_for_event_ack(tp);
1708
1709 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1710
1711 tg3_generate_fw_event(tp);
1712
1713 /* Wait for RX cpu to ACK this event. */
1714 tg3_wait_for_event_ack(tp);
1715 }
1716 }
1717
1718 /* tp->lock is held. */
1719 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1720 {
1721 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1722 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1723
1724 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1725 switch (kind) {
1726 case RESET_KIND_INIT:
1727 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1728 DRV_STATE_START);
1729 break;
1730
1731 case RESET_KIND_SHUTDOWN:
1732 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1733 DRV_STATE_UNLOAD);
1734 break;
1735
1736 case RESET_KIND_SUSPEND:
1737 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1738 DRV_STATE_SUSPEND);
1739 break;
1740
1741 default:
1742 break;
1743 }
1744 }
1745 }
1746
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1749 {
1750 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1751 switch (kind) {
1752 case RESET_KIND_INIT:
1753 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1754 DRV_STATE_START_DONE);
1755 break;
1756
1757 case RESET_KIND_SHUTDOWN:
1758 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1759 DRV_STATE_UNLOAD_DONE);
1760 break;
1761
1762 default:
1763 break;
1764 }
1765 }
1766 }
1767
1768 /* tp->lock is held. */
1769 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1770 {
1771 if (tg3_flag(tp, ENABLE_ASF)) {
1772 switch (kind) {
1773 case RESET_KIND_INIT:
1774 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1775 DRV_STATE_START);
1776 break;
1777
1778 case RESET_KIND_SHUTDOWN:
1779 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1780 DRV_STATE_UNLOAD);
1781 break;
1782
1783 case RESET_KIND_SUSPEND:
1784 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1785 DRV_STATE_SUSPEND);
1786 break;
1787
1788 default:
1789 break;
1790 }
1791 }
1792 }
1793
1794 static int tg3_poll_fw(struct tg3 *tp)
1795 {
1796 int i;
1797 u32 val;
1798
1799 if (tg3_flag(tp, NO_FWARE_REPORTED))
1800 return 0;
1801
1802 if (tg3_flag(tp, IS_SSB_CORE)) {
1803 /* We don't use firmware. */
1804 return 0;
1805 }
1806
1807 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1808 /* Wait up to 20ms for init done. */
1809 for (i = 0; i < 200; i++) {
1810 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1811 return 0;
1812 if (pci_channel_offline(tp->pdev))
1813 return -ENODEV;
1814
1815 udelay(100);
1816 }
1817 return -ENODEV;
1818 }
1819
1820 /* Wait for firmware initialization to complete. */
1821 for (i = 0; i < 100000; i++) {
1822 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1823 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1824 break;
1825 if (pci_channel_offline(tp->pdev)) {
1826 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1827 tg3_flag_set(tp, NO_FWARE_REPORTED);
1828 netdev_info(tp->dev, "No firmware running\n");
1829 }
1830
1831 break;
1832 }
1833
1834 udelay(10);
1835 }
1836
1837 /* Chip might not be fitted with firmware. Some Sun onboard
1838 * parts are configured like that. So don't signal the timeout
1839 * of the above loop as an error, but do report the lack of
1840 * running firmware once.
1841 */
1842 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1843 tg3_flag_set(tp, NO_FWARE_REPORTED);
1844
1845 netdev_info(tp->dev, "No firmware running\n");
1846 }
1847
1848 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1849 /* The 57765 A0 needs a little more
1850 * time to do some important work.
1851 */
1852 mdelay(10);
1853 }
1854
1855 return 0;
1856 }
1857
1858 static void tg3_link_report(struct tg3 *tp)
1859 {
1860 if (!netif_carrier_ok(tp->dev)) {
1861 netif_info(tp, link, tp->dev, "Link is down\n");
1862 tg3_ump_link_report(tp);
1863 } else if (netif_msg_link(tp)) {
1864 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1865 (tp->link_config.active_speed == SPEED_1000 ?
1866 1000 :
1867 (tp->link_config.active_speed == SPEED_100 ?
1868 100 : 10)),
1869 (tp->link_config.active_duplex == DUPLEX_FULL ?
1870 "full" : "half"));
1871
1872 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1873 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1874 "on" : "off",
1875 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1876 "on" : "off");
1877
1878 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1879 netdev_info(tp->dev, "EEE is %s\n",
1880 tp->setlpicnt ? "enabled" : "disabled");
1881
1882 tg3_ump_link_report(tp);
1883 }
1884
1885 tp->link_up = netif_carrier_ok(tp->dev);
1886 }
1887
1888 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1889 {
1890 u32 flowctrl = 0;
1891
1892 if (adv & ADVERTISE_PAUSE_CAP) {
1893 flowctrl |= FLOW_CTRL_RX;
1894 if (!(adv & ADVERTISE_PAUSE_ASYM))
1895 flowctrl |= FLOW_CTRL_TX;
1896 } else if (adv & ADVERTISE_PAUSE_ASYM)
1897 flowctrl |= FLOW_CTRL_TX;
1898
1899 return flowctrl;
1900 }
1901
1902 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1903 {
1904 u16 miireg;
1905
1906 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1907 miireg = ADVERTISE_1000XPAUSE;
1908 else if (flow_ctrl & FLOW_CTRL_TX)
1909 miireg = ADVERTISE_1000XPSE_ASYM;
1910 else if (flow_ctrl & FLOW_CTRL_RX)
1911 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1912 else
1913 miireg = 0;
1914
1915 return miireg;
1916 }
1917
1918 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1919 {
1920 u32 flowctrl = 0;
1921
1922 if (adv & ADVERTISE_1000XPAUSE) {
1923 flowctrl |= FLOW_CTRL_RX;
1924 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1925 flowctrl |= FLOW_CTRL_TX;
1926 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1927 flowctrl |= FLOW_CTRL_TX;
1928
1929 return flowctrl;
1930 }
1931
1932 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1933 {
1934 u8 cap = 0;
1935
1936 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1937 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1938 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1939 if (lcladv & ADVERTISE_1000XPAUSE)
1940 cap = FLOW_CTRL_RX;
1941 if (rmtadv & ADVERTISE_1000XPAUSE)
1942 cap = FLOW_CTRL_TX;
1943 }
1944
1945 return cap;
1946 }
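/* For illustration (sketch, not part of the driver): the helpers above
 * implement IEEE 802.3 Annex 28B pause resolution. The disabled block
 * below walks two common advertisement combinations; every identifier
 * except the hypothetical example function name comes from mii.h or
 * from this file.
 */
#if 0	/* example only -- never compiled */
static void tg3_pause_resolution_example(void)
{
	u16 lcl = ADVERTISE_1000XPAUSE;		/* symmetric pause only */
	u16 rmt = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;

	/* Both ends advertise symmetric pause, so pause frames may
	 * flow in both directions.
	 */
	WARN_ON(tg3_resolve_flowctrl_1000X(lcl, rmt) !=
		(FLOW_CTRL_TX | FLOW_CTRL_RX));

	/* An asymmetric-only station against a symmetric partner may
	 * send pause frames but must ignore received ones.
	 */
	lcl = ADVERTISE_1000XPSE_ASYM;
	WARN_ON(tg3_resolve_flowctrl_1000X(lcl, rmt) != FLOW_CTRL_TX);
}
#endif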
1947
1948 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1949 {
1950 u8 autoneg;
1951 u8 flowctrl = 0;
1952 u32 old_rx_mode = tp->rx_mode;
1953 u32 old_tx_mode = tp->tx_mode;
1954
1955 if (tg3_flag(tp, USE_PHYLIB))
1956 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1957 else
1958 autoneg = tp->link_config.autoneg;
1959
1960 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1961 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1962 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1963 else
1964 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1965 } else
1966 flowctrl = tp->link_config.flowctrl;
1967
1968 tp->link_config.active_flowctrl = flowctrl;
1969
1970 if (flowctrl & FLOW_CTRL_RX)
1971 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1972 else
1973 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1974
1975 if (old_rx_mode != tp->rx_mode)
1976 tw32_f(MAC_RX_MODE, tp->rx_mode);
1977
1978 if (flowctrl & FLOW_CTRL_TX)
1979 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1980 else
1981 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1982
1983 if (old_tx_mode != tp->tx_mode)
1984 tw32_f(MAC_TX_MODE, tp->tx_mode);
1985 }
1986
1987 static void tg3_adjust_link(struct net_device *dev)
1988 {
1989 u8 oldflowctrl, linkmesg = 0;
1990 u32 mac_mode, lcl_adv, rmt_adv;
1991 struct tg3 *tp = netdev_priv(dev);
1992 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1993
1994 spin_lock_bh(&tp->lock);
1995
1996 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1997 MAC_MODE_HALF_DUPLEX);
1998
1999 oldflowctrl = tp->link_config.active_flowctrl;
2000
2001 if (phydev->link) {
2002 lcl_adv = 0;
2003 rmt_adv = 0;
2004
2005 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2006 mac_mode |= MAC_MODE_PORT_MODE_MII;
2007 else if (phydev->speed == SPEED_1000 ||
2008 tg3_asic_rev(tp) != ASIC_REV_5785)
2009 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2010 else
2011 mac_mode |= MAC_MODE_PORT_MODE_MII;
2012
2013 if (phydev->duplex == DUPLEX_HALF)
2014 mac_mode |= MAC_MODE_HALF_DUPLEX;
2015 else {
2016 lcl_adv = mii_advertise_flowctrl(
2017 tp->link_config.flowctrl);
2018
2019 if (phydev->pause)
2020 rmt_adv = LPA_PAUSE_CAP;
2021 if (phydev->asym_pause)
2022 rmt_adv |= LPA_PAUSE_ASYM;
2023 }
2024
2025 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2026 } else
2027 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2028
2029 if (mac_mode != tp->mac_mode) {
2030 tp->mac_mode = mac_mode;
2031 tw32_f(MAC_MODE, tp->mac_mode);
2032 udelay(40);
2033 }
2034
2035 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2036 if (phydev->speed == SPEED_10)
2037 tw32(MAC_MI_STAT,
2038 MAC_MI_STAT_10MBPS_MODE |
2039 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2040 else
2041 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2042 }
2043
2044 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2045 tw32(MAC_TX_LENGTHS,
2046 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2047 (6 << TX_LENGTHS_IPG_SHIFT) |
2048 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2049 else
2050 tw32(MAC_TX_LENGTHS,
2051 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2052 (6 << TX_LENGTHS_IPG_SHIFT) |
2053 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2054
2055 if (phydev->link != tp->old_link ||
2056 phydev->speed != tp->link_config.active_speed ||
2057 phydev->duplex != tp->link_config.active_duplex ||
2058 oldflowctrl != tp->link_config.active_flowctrl)
2059 linkmesg = 1;
2060
2061 tp->old_link = phydev->link;
2062 tp->link_config.active_speed = phydev->speed;
2063 tp->link_config.active_duplex = phydev->duplex;
2064
2065 spin_unlock_bh(&tp->lock);
2066
2067 if (linkmesg)
2068 tg3_link_report(tp);
2069 }
2070
2071 static int tg3_phy_init(struct tg3 *tp)
2072 {
2073 struct phy_device *phydev;
2074
2075 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2076 return 0;
2077
2078 /* Bring the PHY back to a known state. */
2079 tg3_bmcr_reset(tp);
2080
2081 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2082
2083 /* Attach the MAC to the PHY. */
2084 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2085 tg3_adjust_link, phydev->interface);
2086 if (IS_ERR(phydev)) {
2087 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2088 return PTR_ERR(phydev);
2089 }
2090
2091 /* Mask with MAC supported features. */
2092 switch (phydev->interface) {
2093 case PHY_INTERFACE_MODE_GMII:
2094 case PHY_INTERFACE_MODE_RGMII:
2095 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2096 phydev->supported &= (PHY_GBIT_FEATURES |
2097 SUPPORTED_Pause |
2098 SUPPORTED_Asym_Pause);
2099 break;
2100 }
2101 /* fall through */
2102 case PHY_INTERFACE_MODE_MII:
2103 phydev->supported &= (PHY_BASIC_FEATURES |
2104 SUPPORTED_Pause |
2105 SUPPORTED_Asym_Pause);
2106 break;
2107 default:
2108 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2109 return -EINVAL;
2110 }
2111
2112 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2113
2114 phydev->advertising = phydev->supported;
2115
2116 return 0;
2117 }
2118
2119 static void tg3_phy_start(struct tg3 *tp)
2120 {
2121 struct phy_device *phydev;
2122
2123 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2124 return;
2125
2126 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2127
2128 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2129 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2130 phydev->speed = tp->link_config.speed;
2131 phydev->duplex = tp->link_config.duplex;
2132 phydev->autoneg = tp->link_config.autoneg;
2133 phydev->advertising = tp->link_config.advertising;
2134 }
2135
2136 phy_start(phydev);
2137
2138 phy_start_aneg(phydev);
2139 }
2140
2141 static void tg3_phy_stop(struct tg3 *tp)
2142 {
2143 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2144 return;
2145
2146 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2147 }
2148
2149 static void tg3_phy_fini(struct tg3 *tp)
2150 {
2151 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2152 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2153 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2154 }
2155 }
2156
2157 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2158 {
2159 int err;
2160 u32 val;
2161
2162 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2163 return 0;
2164
2165 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2166 /* Cannot do read-modify-write on 5401 */
2167 err = tg3_phy_auxctl_write(tp,
2168 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2169 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2170 0x4c20);
2171 goto done;
2172 }
2173
2174 err = tg3_phy_auxctl_read(tp,
2175 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2176 if (err)
2177 return err;
2178
2179 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2180 err = tg3_phy_auxctl_write(tp,
2181 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2182
2183 done:
2184 return err;
2185 }
2186
2187 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2188 {
2189 u32 phytest;
2190
2191 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2192 u32 phy;
2193
2194 tg3_writephy(tp, MII_TG3_FET_TEST,
2195 phytest | MII_TG3_FET_SHADOW_EN);
2196 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2197 if (enable)
2198 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2199 else
2200 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2201 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2202 }
2203 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2204 }
2205 }
2206
2207 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2208 {
2209 u32 reg;
2210
2211 if (!tg3_flag(tp, 5705_PLUS) ||
2212 (tg3_flag(tp, 5717_PLUS) &&
2213 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2214 return;
2215
2216 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2217 tg3_phy_fet_toggle_apd(tp, enable);
2218 return;
2219 }
2220
2221 reg = MII_TG3_MISC_SHDW_WREN |
2222 MII_TG3_MISC_SHDW_SCR5_SEL |
2223 MII_TG3_MISC_SHDW_SCR5_LPED |
2224 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2225 MII_TG3_MISC_SHDW_SCR5_SDTL |
2226 MII_TG3_MISC_SHDW_SCR5_C125OE;
2227 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2228 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2229
2230 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2231
2232
2233 reg = MII_TG3_MISC_SHDW_WREN |
2234 MII_TG3_MISC_SHDW_APD_SEL |
2235 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2236 if (enable)
2237 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2238
2239 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2240 }
2241
2242 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2243 {
2244 u32 phy;
2245
2246 if (!tg3_flag(tp, 5705_PLUS) ||
2247 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2248 return;
2249
2250 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2251 u32 ephy;
2252
2253 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2254 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2255
2256 tg3_writephy(tp, MII_TG3_FET_TEST,
2257 ephy | MII_TG3_FET_SHADOW_EN);
2258 if (!tg3_readphy(tp, reg, &phy)) {
2259 if (enable)
2260 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2261 else
2262 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2263 tg3_writephy(tp, reg, phy);
2264 }
2265 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2266 }
2267 } else {
2268 int ret;
2269
2270 ret = tg3_phy_auxctl_read(tp,
2271 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2272 if (!ret) {
2273 if (enable)
2274 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2275 else
2276 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2277 tg3_phy_auxctl_write(tp,
2278 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2279 }
2280 }
2281 }
2282
2283 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2284 {
2285 int ret;
2286 u32 val;
2287
2288 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2289 return;
2290
2291 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2292 if (!ret)
2293 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2294 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2295 }
2296
2297 static void tg3_phy_apply_otp(struct tg3 *tp)
2298 {
2299 u32 otp, phy;
2300
2301 if (!tp->phy_otp)
2302 return;
2303
2304 otp = tp->phy_otp;
2305
2306 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2307 return;
2308
2309 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2310 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2311 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2312
2313 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2314 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2315 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2316
2317 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2318 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2319 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2320
2321 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2322 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2323
2324 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2325 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2326
2327 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2328 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2329 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2330
2331 tg3_phy_toggle_auxctl_smdsp(tp, false);
2332 }
2333
2334 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2335 {
2336 u32 val;
2337 struct ethtool_eee *dest = &tp->eee;
2338
2339 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2340 return;
2341
2342 if (eee)
2343 dest = eee;
2344
2345 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2346 return;
2347
2348 /* Pull eee_active */
2349 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2350 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2351 dest->eee_active = 1;
2352 } else
2353 dest->eee_active = 0;
2354
2355 /* Pull lp advertised settings */
2356 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2357 return;
2358 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2359
2360 /* Pull advertised and eee_enabled settings */
2361 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2362 return;
2363 dest->eee_enabled = !!val;
2364 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2365
2366 /* Pull tx_lpi_enabled */
2367 val = tr32(TG3_CPMU_EEE_MODE);
2368 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2369
2370 /* Pull lpi timer value */
2371 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2372 }
2373
2374 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2375 {
2376 u32 val;
2377
2378 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2379 return;
2380
2381 tp->setlpicnt = 0;
2382
2383 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2384 current_link_up &&
2385 tp->link_config.active_duplex == DUPLEX_FULL &&
2386 (tp->link_config.active_speed == SPEED_100 ||
2387 tp->link_config.active_speed == SPEED_1000)) {
2388 u32 eeectl;
2389
2390 if (tp->link_config.active_speed == SPEED_1000)
2391 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2392 else
2393 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2394
2395 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2396
2397 tg3_eee_pull_config(tp, NULL);
2398 if (tp->eee.eee_active)
2399 tp->setlpicnt = 2;
2400 }
2401
2402 if (!tp->setlpicnt) {
2403 if (current_link_up &&
2404 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2405 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2406 tg3_phy_toggle_auxctl_smdsp(tp, false);
2407 }
2408
2409 val = tr32(TG3_CPMU_EEE_MODE);
2410 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2411 }
2412 }
2413
2414 static void tg3_phy_eee_enable(struct tg3 *tp)
2415 {
2416 u32 val;
2417
2418 if (tp->link_config.active_speed == SPEED_1000 &&
2419 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2420 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2421 tg3_flag(tp, 57765_CLASS)) &&
2422 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2423 val = MII_TG3_DSP_TAP26_ALNOKO |
2424 MII_TG3_DSP_TAP26_RMRXSTO;
2425 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2426 tg3_phy_toggle_auxctl_smdsp(tp, false);
2427 }
2428
2429 val = tr32(TG3_CPMU_EEE_MODE);
2430 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2431 }
2432
2433 static int tg3_wait_macro_done(struct tg3 *tp)
2434 {
2435 int limit = 100;
2436
2437 while (limit--) {
2438 u32 tmp32;
2439
2440 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2441 if ((tmp32 & 0x1000) == 0)
2442 break;
2443 }
2444 }
2445 if (limit < 0)
2446 return -EBUSY;
2447
2448 return 0;
2449 }
2450
2451 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2452 {
2453 static const u32 test_pat[4][6] = {
2454 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2455 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2456 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2457 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2458 };
2459 int chan;
2460
2461 for (chan = 0; chan < 4; chan++) {
2462 int i;
2463
2464 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2465 (chan * 0x2000) | 0x0200);
2466 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2467
2468 for (i = 0; i < 6; i++)
2469 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2470 test_pat[chan][i]);
2471
2472 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2473 if (tg3_wait_macro_done(tp)) {
2474 *resetp = 1;
2475 return -EBUSY;
2476 }
2477
2478 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2479 (chan * 0x2000) | 0x0200);
2480 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2481 if (tg3_wait_macro_done(tp)) {
2482 *resetp = 1;
2483 return -EBUSY;
2484 }
2485
2486 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2487 if (tg3_wait_macro_done(tp)) {
2488 *resetp = 1;
2489 return -EBUSY;
2490 }
2491
2492 for (i = 0; i < 6; i += 2) {
2493 u32 low, high;
2494
2495 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2496 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2497 tg3_wait_macro_done(tp)) {
2498 *resetp = 1;
2499 return -EBUSY;
2500 }
2501 low &= 0x7fff;
2502 high &= 0x000f;
2503 if (low != test_pat[chan][i] ||
2504 high != test_pat[chan][i+1]) {
2505 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2506 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2507 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2508
2509 return -EBUSY;
2510 }
2511 }
2512 }
2513
2514 return 0;
2515 }
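/* For illustration (sketch, not part of the driver): the helper above
 * stuffs a six-word test pattern into each of the four DSP channels,
 * latches it, reads it back with the low word masked to 15 bits and the
 * high word to 4 bits, and requests a fresh PHY reset via *resetp on
 * any mismatch or macro timeout.
 */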
2516
2517 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2518 {
2519 int chan;
2520
2521 for (chan = 0; chan < 4; chan++) {
2522 int i;
2523
2524 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2525 (chan * 0x2000) | 0x0200);
2526 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2527 for (i = 0; i < 6; i++)
2528 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2529 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2530 if (tg3_wait_macro_done(tp))
2531 return -EBUSY;
2532 }
2533
2534 return 0;
2535 }
2536
2537 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2538 {
2539 u32 reg32, phy9_orig;
2540 int retries, do_phy_reset, err;
2541
2542 retries = 10;
2543 do_phy_reset = 1;
2544 do {
2545 if (do_phy_reset) {
2546 err = tg3_bmcr_reset(tp);
2547 if (err)
2548 return err;
2549 do_phy_reset = 0;
2550 }
2551
2552 /* Disable transmitter and interrupt. */
2553 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2554 continue;
2555
2556 reg32 |= 0x3000;
2557 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2558
2559 /* Set full-duplex, 1000 Mbps. */
2560 tg3_writephy(tp, MII_BMCR,
2561 BMCR_FULLDPLX | BMCR_SPEED1000);
2562
2563 /* Set to master mode. */
2564 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2565 continue;
2566
2567 tg3_writephy(tp, MII_CTRL1000,
2568 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2569
2570 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2571 if (err)
2572 return err;
2573
2574 /* Block the PHY control access. */
2575 tg3_phydsp_write(tp, 0x8005, 0x0800);
2576
2577 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2578 if (!err)
2579 break;
2580 } while (--retries);
2581
2582 err = tg3_phy_reset_chanpat(tp);
2583 if (err)
2584 return err;
2585
2586 tg3_phydsp_write(tp, 0x8005, 0x0000);
2587
2588 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2589 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2590
2591 tg3_phy_toggle_auxctl_smdsp(tp, false);
2592
2593 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2594
2595 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2596 reg32 &= ~0x3000;
2597 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2598 } else if (!err)
2599 err = -EBUSY;
2600
2601 return err;
2602 }
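/* For illustration (sketch, not part of the driver): the workaround
 * above retries up to ten times: reset the PHY if requested, isolate
 * the transmitter, force 1000-full master mode, then write and verify
 * the DSP test pattern; only after a clean pass are the channel
 * registers, MII_CTRL1000 and the transmitter restored.
 */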
2603
2604 static void tg3_carrier_off(struct tg3 *tp)
2605 {
2606 netif_carrier_off(tp->dev);
2607 tp->link_up = false;
2608 }
2609
2610 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2611 {
2612 if (tg3_flag(tp, ENABLE_ASF))
2613 netdev_warn(tp->dev,
2614 "Management side-band traffic will be interrupted during phy settings change\n");
2615 }
2616
2617 /* Reset the tigon3 PHY and bring it back to a known good state,
2618 * reapplying the chip- and PHY-specific workarounds below.
2619 */
2620 static int tg3_phy_reset(struct tg3 *tp)
2621 {
2622 u32 val, cpmuctrl;
2623 int err;
2624
2625 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2626 val = tr32(GRC_MISC_CFG);
2627 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2628 udelay(40);
2629 }
2630 err = tg3_readphy(tp, MII_BMSR, &val);
2631 err |= tg3_readphy(tp, MII_BMSR, &val);
2632 if (err != 0)
2633 return -EBUSY;
2634
2635 if (netif_running(tp->dev) && tp->link_up) {
2636 netif_carrier_off(tp->dev);
2637 tg3_link_report(tp);
2638 }
2639
2640 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2641 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2642 tg3_asic_rev(tp) == ASIC_REV_5705) {
2643 err = tg3_phy_reset_5703_4_5(tp);
2644 if (err)
2645 return err;
2646 goto out;
2647 }
2648
2649 cpmuctrl = 0;
2650 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2651 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2652 cpmuctrl = tr32(TG3_CPMU_CTRL);
2653 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2654 tw32(TG3_CPMU_CTRL,
2655 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2656 }
2657
2658 err = tg3_bmcr_reset(tp);
2659 if (err)
2660 return err;
2661
2662 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2663 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2664 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2665
2666 tw32(TG3_CPMU_CTRL, cpmuctrl);
2667 }
2668
2669 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2670 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2671 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2672 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2673 CPMU_LSPD_1000MB_MACCLK_12_5) {
2674 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2675 udelay(40);
2676 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2677 }
2678 }
2679
2680 if (tg3_flag(tp, 5717_PLUS) &&
2681 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2682 return 0;
2683
2684 tg3_phy_apply_otp(tp);
2685
2686 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2687 tg3_phy_toggle_apd(tp, true);
2688 else
2689 tg3_phy_toggle_apd(tp, false);
2690
2691 out:
2692 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2693 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2694 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2695 tg3_phydsp_write(tp, 0x000a, 0x0323);
2696 tg3_phy_toggle_auxctl_smdsp(tp, false);
2697 }
2698
2699 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2700 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2701 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2702 }
2703
2704 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2705 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2706 tg3_phydsp_write(tp, 0x000a, 0x310b);
2707 tg3_phydsp_write(tp, 0x201f, 0x9506);
2708 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2709 tg3_phy_toggle_auxctl_smdsp(tp, false);
2710 }
2711 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2712 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2713 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2714 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2715 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2716 tg3_writephy(tp, MII_TG3_TEST1,
2717 MII_TG3_TEST1_TRIM_EN | 0x4);
2718 } else
2719 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2720
2721 tg3_phy_toggle_auxctl_smdsp(tp, false);
2722 }
2723 }
2724
2725 /* Set Extended packet length bit (bit 14) on all chips
2726 * that support jumbo frames. */
2727 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2728 /* Cannot do read-modify-write on 5401 */
2729 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2730 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2731 /* Set bit 14 with read-modify-write to preserve other bits */
2732 err = tg3_phy_auxctl_read(tp,
2733 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2734 if (!err)
2735 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2736 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2737 }
2738
2739 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2740 * jumbo frame transmission.
2741 */
2742 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2743 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2744 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2745 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2746 }
2747
2748 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2749 /* adjust output voltage */
2750 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2751 }
2752
2753 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2754 tg3_phydsp_write(tp, 0xffb, 0x4000);
2755
2756 tg3_phy_toggle_automdix(tp, true);
2757 tg3_phy_set_wirespeed(tp);
2758 return 0;
2759 }
2760
2761 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2762 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2763 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2764 TG3_GPIO_MSG_NEED_VAUX)
2765 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2766 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2767 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2768 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2769 (TG3_GPIO_MSG_DRVR_PRES << 12))
2770
2771 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2772 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2773 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2774 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2775 (TG3_GPIO_MSG_NEED_VAUX << 12))
2776
2777 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2778 {
2779 u32 status, shift;
2780
2781 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2782 tg3_asic_rev(tp) == ASIC_REV_5719)
2783 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2784 else
2785 status = tr32(TG3_CPMU_DRV_STATUS);
2786
2787 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2788 status &= ~(TG3_GPIO_MSG_MASK << shift);
2789 status |= (newstat << shift);
2790
2791 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2792 tg3_asic_rev(tp) == ASIC_REV_5719)
2793 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2794 else
2795 tw32(TG3_CPMU_DRV_STATUS, status);
2796
2797 return status >> TG3_APE_GPIO_MSG_SHIFT;
2798 }
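/* For illustration (sketch, not part of the driver): each PCI function
 * owns a 4-bit field in the shared status word at bit offset
 * TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn. E.g. function 2 posting
 * TG3_GPIO_MSG_NEED_VAUX touches only bits 8-11 of the message area,
 * which is why the old field is masked out before the new value is
 * OR-ed in.
 */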
2799
2800 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2801 {
2802 if (!tg3_flag(tp, IS_NIC))
2803 return 0;
2804
2805 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2807 tg3_asic_rev(tp) == ASIC_REV_5720) {
2808 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2809 return -EIO;
2810
2811 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2812
2813 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2814 TG3_GRC_LCLCTL_PWRSW_DELAY);
2815
2816 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2817 } else {
2818 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2819 TG3_GRC_LCLCTL_PWRSW_DELAY);
2820 }
2821
2822 return 0;
2823 }
2824
2825 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2826 {
2827 u32 grc_local_ctrl;
2828
2829 if (!tg3_flag(tp, IS_NIC) ||
2830 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2831 tg3_asic_rev(tp) == ASIC_REV_5701)
2832 return;
2833
2834 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2835
2836 tw32_wait_f(GRC_LOCAL_CTRL,
2837 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2838 TG3_GRC_LCLCTL_PWRSW_DELAY);
2839
2840 tw32_wait_f(GRC_LOCAL_CTRL,
2841 grc_local_ctrl,
2842 TG3_GRC_LCLCTL_PWRSW_DELAY);
2843
2844 tw32_wait_f(GRC_LOCAL_CTRL,
2845 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2846 TG3_GRC_LCLCTL_PWRSW_DELAY);
2847 }
2848
2849 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2850 {
2851 if (!tg3_flag(tp, IS_NIC))
2852 return;
2853
2854 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2855 tg3_asic_rev(tp) == ASIC_REV_5701) {
2856 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2857 (GRC_LCLCTRL_GPIO_OE0 |
2858 GRC_LCLCTRL_GPIO_OE1 |
2859 GRC_LCLCTRL_GPIO_OE2 |
2860 GRC_LCLCTRL_GPIO_OUTPUT0 |
2861 GRC_LCLCTRL_GPIO_OUTPUT1),
2862 TG3_GRC_LCLCTL_PWRSW_DELAY);
2863 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2864 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2865 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2866 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2867 GRC_LCLCTRL_GPIO_OE1 |
2868 GRC_LCLCTRL_GPIO_OE2 |
2869 GRC_LCLCTRL_GPIO_OUTPUT0 |
2870 GRC_LCLCTRL_GPIO_OUTPUT1 |
2871 tp->grc_local_ctrl;
2872 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2873 TG3_GRC_LCLCTL_PWRSW_DELAY);
2874
2875 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2876 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2877 TG3_GRC_LCLCTL_PWRSW_DELAY);
2878
2879 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2880 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2881 TG3_GRC_LCLCTL_PWRSW_DELAY);
2882 } else {
2883 u32 no_gpio2;
2884 u32 grc_local_ctrl = 0;
2885
2886 /* Workaround to prevent drawing too much current. */
2887 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2888 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2889 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2890 grc_local_ctrl,
2891 TG3_GRC_LCLCTL_PWRSW_DELAY);
2892 }
2893
2894 /* On 5753 and variants, GPIO2 cannot be used. */
2895 no_gpio2 = tp->nic_sram_data_cfg &
2896 NIC_SRAM_DATA_CFG_NO_GPIO2;
2897
2898 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2899 GRC_LCLCTRL_GPIO_OE1 |
2900 GRC_LCLCTRL_GPIO_OE2 |
2901 GRC_LCLCTRL_GPIO_OUTPUT1 |
2902 GRC_LCLCTRL_GPIO_OUTPUT2;
2903 if (no_gpio2) {
2904 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2905 GRC_LCLCTRL_GPIO_OUTPUT2);
2906 }
2907 tw32_wait_f(GRC_LOCAL_CTRL,
2908 tp->grc_local_ctrl | grc_local_ctrl,
2909 TG3_GRC_LCLCTL_PWRSW_DELAY);
2910
2911 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2912
2913 tw32_wait_f(GRC_LOCAL_CTRL,
2914 tp->grc_local_ctrl | grc_local_ctrl,
2915 TG3_GRC_LCLCTL_PWRSW_DELAY);
2916
2917 if (!no_gpio2) {
2918 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2919 tw32_wait_f(GRC_LOCAL_CTRL,
2920 tp->grc_local_ctrl | grc_local_ctrl,
2921 TG3_GRC_LCLCTL_PWRSW_DELAY);
2922 }
2923 }
2924 }
2925
2926 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2927 {
2928 u32 msg = 0;
2929
2930 /* Serialize power state transitions */
2931 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2932 return;
2933
2934 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2935 msg = TG3_GPIO_MSG_NEED_VAUX;
2936
2937 msg = tg3_set_function_status(tp, msg);
2938
2939 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2940 goto done;
2941
2942 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2943 tg3_pwrsrc_switch_to_vaux(tp);
2944 else
2945 tg3_pwrsrc_die_with_vmain(tp);
2946
2947 done:
2948 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2949 }
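/* For illustration (sketch, not part of the driver): the function above
 * posts only NEED_VAUX (when WoL, ASF or APE is active) and clears its
 * own driver-present bit; if any other function still reports a driver
 * present, power policy is left to that driver, otherwise the device
 * switches to Vaux when any function asked for it and parks on Vmain
 * when none did.
 */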
2950
2951 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2952 {
2953 bool need_vaux = false;
2954
2955 /* The GPIOs serve entirely different purposes on the 57765 class, so leave them alone here. */
2956 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2957 return;
2958
2959 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2960 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2961 tg3_asic_rev(tp) == ASIC_REV_5720) {
2962 tg3_frob_aux_power_5717(tp, include_wol ?
2963 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2964 return;
2965 }
2966
2967 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2968 struct net_device *dev_peer;
2969
2970 dev_peer = pci_get_drvdata(tp->pdev_peer);
2971
2972 /* remove_one() may have been run on the peer. */
2973 if (dev_peer) {
2974 struct tg3 *tp_peer = netdev_priv(dev_peer);
2975
2976 if (tg3_flag(tp_peer, INIT_COMPLETE))
2977 return;
2978
2979 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2980 tg3_flag(tp_peer, ENABLE_ASF))
2981 need_vaux = true;
2982 }
2983 }
2984
2985 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2986 tg3_flag(tp, ENABLE_ASF))
2987 need_vaux = true;
2988
2989 if (need_vaux)
2990 tg3_pwrsrc_switch_to_vaux(tp);
2991 else
2992 tg3_pwrsrc_die_with_vmain(tp);
2993 }
2994
2995 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2996 {
2997 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2998 return 1;
2999 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3000 if (speed != SPEED_10)
3001 return 1;
3002 } else if (speed == SPEED_10)
3003 return 1;
3004
3005 return 0;
3006 }
3007
3008 static bool tg3_phy_power_bug(struct tg3 *tp)
3009 {
3010 switch (tg3_asic_rev(tp)) {
3011 case ASIC_REV_5700:
3012 case ASIC_REV_5704:
3013 return true;
3014 case ASIC_REV_5780:
3015 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3016 return true;
3017 return false;
3018 case ASIC_REV_5717:
3019 if (!tp->pci_fn)
3020 return true;
3021 return false;
3022 case ASIC_REV_5719:
3023 case ASIC_REV_5720:
3024 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3025 !tp->pci_fn)
3026 return true;
3027 return false;
3028 }
3029
3030 return false;
3031 }
3032
3033 static bool tg3_phy_led_bug(struct tg3 *tp)
3034 {
3035 switch (tg3_asic_rev(tp)) {
3036 case ASIC_REV_5719:
3037 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3038 !tp->pci_fn)
3039 return true;
3040 return false;
3041 }
3042
3043 return false;
3044 }
3045
3046 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3047 {
3048 u32 val;
3049
3050 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3051 return;
3052
3053 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3054 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3055 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3056 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3057
3058 sg_dig_ctrl |=
3059 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3060 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3061 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3062 }
3063 return;
3064 }
3065
3066 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3067 tg3_bmcr_reset(tp);
3068 val = tr32(GRC_MISC_CFG);
3069 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3070 udelay(40);
3071 return;
3072 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3073 u32 phytest;
3074 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3075 u32 phy;
3076
3077 tg3_writephy(tp, MII_ADVERTISE, 0);
3078 tg3_writephy(tp, MII_BMCR,
3079 BMCR_ANENABLE | BMCR_ANRESTART);
3080
3081 tg3_writephy(tp, MII_TG3_FET_TEST,
3082 phytest | MII_TG3_FET_SHADOW_EN);
3083 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3084 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3085 tg3_writephy(tp,
3086 MII_TG3_FET_SHDW_AUXMODE4,
3087 phy);
3088 }
3089 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3090 }
3091 return;
3092 } else if (do_low_power) {
3093 if (!tg3_phy_led_bug(tp))
3094 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3095 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3096
3097 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3098 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3099 MII_TG3_AUXCTL_PCTL_VREG_11V;
3100 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3101 }
3102
3103 /* On some chips the PHY must not be powered down, because of
3104 * hardware bugs (see tg3_phy_power_bug() above).
3105 */
3106 if (tg3_phy_power_bug(tp))
3107 return;
3108
3109 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3110 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3111 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3112 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3113 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3114 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3115 }
3116
3117 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3118 }
3119
3120 /* tp->lock is held. */
3121 static int tg3_nvram_lock(struct tg3 *tp)
3122 {
3123 if (tg3_flag(tp, NVRAM)) {
3124 int i;
3125
3126 if (tp->nvram_lock_cnt == 0) {
3127 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3128 for (i = 0; i < 8000; i++) {
3129 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3130 break;
3131 udelay(20);
3132 }
3133 if (i == 8000) {
3134 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3135 return -ENODEV;
3136 }
3137 }
3138 tp->nvram_lock_cnt++;
3139 }
3140 return 0;
3141 }
3142
3143 /* tp->lock is held. */
3144 static void tg3_nvram_unlock(struct tg3 *tp)
3145 {
3146 if (tg3_flag(tp, NVRAM)) {
3147 if (tp->nvram_lock_cnt > 0)
3148 tp->nvram_lock_cnt--;
3149 if (tp->nvram_lock_cnt == 0)
3150 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3151 }
3152 }
3153
3154 /* tp->lock is held. */
3155 static void tg3_enable_nvram_access(struct tg3 *tp)
3156 {
3157 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3158 u32 nvaccess = tr32(NVRAM_ACCESS);
3159
3160 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3161 }
3162 }
3163
3164 /* tp->lock is held. */
3165 static void tg3_disable_nvram_access(struct tg3 *tp)
3166 {
3167 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3168 u32 nvaccess = tr32(NVRAM_ACCESS);
3169
3170 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3171 }
3172 }
3173
3174 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3175 u32 offset, u32 *val)
3176 {
3177 u32 tmp;
3178 int i;
3179
3180 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3181 return -EINVAL;
3182
3183 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3184 EEPROM_ADDR_DEVID_MASK |
3185 EEPROM_ADDR_READ);
3186 tw32(GRC_EEPROM_ADDR,
3187 tmp |
3188 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3189 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3190 EEPROM_ADDR_ADDR_MASK) |
3191 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3192
3193 for (i = 0; i < 1000; i++) {
3194 tmp = tr32(GRC_EEPROM_ADDR);
3195
3196 if (tmp & EEPROM_ADDR_COMPLETE)
3197 break;
3198 msleep(1);
3199 }
3200 if (!(tmp & EEPROM_ADDR_COMPLETE))
3201 return -EBUSY;
3202
3203 tmp = tr32(GRC_EEPROM_DATA);
3204
3205 /*
3206 * The data will always be opposite the native endian
3207 * format. Perform a blind byteswap to compensate.
3208 */
3209 *val = swab32(tmp);
3210
3211 return 0;
3212 }
3213
3214 #define NVRAM_CMD_TIMEOUT 10000
3215
3216 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3217 {
3218 int i;
3219
3220 tw32(NVRAM_CMD, nvram_cmd);
3221 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3222 udelay(10);
3223 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3224 udelay(10);
3225 break;
3226 }
3227 }
3228
3229 if (i == NVRAM_CMD_TIMEOUT)
3230 return -EBUSY;
3231
3232 return 0;
3233 }
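/* For illustration (sketch, not part of the driver): NVRAM_CMD_TIMEOUT
 * bounds the DONE poll above at 10000 iterations of udelay(10), i.e.
 * roughly 100ms per NVRAM command.
 */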
3234
3235 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3236 {
3237 if (tg3_flag(tp, NVRAM) &&
3238 tg3_flag(tp, NVRAM_BUFFERED) &&
3239 tg3_flag(tp, FLASH) &&
3240 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3241 (tp->nvram_jedecnum == JEDEC_ATMEL))
3242
3243 addr = ((addr / tp->nvram_pagesize) <<
3244 ATMEL_AT45DB0X1B_PAGE_POS) +
3245 (addr % tp->nvram_pagesize);
3246
3247 return addr;
3248 }
3249
3250 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3251 {
3252 if (tg3_flag(tp, NVRAM) &&
3253 tg3_flag(tp, NVRAM_BUFFERED) &&
3254 tg3_flag(tp, FLASH) &&
3255 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3256 (tp->nvram_jedecnum == JEDEC_ATMEL))
3257
3258 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3259 tp->nvram_pagesize) +
3260 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3261
3262 return addr;
3263 }
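/* For illustration (sketch, not part of the driver): with the usual
 * Atmel AT45DB0x1B geometry of tp->nvram_pagesize == 264 bytes and a
 * 9-bit page position, logical address 1000 sits in page 3 at offset
 * 208, so tg3_nvram_phys_addr() yields (3 << 9) + 208 == 1744, and
 * tg3_nvram_logical_addr() maps 1744 back to 3 * 264 + 208 == 1000.
 */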
3264
3265 /* NOTE: Data read in from NVRAM is byteswapped according to
3266 * the byteswapping settings for all other register accesses.
3267 * tg3 devices are BE devices, so on a BE machine, the data
3268 * returned will be exactly as it is seen in NVRAM. On a LE
3269 * machine, the 32-bit value will be byteswapped.
3270 */
3271 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3272 {
3273 int ret;
3274
3275 if (!tg3_flag(tp, NVRAM))
3276 return tg3_nvram_read_using_eeprom(tp, offset, val);
3277
3278 offset = tg3_nvram_phys_addr(tp, offset);
3279
3280 if (offset > NVRAM_ADDR_MSK)
3281 return -EINVAL;
3282
3283 ret = tg3_nvram_lock(tp);
3284 if (ret)
3285 return ret;
3286
3287 tg3_enable_nvram_access(tp);
3288
3289 tw32(NVRAM_ADDR, offset);
3290 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3291 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3292
3293 if (ret == 0)
3294 *val = tr32(NVRAM_RDDATA);
3295
3296 tg3_disable_nvram_access(tp);
3297
3298 tg3_nvram_unlock(tp);
3299
3300 return ret;
3301 }
3302
3303 /* Ensures NVRAM data is in bytestream format. */
3304 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3305 {
3306 u32 v;
3307 int res = tg3_nvram_read(tp, offset, &v);
3308 if (!res)
3309 *val = cpu_to_be32(v);
3310 return res;
3311 }
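/* For illustration (sketch, not part of the driver): if NVRAM holds the
 * bytes 12 34 56 78, a little-endian host sees tg3_nvram_read() return
 * 0x78563412, and the cpu_to_be32() above stores that back out as the
 * bytes 12 34 56 78; callers of tg3_nvram_read_be32() therefore get the
 * raw NVRAM bytestream regardless of host endianness.
 */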
3312
3313 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3314 u32 offset, u32 len, u8 *buf)
3315 {
3316 int i, j, rc = 0;
3317 u32 val;
3318
3319 for (i = 0; i < len; i += 4) {
3320 u32 addr;
3321 __be32 data;
3322
3323 addr = offset + i;
3324
3325 memcpy(&data, buf + i, 4);
3326
3327 /*
3328 * The SEEPROM interface expects the data to always be opposite
3329 * the native endian format. We accomplish this by reversing
3330 * all the operations that would have been performed on the
3331 * data from a call to tg3_nvram_read_be32().
3332 */
3333 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3334
3335 val = tr32(GRC_EEPROM_ADDR);
3336 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3337
3338 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3339 EEPROM_ADDR_READ);
3340 tw32(GRC_EEPROM_ADDR, val |
3341 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3342 (addr & EEPROM_ADDR_ADDR_MASK) |
3343 EEPROM_ADDR_START |
3344 EEPROM_ADDR_WRITE);
3345
3346 for (j = 0; j < 1000; j++) {
3347 val = tr32(GRC_EEPROM_ADDR);
3348
3349 if (val & EEPROM_ADDR_COMPLETE)
3350 break;
3351 msleep(1);
3352 }
3353 if (!(val & EEPROM_ADDR_COMPLETE)) {
3354 rc = -EBUSY;
3355 break;
3356 }
3357 }
3358
3359 return rc;
3360 }
3361
3362 /* offset and length are dword aligned */
3363 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3364 u8 *buf)
3365 {
3366 int ret = 0;
3367 u32 pagesize = tp->nvram_pagesize;
3368 u32 pagemask = pagesize - 1;
3369 u32 nvram_cmd;
3370 u8 *tmp;
3371
3372 tmp = kmalloc(pagesize, GFP_KERNEL);
3373 if (tmp == NULL)
3374 return -ENOMEM;
3375
3376 while (len) {
3377 int j;
3378 u32 phy_addr, page_off, size;
3379
3380 phy_addr = offset & ~pagemask;
3381
3382 for (j = 0; j < pagesize; j += 4) {
3383 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3384 (__be32 *) (tmp + j));
3385 if (ret)
3386 break;
3387 }
3388 if (ret)
3389 break;
3390
3391 page_off = offset & pagemask;
3392 size = pagesize;
3393 if (len < size)
3394 size = len;
3395
3396 len -= size;
3397
3398 memcpy(tmp + page_off, buf, size);
3399
3400 offset = offset + (pagesize - page_off);
3401
3402 tg3_enable_nvram_access(tp);
3403
3404 /*
3405 * Before we can erase the flash page, we need
3406 * to issue a special "write enable" command.
3407 */
3408 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3409
3410 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3411 break;
3412
3413 /* Erase the target page */
3414 tw32(NVRAM_ADDR, phy_addr);
3415
3416 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3417 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3418
3419 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3420 break;
3421
3422 /* Issue another write enable to start the write. */
3423 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3424
3425 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3426 break;
3427
3428 for (j = 0; j < pagesize; j += 4) {
3429 __be32 data;
3430
3431 data = *((__be32 *) (tmp + j));
3432
3433 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3434
3435 tw32(NVRAM_ADDR, phy_addr + j);
3436
3437 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3438 NVRAM_CMD_WR;
3439
3440 if (j == 0)
3441 nvram_cmd |= NVRAM_CMD_FIRST;
3442 else if (j == (pagesize - 4))
3443 nvram_cmd |= NVRAM_CMD_LAST;
3444
3445 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3446 if (ret)
3447 break;
3448 }
3449 if (ret)
3450 break;
3451 }
3452
3453 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3454 tg3_nvram_exec_cmd(tp, nvram_cmd);
3455
3456 kfree(tmp);
3457
3458 return ret;
3459 }
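/* For illustration (sketch, not part of the driver): the unbuffered
 * path above is read-modify-erase-write per flash page:
 *   1. read the enclosing page into a bounce buffer,
 *   2. merge the caller's data at the page offset,
 *   3. issue WREN, erase the page, then issue WREN again,
 *   4. stream the page back a dword at a time, tagging the first and
 *      last dwords with NVRAM_CMD_FIRST/NVRAM_CMD_LAST,
 *   5. finish with WRDI to drop write enable.
 */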
3460
3461 /* offset and length are dword aligned */
3462 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3463 u8 *buf)
3464 {
3465 int i, ret = 0;
3466
3467 for (i = 0; i < len; i += 4, offset += 4) {
3468 u32 page_off, phy_addr, nvram_cmd;
3469 __be32 data;
3470
3471 memcpy(&data, buf + i, 4);
3472 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3473
3474 page_off = offset % tp->nvram_pagesize;
3475
3476 phy_addr = tg3_nvram_phys_addr(tp, offset);
3477
3478 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3479
3480 if (page_off == 0 || i == 0)
3481 nvram_cmd |= NVRAM_CMD_FIRST;
3482 if (page_off == (tp->nvram_pagesize - 4))
3483 nvram_cmd |= NVRAM_CMD_LAST;
3484
3485 if (i == (len - 4))
3486 nvram_cmd |= NVRAM_CMD_LAST;
3487
3488 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3489 !tg3_flag(tp, FLASH) ||
3490 !tg3_flag(tp, 57765_PLUS))
3491 tw32(NVRAM_ADDR, phy_addr);
3492
3493 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3494 !tg3_flag(tp, 5755_PLUS) &&
3495 (tp->nvram_jedecnum == JEDEC_ST) &&
3496 (nvram_cmd & NVRAM_CMD_FIRST)) {
3497 u32 cmd;
3498
3499 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3500 ret = tg3_nvram_exec_cmd(tp, cmd);
3501 if (ret)
3502 break;
3503 }
3504 if (!tg3_flag(tp, FLASH)) {
3505 /* We always do complete word writes to eeprom. */
3506 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3507 }
3508
3509 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3510 if (ret)
3511 break;
3512 }
3513 return ret;
3514 }
3515
3516 /* offset and length are dword aligned */
3517 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3518 {
3519 int ret;
3520
3521 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3522 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3523 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3524 udelay(40);
3525 }
3526
3527 if (!tg3_flag(tp, NVRAM)) {
3528 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3529 } else {
3530 u32 grc_mode;
3531
3532 ret = tg3_nvram_lock(tp);
3533 if (ret)
3534 return ret;
3535
3536 tg3_enable_nvram_access(tp);
3537 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3538 tw32(NVRAM_WRITE1, 0x406);
3539
3540 grc_mode = tr32(GRC_MODE);
3541 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3542
3543 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3544 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3545 buf);
3546 } else {
3547 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3548 buf);
3549 }
3550
3551 grc_mode = tr32(GRC_MODE);
3552 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3553
3554 tg3_disable_nvram_access(tp);
3555 tg3_nvram_unlock(tp);
3556 }
3557
3558 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3559 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3560 udelay(40);
3561 }
3562
3563 return ret;
3564 }
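/* For illustration (sketch, not part of the driver):
 * tg3_nvram_write_block() picks one of three paths: the legacy SEEPROM
 * interface when the NVRAM flag is clear, the buffered per-dword path
 * for buffered flash and EEPROM-like parts, and the erase-and-rewrite
 * page path for unbuffered flash; when EEPROM_WRITE_PROT gates the
 * part, GPIO OUTPUT1 is dropped for the duration of the write and
 * restored afterwards.
 */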
3565
3566 #define RX_CPU_SCRATCH_BASE 0x30000
3567 #define RX_CPU_SCRATCH_SIZE 0x04000
3568 #define TX_CPU_SCRATCH_BASE 0x34000
3569 #define TX_CPU_SCRATCH_SIZE 0x04000
3570
3571 /* tp->lock is held. */
3572 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3573 {
3574 int i;
3575 const int iters = 10000;
3576
3577 for (i = 0; i < iters; i++) {
3578 tw32(cpu_base + CPU_STATE, 0xffffffff);
3579 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3580 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3581 break;
3582 if (pci_channel_offline(tp->pdev))
3583 return -EBUSY;
3584 }
3585
3586 return (i == iters) ? -EBUSY : 0;
3587 }
3588
3589 /* tp->lock is held. */
3590 static int tg3_rxcpu_pause(struct tg3 *tp)
3591 {
3592 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3593
3594 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3595 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3596 udelay(10);
3597
3598 return rc;
3599 }
3600
3601 /* tp->lock is held. */
3602 static int tg3_txcpu_pause(struct tg3 *tp)
3603 {
3604 return tg3_pause_cpu(tp, TX_CPU_BASE);
3605 }
3606
3607 /* tp->lock is held. */
3608 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3609 {
3610 tw32(cpu_base + CPU_STATE, 0xffffffff);
3611 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3612 }
3613
3614 /* tp->lock is held. */
3615 static void tg3_rxcpu_resume(struct tg3 *tp)
3616 {
3617 tg3_resume_cpu(tp, RX_CPU_BASE);
3618 }
3619
3620 /* tp->lock is held. */
3621 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3622 {
3623 int rc;
3624
3625 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3626
3627 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3628 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3629
3630 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3631 return 0;
3632 }
3633 if (cpu_base == RX_CPU_BASE) {
3634 rc = tg3_rxcpu_pause(tp);
3635 } else {
3636 /*
3637 * The 5750 derivative in the BCM4785 has only an Rx CPU,
3638 * so there is no Tx CPU to halt.
3639 */
3640 if (tg3_flag(tp, IS_SSB_CORE))
3641 return 0;
3642
3643 rc = tg3_txcpu_pause(tp);
3644 }
3645
3646 if (rc) {
3647 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3648 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3649 return -ENODEV;
3650 }
3651
3652 /* Clear firmware's nvram arbitration. */
3653 if (tg3_flag(tp, NVRAM))
3654 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3655 return 0;
3656 }
3657
3658 static int tg3_fw_data_len(struct tg3 *tp,
3659 const struct tg3_firmware_hdr *fw_hdr)
3660 {
3661 int fw_len;
3662
3663 /* Non-fragmented firmware has one firmware header followed by a
3664 * contiguous chunk of data to be written. The length field in that
3665 * header is not the length of the data to be written but the complete
3666 * length of the bss, so the data length is instead derived from
3667 * tp->fw->size minus the headers.
3668 *
3669 * Fragmented firmware has a main header followed by multiple
3670 * fragments. Each fragment is identical to non-fragmented firmware,
3671 * with a firmware header followed by a contiguous chunk of data. In
3672 * the main header, the length field is unused and set to 0xffffffff.
3673 * In each fragment header the length is the entire size of that
3674 * fragment, i.e. fragment data plus header length, so the data length
3675 * is the header's length field minus TG3_FW_HDR_LEN.
3676 */
3677 if (tp->fw_len == 0xffffffff)
3678 fw_len = be32_to_cpu(fw_hdr->len);
3679 else
3680 fw_len = tp->fw->size;
3681
3682 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3683 }
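/* For illustration (sketch, not part of the driver), the two layouts
 * described above:
 *
 *   non-fragmented:  [ hdr: ver | base | len(bss) ][ data ... ]
 *
 *   fragmented:      [ main hdr: len == 0xffffffff ]
 *                    [ frag hdr: len == hdr + data ][ data ... ]
 *                    [ frag hdr ][ data ... ] ...
 */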
3684
3685 /* tp->lock is held. */
3686 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3687 u32 cpu_scratch_base, int cpu_scratch_size,
3688 const struct tg3_firmware_hdr *fw_hdr)
3689 {
3690 int err, i;
3691 void (*write_op)(struct tg3 *, u32, u32);
3692 int total_len = tp->fw->size;
3693
3694 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3695 netdev_err(tp->dev,
3696 "%s: Trying to load TX cpu firmware which is 5705\n",
3697 __func__);
3698 return -EINVAL;
3699 }
3700
3701 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3702 write_op = tg3_write_mem;
3703 else
3704 write_op = tg3_write_indirect_reg32;
3705
3706 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3707 /* It is possible that the bootcode is still loading at this point.
3708 * Take the NVRAM lock before halting the CPU.
3709 */
3710 int lock_err = tg3_nvram_lock(tp);
3711 err = tg3_halt_cpu(tp, cpu_base);
3712 if (!lock_err)
3713 tg3_nvram_unlock(tp);
3714 if (err)
3715 goto out;
3716
3717 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3718 write_op(tp, cpu_scratch_base + i, 0);
3719 tw32(cpu_base + CPU_STATE, 0xffffffff);
3720 tw32(cpu_base + CPU_MODE,
3721 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3722 } else {
3723 /* Subtract the additional main header for fragmented firmware
3724 * and advance to the first fragment.
3725 */
3726 total_len -= TG3_FW_HDR_LEN;
3727 fw_hdr++;
3728 }
3729
3730 do {
3731 u32 *fw_data = (u32 *)(fw_hdr + 1);
3732 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3733 write_op(tp, cpu_scratch_base +
3734 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3735 (i * sizeof(u32)),
3736 be32_to_cpu(fw_data[i]));
3737
3738 total_len -= be32_to_cpu(fw_hdr->len);
3739
3740 /* Advance to next fragment */
3741 fw_hdr = (struct tg3_firmware_hdr *)
3742 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3743 } while (total_len > 0);
3744
3745 err = 0;
3746
3747 out:
3748 return err;
3749 }
3750
3751 /* tp->lock is held. */
3752 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3753 {
3754 int i;
3755 const int iters = 5;
3756
3757 tw32(cpu_base + CPU_STATE, 0xffffffff);
3758 tw32_f(cpu_base + CPU_PC, pc);
3759
3760 for (i = 0; i < iters; i++) {
3761 if (tr32(cpu_base + CPU_PC) == pc)
3762 break;
3763 tw32(cpu_base + CPU_STATE, 0xffffffff);
3764 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3765 tw32_f(cpu_base + CPU_PC, pc);
3766 udelay(1000);
3767 }
3768
3769 return (i == iters) ? -EBUSY : 0;
3770 }
3771
3772 /* tp->lock is held. */
3773 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3774 {
3775 const struct tg3_firmware_hdr *fw_hdr;
3776 int err;
3777
3778 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3779
3780 /* Firmware blob starts with version numbers, followed by
3781 * start address and length. We are setting complete length;
3782 * length = end_address_of_bss - start_address_of_text.
3783 * Remainder is the blob to be loaded contiguously
3784 * from the start address. */
3785
3786 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3787 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3788 fw_hdr);
3789 if (err)
3790 return err;
3791
3792 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3793 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3794 fw_hdr);
3795 if (err)
3796 return err;
3797
3798 /* Now startup only the RX cpu. */
3799 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3800 be32_to_cpu(fw_hdr->base_addr));
3801 if (err) {
3802 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3803 "should be %08x\n", __func__,
3804 tr32(RX_CPU_BASE + CPU_PC),
3805 be32_to_cpu(fw_hdr->base_addr));
3806 return -ENODEV;
3807 }
3808
3809 tg3_rxcpu_resume(tp);
3810
3811 return 0;
3812 }
3813
3814 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3815 {
3816 const int iters = 1000;
3817 int i;
3818 u32 val;
3819
3820 /* Wait for the boot code to complete initialization and enter the
3821 * service loop. It is then safe to download service patches.
3822 */
3823 for (i = 0; i < iters; i++) {
3824 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3825 break;
3826
3827 udelay(10);
3828 }
3829
3830 if (i == iters) {
3831 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3832 return -EBUSY;
3833 }
3834
3835 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3836 if (val & 0xff) {
3837 netdev_warn(tp->dev,
3838 "Other patches exist. Not downloading EEE patch\n");
3839 return -EEXIST;
3840 }
3841
3842 return 0;
3843 }
3844
3845 /* tp->lock is held. */
3846 static void tg3_load_57766_firmware(struct tg3 *tp)
3847 {
3848 struct tg3_firmware_hdr *fw_hdr;
3849
3850 if (!tg3_flag(tp, NO_NVRAM))
3851 return;
3852
3853 if (tg3_validate_rxcpu_state(tp))
3854 return;
3855
3856 if (!tp->fw)
3857 return;
3858
3859 /* This firmware blob has a different format than older firmware
3860 * releases, as described below. The main difference is that we have
3861 * fragmented data to be written to non-contiguous locations.
3862 *
3863 * In the beginning we have a firmware header identical to other
3864 * firmware, which consists of version, base addr and length. The
3865 * length here is unused and set to 0xffffffff.
3866 *
3867 * This is followed by a series of firmware fragments which are
3868 * individually identical to previous firmware, i.e. they have the
3869 * firmware header followed by data for that fragment. The version
3870 * field of the individual fragment header is unused.
3871 */
3872
3873 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3874 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3875 return;
3876
3877 if (tg3_rxcpu_pause(tp))
3878 return;
3879
3880 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3881 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3882
3883 tg3_rxcpu_resume(tp);
3884 }
3885
3886 /* tp->lock is held. */
3887 static int tg3_load_tso_firmware(struct tg3 *tp)
3888 {
3889 const struct tg3_firmware_hdr *fw_hdr;
3890 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3891 int err;
3892
3893 if (!tg3_flag(tp, FW_TSO))
3894 return 0;
3895
3896 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3897
3898 /* Firmware blob starts with version numbers, followed by
3899 * start address and length. We are setting complete length;
3900 * length = end_address_of_bss - start_address_of_text.
3901 * Remainder is the blob to be loaded contiguously
3902 * from the start address. */
3903
3904 cpu_scratch_size = tp->fw_len;
3905
3906 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3907 cpu_base = RX_CPU_BASE;
3908 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3909 } else {
3910 cpu_base = TX_CPU_BASE;
3911 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3912 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3913 }
3914
3915 err = tg3_load_firmware_cpu(tp, cpu_base,
3916 cpu_scratch_base, cpu_scratch_size,
3917 fw_hdr);
3918 if (err)
3919 return err;
3920
3921 /* Now startup the cpu. */
3922 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3923 be32_to_cpu(fw_hdr->base_addr));
3924 if (err) {
3925 netdev_err(tp->dev,
3926 "%s fails to set CPU PC, is %08x should be %08x\n",
3927 __func__, tr32(cpu_base + CPU_PC),
3928 be32_to_cpu(fw_hdr->base_addr));
3929 return -ENODEV;
3930 }
3931
3932 tg3_resume_cpu(tp, cpu_base);
3933 return 0;
3934 }
3935
3936
3937 /* tp->lock is held. */
3938 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3939 {
3940 u32 addr_high, addr_low;
3941 int i;
3942
3943 addr_high = ((tp->dev->dev_addr[0] << 8) |
3944 tp->dev->dev_addr[1]);
3945 addr_low = ((tp->dev->dev_addr[2] << 24) |
3946 (tp->dev->dev_addr[3] << 16) |
3947 (tp->dev->dev_addr[4] << 8) |
3948 (tp->dev->dev_addr[5] << 0));
3949 for (i = 0; i < 4; i++) {
3950 if (i == 1 && skip_mac_1)
3951 continue;
3952 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3953 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3954 }
3955
3956 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3957 tg3_asic_rev(tp) == ASIC_REV_5704) {
3958 for (i = 0; i < 12; i++) {
3959 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3960 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3961 }
3962 }
3963
3964 addr_high = (tp->dev->dev_addr[0] +
3965 tp->dev->dev_addr[1] +
3966 tp->dev->dev_addr[2] +
3967 tp->dev->dev_addr[3] +
3968 tp->dev->dev_addr[4] +
3969 tp->dev->dev_addr[5]) &
3970 TX_BACKOFF_SEED_MASK;
3971 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3972 }
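/* Worked example (hypothetical address): for dev_addr 00:10:18:a1:b2:c3
 * the code above programs addr_high = 0x00000010 and
 * addr_low = 0x18a1b2c3 into each MAC address slot, and seeds the
 * transmit backoff generator with
 * (0x00 + 0x10 + 0x18 + 0xa1 + 0xb2 + 0xc3) & TX_BACKOFF_SEED_MASK.
 */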
3973
3974 static void tg3_enable_register_access(struct tg3 *tp)
3975 {
3976 /*
3977 * Make sure register accesses (indirect or otherwise) will function
3978 * correctly.
3979 */
3980 pci_write_config_dword(tp->pdev,
3981 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3982 }
3983
3984 static int tg3_power_up(struct tg3 *tp)
3985 {
3986 int err;
3987
3988 tg3_enable_register_access(tp);
3989
3990 err = pci_set_power_state(tp->pdev, PCI_D0);
3991 if (!err) {
3992 /* Switch out of Vaux if it is a NIC */
3993 tg3_pwrsrc_switch_to_vmain(tp);
3994 } else {
3995 netdev_err(tp->dev, "Transition to D0 failed\n");
3996 }
3997
3998 return err;
3999 }
4000
4001 static int tg3_setup_phy(struct tg3 *, bool);
4002
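/* Summary of the shutdown sequence below: mask PCI interrupts, record
 * the PHY state and (if waking is enabled) restrict its advertisement
 * to WOL-capable speeds, arm the WOL mailbox and magic-packet MAC mode,
 * gate the core clocks where the chip allows it, power down the PHY if
 * nothing needs to wake us, and finally post the shutdown signature for
 * the firmware/APE.
 */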
4003 static int tg3_power_down_prepare(struct tg3 *tp)
4004 {
4005 u32 misc_host_ctrl;
4006 bool device_should_wake, do_low_power;
4007
4008 tg3_enable_register_access(tp);
4009
4010 /* Restore the CLKREQ setting. */
4011 if (tg3_flag(tp, CLKREQ_BUG))
4012 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4013 PCI_EXP_LNKCTL_CLKREQ_EN);
4014
4015 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4016 tw32(TG3PCI_MISC_HOST_CTRL,
4017 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4018
4019 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4020 tg3_flag(tp, WOL_ENABLE);
4021
4022 if (tg3_flag(tp, USE_PHYLIB)) {
4023 do_low_power = false;
4024 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4025 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4026 struct phy_device *phydev;
4027 u32 phyid, advertising;
4028
4029 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
4030
4031 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4032
4033 tp->link_config.speed = phydev->speed;
4034 tp->link_config.duplex = phydev->duplex;
4035 tp->link_config.autoneg = phydev->autoneg;
4036 tp->link_config.advertising = phydev->advertising;
4037
4038 advertising = ADVERTISED_TP |
4039 ADVERTISED_Pause |
4040 ADVERTISED_Autoneg |
4041 ADVERTISED_10baseT_Half;
4042
4043 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4044 if (tg3_flag(tp, WOL_SPEED_100MB))
4045 advertising |=
4046 ADVERTISED_100baseT_Half |
4047 ADVERTISED_100baseT_Full |
4048 ADVERTISED_10baseT_Full;
4049 else
4050 advertising |= ADVERTISED_10baseT_Full;
4051 }
4052
4053 phydev->advertising = advertising;
4054
4055 phy_start_aneg(phydev);
4056
4057 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4058 if (phyid != PHY_ID_BCMAC131) {
4059 phyid &= PHY_BCM_OUI_MASK;
4060 if (phyid == PHY_BCM_OUI_1 ||
4061 phyid == PHY_BCM_OUI_2 ||
4062 phyid == PHY_BCM_OUI_3)
4063 do_low_power = true;
4064 }
4065 }
4066 } else {
4067 do_low_power = true;
4068
4069 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4070 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4071
4072 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4073 tg3_setup_phy(tp, false);
4074 }
4075
4076 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4077 u32 val;
4078
4079 val = tr32(GRC_VCPU_EXT_CTRL);
4080 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4081 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4082 int i;
4083 u32 val;
4084
4085 for (i = 0; i < 200; i++) {
4086 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4087 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4088 break;
4089 msleep(1);
4090 }
4091 }
4092 if (tg3_flag(tp, WOL_CAP))
4093 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4094 WOL_DRV_STATE_SHUTDOWN |
4095 WOL_DRV_WOL |
4096 WOL_SET_MAGIC_PKT);
4097
4098 if (device_should_wake) {
4099 u32 mac_mode;
4100
4101 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4102 if (do_low_power &&
4103 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4104 tg3_phy_auxctl_write(tp,
4105 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4106 MII_TG3_AUXCTL_PCTL_WOL_EN |
4107 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4108 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4109 udelay(40);
4110 }
4111
4112 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4113 mac_mode = MAC_MODE_PORT_MODE_GMII;
4114 else if (tp->phy_flags &
4115 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4116 if (tp->link_config.active_speed == SPEED_1000)
4117 mac_mode = MAC_MODE_PORT_MODE_GMII;
4118 else
4119 mac_mode = MAC_MODE_PORT_MODE_MII;
4120 } else
4121 mac_mode = MAC_MODE_PORT_MODE_MII;
4122
4123 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4124 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4125 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4126 SPEED_100 : SPEED_10;
4127 if (tg3_5700_link_polarity(tp, speed))
4128 mac_mode |= MAC_MODE_LINK_POLARITY;
4129 else
4130 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4131 }
4132 } else {
4133 mac_mode = MAC_MODE_PORT_MODE_TBI;
4134 }
4135
4136 if (!tg3_flag(tp, 5750_PLUS))
4137 tw32(MAC_LED_CTRL, tp->led_ctrl);
4138
4139 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4140 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4141 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4142 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4143
4144 if (tg3_flag(tp, ENABLE_APE))
4145 mac_mode |= MAC_MODE_APE_TX_EN |
4146 MAC_MODE_APE_RX_EN |
4147 MAC_MODE_TDE_ENABLE;
4148
4149 tw32_f(MAC_MODE, mac_mode);
4150 udelay(100);
4151
4152 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4153 udelay(10);
4154 }
4155
4156 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4157 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4158 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4159 u32 base_val;
4160
4161 base_val = tp->pci_clock_ctrl;
4162 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4163 CLOCK_CTRL_TXCLK_DISABLE);
4164
4165 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4166 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4167 } else if (tg3_flag(tp, 5780_CLASS) ||
4168 tg3_flag(tp, CPMU_PRESENT) ||
4169 tg3_asic_rev(tp) == ASIC_REV_5906) {
4170 /* do nothing */
4171 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4172 u32 newbits1, newbits2;
4173
4174 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4175 tg3_asic_rev(tp) == ASIC_REV_5701) {
4176 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4177 CLOCK_CTRL_TXCLK_DISABLE |
4178 CLOCK_CTRL_ALTCLK);
4179 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4180 } else if (tg3_flag(tp, 5705_PLUS)) {
4181 newbits1 = CLOCK_CTRL_625_CORE;
4182 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4183 } else {
4184 newbits1 = CLOCK_CTRL_ALTCLK;
4185 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4186 }
4187
4188 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4189 40);
4190
4191 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4192 40);
4193
4194 if (!tg3_flag(tp, 5705_PLUS)) {
4195 u32 newbits3;
4196
4197 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4198 tg3_asic_rev(tp) == ASIC_REV_5701) {
4199 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4200 CLOCK_CTRL_TXCLK_DISABLE |
4201 CLOCK_CTRL_44MHZ_CORE);
4202 } else {
4203 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4204 }
4205
4206 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4207 tp->pci_clock_ctrl | newbits3, 40);
4208 }
4209 }
4210
4211 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4212 tg3_power_down_phy(tp, do_low_power);
4213
4214 tg3_frob_aux_power(tp, true);
4215
4216 /* Workaround for unstable PLL clock */
4217 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4218 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4219 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4220 u32 val = tr32(0x7d00);
4221
4222 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4223 tw32(0x7d00, val);
4224 if (!tg3_flag(tp, ENABLE_ASF)) {
4225 int err;
4226
4227 err = tg3_nvram_lock(tp);
4228 tg3_halt_cpu(tp, RX_CPU_BASE);
4229 if (!err)
4230 tg3_nvram_unlock(tp);
4231 }
4232 }
4233
4234 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4235
4236 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4237
4238 return 0;
4239 }
4240
4241 static void tg3_power_down(struct tg3 *tp)
4242 {
4243 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4244 pci_set_power_state(tp->pdev, PCI_D3hot);
4245 }
4246
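/* Decode the speed/duplex field of the MII_TG3_AUX_STAT register into
 * SPEED_* / DUPLEX_* values. FET PHYs encode the result in separate
 * _100 and _FULL bits, hence the fallback in the default case.
 */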
4247 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4248 {
4249 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4250 case MII_TG3_AUX_STAT_10HALF:
4251 *speed = SPEED_10;
4252 *duplex = DUPLEX_HALF;
4253 break;
4254
4255 case MII_TG3_AUX_STAT_10FULL:
4256 *speed = SPEED_10;
4257 *duplex = DUPLEX_FULL;
4258 break;
4259
4260 case MII_TG3_AUX_STAT_100HALF:
4261 *speed = SPEED_100;
4262 *duplex = DUPLEX_HALF;
4263 break;
4264
4265 case MII_TG3_AUX_STAT_100FULL:
4266 *speed = SPEED_100;
4267 *duplex = DUPLEX_FULL;
4268 break;
4269
4270 case MII_TG3_AUX_STAT_1000HALF:
4271 *speed = SPEED_1000;
4272 *duplex = DUPLEX_HALF;
4273 break;
4274
4275 case MII_TG3_AUX_STAT_1000FULL:
4276 *speed = SPEED_1000;
4277 *duplex = DUPLEX_FULL;
4278 break;
4279
4280 default:
4281 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4282 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4283 SPEED_10;
4284 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4285 DUPLEX_HALF;
4286 break;
4287 }
4288 *speed = SPEED_UNKNOWN;
4289 *duplex = DUPLEX_UNKNOWN;
4290 break;
4291 }
4292 }
4293
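/* Program the PHY's autoneg advertisement: MII_ADVERTISE from the
 * requested modes and flow control, MII_CTRL1000 on gigabit-capable
 * parts, and the clause-45 EEE advertisement (plus per-ASIC DSP fixups)
 * when the PHY supports EEE.
 */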
4294 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4295 {
4296 int err = 0;
4297 u32 val, new_adv;
4298
4299 new_adv = ADVERTISE_CSMA;
4300 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4301 new_adv |= mii_advertise_flowctrl(flowctrl);
4302
4303 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4304 if (err)
4305 goto done;
4306
4307 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4308 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4309
4310 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4311 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4312 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4313
4314 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4315 if (err)
4316 goto done;
4317 }
4318
4319 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4320 goto done;
4321
4322 tw32(TG3_CPMU_EEE_MODE,
4323 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4324
4325 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4326 if (!err) {
4327 u32 err2;
4328
4329 val = 0;
4330 /* Advertise 100-BaseTX EEE ability */
4331 if (advertise & ADVERTISED_100baseT_Full)
4332 val |= MDIO_AN_EEE_ADV_100TX;
4333 /* Advertise 1000-BaseT EEE ability */
4334 if (advertise & ADVERTISED_1000baseT_Full)
4335 val |= MDIO_AN_EEE_ADV_1000T;
4336
4337 if (!tp->eee.eee_enabled) {
4338 val = 0;
4339 tp->eee.advertised = 0;
4340 } else {
4341 tp->eee.advertised = advertise &
4342 (ADVERTISED_100baseT_Full |
4343 ADVERTISED_1000baseT_Full);
4344 }
4345
4346 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4347 if (err)
4348 val = 0;
4349
4350 switch (tg3_asic_rev(tp)) {
4351 case ASIC_REV_5717:
4352 case ASIC_REV_57765:
4353 case ASIC_REV_57766:
4354 case ASIC_REV_5719:
4355 /* If we advertised any EEE abilities above... */
4356 if (val)
4357 val = MII_TG3_DSP_TAP26_ALNOKO |
4358 MII_TG3_DSP_TAP26_RMRXSTO |
4359 MII_TG3_DSP_TAP26_OPCSINPT;
4360 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4361 /* Fall through */
4362 case ASIC_REV_5720:
4363 case ASIC_REV_5762:
4364 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4365 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4366 MII_TG3_DSP_CH34TP2_HIBW01);
4367 }
4368
4369 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4370 if (!err)
4371 err = err2;
4372 }
4373
4374 done:
4375 return err;
4376 }
4377
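/* Kick off link negotiation on a copper PHY. In low-power mode the
 * advertisement is restricted to WOL-capable speeds; with autoneg
 * disabled the PHY is first put into loopback until the link drops,
 * and only then is the forced BMCR value written.
 */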
4378 static void tg3_phy_copper_begin(struct tg3 *tp)
4379 {
4380 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4381 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4382 u32 adv, fc;
4383
4384 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4385 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4386 adv = ADVERTISED_10baseT_Half |
4387 ADVERTISED_10baseT_Full;
4388 if (tg3_flag(tp, WOL_SPEED_100MB))
4389 adv |= ADVERTISED_100baseT_Half |
4390 ADVERTISED_100baseT_Full;
4391 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4392 adv |= ADVERTISED_1000baseT_Half |
4393 ADVERTISED_1000baseT_Full;
4394
4395 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4396 } else {
4397 adv = tp->link_config.advertising;
4398 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4399 adv &= ~(ADVERTISED_1000baseT_Half |
4400 ADVERTISED_1000baseT_Full);
4401
4402 fc = tp->link_config.flowctrl;
4403 }
4404
4405 tg3_phy_autoneg_cfg(tp, adv, fc);
4406
4407 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4408 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4409 /* Normally during power down we want to autonegotiate
4410 * the lowest possible speed for WOL. However, to avoid
4411 * a link flap we leave the speed untouched.
4412 */
4413 return;
4414 }
4415
4416 tg3_writephy(tp, MII_BMCR,
4417 BMCR_ANENABLE | BMCR_ANRESTART);
4418 } else {
4419 int i;
4420 u32 bmcr, orig_bmcr;
4421
4422 tp->link_config.active_speed = tp->link_config.speed;
4423 tp->link_config.active_duplex = tp->link_config.duplex;
4424
4425 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4426 /* With autoneg disabled, 5715 only links up when the
4427 * advertisement register has the configured speed
4428 * enabled.
4429 */
4430 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4431 }
4432
4433 bmcr = 0;
4434 switch (tp->link_config.speed) {
4435 default:
4436 case SPEED_10:
4437 break;
4438
4439 case SPEED_100:
4440 bmcr |= BMCR_SPEED100;
4441 break;
4442
4443 case SPEED_1000:
4444 bmcr |= BMCR_SPEED1000;
4445 break;
4446 }
4447
4448 if (tp->link_config.duplex == DUPLEX_FULL)
4449 bmcr |= BMCR_FULLDPLX;
4450
4451 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4452 (bmcr != orig_bmcr)) {
4453 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4454 for (i = 0; i < 1500; i++) {
4455 u32 tmp;
4456
4457 udelay(10);
4458 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4459 tg3_readphy(tp, MII_BMSR, &tmp))
4460 continue;
4461 if (!(tmp & BMSR_LSTATUS)) {
4462 udelay(40);
4463 break;
4464 }
4465 }
4466 tg3_writephy(tp, MII_BMCR, bmcr);
4467 udelay(40);
4468 }
4469 }
4470 }
4471
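/* Read the PHY's current configuration back into tp->link_config:
 * forced speed/duplex when autoneg is off, advertisement masks and
 * flow control when it is on. Presumably this lets a link already set
 * up by the boot firmware be adopted without renegotiating.
 */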
4472 static int tg3_phy_pull_config(struct tg3 *tp)
4473 {
4474 int err;
4475 u32 val;
4476
4477 err = tg3_readphy(tp, MII_BMCR, &val);
4478 if (err)
4479 goto done;
4480
4481 if (!(val & BMCR_ANENABLE)) {
4482 tp->link_config.autoneg = AUTONEG_DISABLE;
4483 tp->link_config.advertising = 0;
4484 tg3_flag_clear(tp, PAUSE_AUTONEG);
4485
4486 err = -EIO;
4487
4488 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4489 case 0:
4490 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4491 goto done;
4492
4493 tp->link_config.speed = SPEED_10;
4494 break;
4495 case BMCR_SPEED100:
4496 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4497 goto done;
4498
4499 tp->link_config.speed = SPEED_100;
4500 break;
4501 case BMCR_SPEED1000:
4502 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4503 tp->link_config.speed = SPEED_1000;
4504 break;
4505 }
4506 /* Fall through */
4507 default:
4508 goto done;
4509 }
4510
4511 if (val & BMCR_FULLDPLX)
4512 tp->link_config.duplex = DUPLEX_FULL;
4513 else
4514 tp->link_config.duplex = DUPLEX_HALF;
4515
4516 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4517
4518 err = 0;
4519 goto done;
4520 }
4521
4522 tp->link_config.autoneg = AUTONEG_ENABLE;
4523 tp->link_config.advertising = ADVERTISED_Autoneg;
4524 tg3_flag_set(tp, PAUSE_AUTONEG);
4525
4526 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4527 u32 adv;
4528
4529 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4530 if (err)
4531 goto done;
4532
4533 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4534 tp->link_config.advertising |= adv | ADVERTISED_TP;
4535
4536 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4537 } else {
4538 tp->link_config.advertising |= ADVERTISED_FIBRE;
4539 }
4540
4541 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4542 u32 adv;
4543
4544 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4545 err = tg3_readphy(tp, MII_CTRL1000, &val);
4546 if (err)
4547 goto done;
4548
4549 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4550 } else {
4551 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4552 if (err)
4553 goto done;
4554
4555 adv = tg3_decode_flowctrl_1000X(val);
4556 tp->link_config.flowctrl = adv;
4557
4558 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4559 adv = mii_adv_to_ethtool_adv_x(val);
4560 }
4561
4562 tp->link_config.advertising |= adv;
4563 }
4564
4565 done:
4566 return err;
4567 }
4568
4569 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4570 {
4571 int err;
4572
4573 /* Turn off tap power management. */
4574 /* Set Extended packet length bit */
4575 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4576
4577 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4578 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4579 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4580 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4581 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4582
4583 udelay(40);
4584
4585 return err;
4586 }
4587
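/* Return true when the EEE configuration in the PHY matches tp->eee:
 * advertisement, LPI timer and LPI enable when EEE is on, and an empty
 * advertisement when EEE is off. Trivially true when the PHY has no
 * EEE capability.
 */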
4588 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4589 {
4590 struct ethtool_eee eee;
4591
4592 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4593 return true;
4594
4595 tg3_eee_pull_config(tp, &eee);
4596
4597 if (tp->eee.eee_enabled) {
4598 if (tp->eee.advertised != eee.advertised ||
4599 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4600 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4601 return false;
4602 } else {
4603 /* EEE is disabled but we're advertising */
4604 if (eee.advertised)
4605 return false;
4606 }
4607
4608 return true;
4609 }
4610
4611 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4612 {
4613 u32 advmsk, tgtadv, advertising;
4614
4615 advertising = tp->link_config.advertising;
4616 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4617
4618 advmsk = ADVERTISE_ALL;
4619 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4620 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4621 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4622 }
4623
4624 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4625 return false;
4626
4627 if ((*lcladv & advmsk) != tgtadv)
4628 return false;
4629
4630 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4631 u32 tg3_ctrl;
4632
4633 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4634
4635 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4636 return false;
4637
4638 if (tgtadv &&
4639 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4640 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4641 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4642 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4643 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4644 } else {
4645 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4646 }
4647
4648 if (tg3_ctrl != tgtadv)
4649 return false;
4650 }
4651
4652 return true;
4653 }
4654
4655 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4656 {
4657 u32 lpeth = 0;
4658
4659 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4660 u32 val;
4661
4662 if (tg3_readphy(tp, MII_STAT1000, &val))
4663 return false;
4664
4665 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4666 }
4667
4668 if (tg3_readphy(tp, MII_LPA, rmtadv))
4669 return false;
4670
4671 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4672 tp->link_config.rmt_adv = lpeth;
4673
4674 return true;
4675 }
4676
4677 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4678 {
4679 if (curr_link_up != tp->link_up) {
4680 if (curr_link_up) {
4681 netif_carrier_on(tp->dev);
4682 } else {
4683 netif_carrier_off(tp->dev);
4684 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4685 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4686 }
4687
4688 tg3_link_report(tp);
4689 return true;
4690 }
4691
4692 return false;
4693 }
4694
4695 static void tg3_clear_mac_status(struct tg3 *tp)
4696 {
4697 tw32(MAC_EVENT, 0);
4698
4699 tw32_f(MAC_STATUS,
4700 MAC_STATUS_SYNC_CHANGED |
4701 MAC_STATUS_CFG_CHANGED |
4702 MAC_STATUS_MI_COMPLETION |
4703 MAC_STATUS_LNKSTATE_CHANGED);
4704 udelay(40);
4705 }
4706
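/* Program the CPMU EEE engine from tp->eee: link-idle detection, LPI
 * entry/exit behaviour and the debounce timers. Writing 0 to
 * TG3_CPMU_EEE_MODE below disables the feature when EEE is turned off.
 */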
4707 static void tg3_setup_eee(struct tg3 *tp)
4708 {
4709 u32 val;
4710
4711 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4712 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4713 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4714 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4715
4716 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4717
4718 tw32_f(TG3_CPMU_EEE_CTRL,
4719 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4720
4721 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4722 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4723 TG3_CPMU_EEEMD_LPI_IN_RX |
4724 TG3_CPMU_EEEMD_EEE_ENABLE;
4725
4726 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4727 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4728
4729 if (tg3_flag(tp, ENABLE_APE))
4730 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4731
4732 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4733
4734 tw32_f(TG3_CPMU_EEE_DBTMR1,
4735 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4736 (tp->eee.tx_lpi_timer & 0xffff));
4737
4738 tw32_f(TG3_CPMU_EEE_DBTMR2,
4739 TG3_CPMU_DBTMR2_APE_TX_2047US |
4740 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4741 }
4742
4743 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4744 {
4745 bool current_link_up;
4746 u32 bmsr, val;
4747 u32 lcl_adv, rmt_adv;
4748 u16 current_speed;
4749 u8 current_duplex;
4750 int i, err;
4751
4752 tg3_clear_mac_status(tp);
4753
4754 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4755 tw32_f(MAC_MI_MODE,
4756 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4757 udelay(80);
4758 }
4759
4760 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4761
4762 /* Some third-party PHYs need to be reset on link going
4763 * down.
4764 */
4765 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4766 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4767 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4768 tp->link_up) {
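/* BMSR link status is latched-low; read it twice so the second read
 * reflects the current state.
 */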
4769 tg3_readphy(tp, MII_BMSR, &bmsr);
4770 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4771 !(bmsr & BMSR_LSTATUS))
4772 force_reset = true;
4773 }
4774 if (force_reset)
4775 tg3_phy_reset(tp);
4776
4777 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4778 tg3_readphy(tp, MII_BMSR, &bmsr);
4779 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4780 !tg3_flag(tp, INIT_COMPLETE))
4781 bmsr = 0;
4782
4783 if (!(bmsr & BMSR_LSTATUS)) {
4784 err = tg3_init_5401phy_dsp(tp);
4785 if (err)
4786 return err;
4787
4788 tg3_readphy(tp, MII_BMSR, &bmsr);
4789 for (i = 0; i < 1000; i++) {
4790 udelay(10);
4791 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4792 (bmsr & BMSR_LSTATUS)) {
4793 udelay(40);
4794 break;
4795 }
4796 }
4797
4798 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4799 TG3_PHY_REV_BCM5401_B0 &&
4800 !(bmsr & BMSR_LSTATUS) &&
4801 tp->link_config.active_speed == SPEED_1000) {
4802 err = tg3_phy_reset(tp);
4803 if (!err)
4804 err = tg3_init_5401phy_dsp(tp);
4805 if (err)
4806 return err;
4807 }
4808 }
4809 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4810 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4811 /* 5701 {A0,B0} CRC bug workaround */
4812 tg3_writephy(tp, 0x15, 0x0a75);
4813 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4814 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4815 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4816 }
4817
4818 /* Clear pending interrupts... */
4819 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4820 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4821
4822 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4823 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4824 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4825 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4826
4827 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4828 tg3_asic_rev(tp) == ASIC_REV_5701) {
4829 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4830 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4831 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4832 else
4833 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4834 }
4835
4836 current_link_up = false;
4837 current_speed = SPEED_UNKNOWN;
4838 current_duplex = DUPLEX_UNKNOWN;
4839 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4840 tp->link_config.rmt_adv = 0;
4841
4842 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4843 err = tg3_phy_auxctl_read(tp,
4844 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4845 &val);
4846 if (!err && !(val & (1 << 10))) {
4847 tg3_phy_auxctl_write(tp,
4848 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4849 val | (1 << 10));
4850 goto relink;
4851 }
4852 }
4853
4854 bmsr = 0;
4855 for (i = 0; i < 100; i++) {
4856 tg3_readphy(tp, MII_BMSR, &bmsr);
4857 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4858 (bmsr & BMSR_LSTATUS))
4859 break;
4860 udelay(40);
4861 }
4862
4863 if (bmsr & BMSR_LSTATUS) {
4864 u32 aux_stat, bmcr;
4865
4866 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4867 for (i = 0; i < 2000; i++) {
4868 udelay(10);
4869 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4870 aux_stat)
4871 break;
4872 }
4873
4874 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4875 &current_speed,
4876 &current_duplex);
4877
4878 bmcr = 0;
4879 for (i = 0; i < 200; i++) {
4880 tg3_readphy(tp, MII_BMCR, &bmcr);
4881 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4882 continue;
4883 if (bmcr && bmcr != 0x7fff)
4884 break;
4885 udelay(10);
4886 }
4887
4888 lcl_adv = 0;
4889 rmt_adv = 0;
4890
4891 tp->link_config.active_speed = current_speed;
4892 tp->link_config.active_duplex = current_duplex;
4893
4894 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4895 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4896
4897 if ((bmcr & BMCR_ANENABLE) &&
4898 eee_config_ok &&
4899 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4900 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4901 current_link_up = true;
4902
4903 /* Changes to the EEE settings take effect only after a
4904 * PHY reset. If we have skipped a reset due to Link Flap
4905 * Avoidance being enabled, do it now.
4906 */
4907 if (!eee_config_ok &&
4908 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4909 !force_reset) {
4910 tg3_setup_eee(tp);
4911 tg3_phy_reset(tp);
4912 }
4913 } else {
4914 if (!(bmcr & BMCR_ANENABLE) &&
4915 tp->link_config.speed == current_speed &&
4916 tp->link_config.duplex == current_duplex) {
4917 current_link_up = true;
4918 }
4919 }
4920
4921 if (current_link_up &&
4922 tp->link_config.active_duplex == DUPLEX_FULL) {
4923 u32 reg, bit;
4924
4925 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4926 reg = MII_TG3_FET_GEN_STAT;
4927 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4928 } else {
4929 reg = MII_TG3_EXT_STAT;
4930 bit = MII_TG3_EXT_STAT_MDIX;
4931 }
4932
4933 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4934 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4935
4936 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4937 }
4938 }
4939
4940 relink:
4941 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4942 tg3_phy_copper_begin(tp);
4943
4944 if (tg3_flag(tp, ROBOSWITCH)) {
4945 current_link_up = true;
4946 /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4947 current_speed = SPEED_1000;
4948 current_duplex = DUPLEX_FULL;
4949 tp->link_config.active_speed = current_speed;
4950 tp->link_config.active_duplex = current_duplex;
4951 }
4952
4953 tg3_readphy(tp, MII_BMSR, &bmsr);
4954 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4955 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4956 current_link_up = true;
4957 }
4958
4959 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4960 if (current_link_up) {
4961 if (tp->link_config.active_speed == SPEED_100 ||
4962 tp->link_config.active_speed == SPEED_10)
4963 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4964 else
4965 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4966 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4967 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4968 else
4969 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4970
4971 /* In order for the 5750 core in the BCM4785 chip to work properly
4972 * in RGMII mode, the LED Control Register must be set up.
4973 */
4974 if (tg3_flag(tp, RGMII_MODE)) {
4975 u32 led_ctrl = tr32(MAC_LED_CTRL);
4976 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4977
4978 if (tp->link_config.active_speed == SPEED_10)
4979 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4980 else if (tp->link_config.active_speed == SPEED_100)
4981 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4982 LED_CTRL_100MBPS_ON);
4983 else if (tp->link_config.active_speed == SPEED_1000)
4984 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4985 LED_CTRL_1000MBPS_ON);
4986
4987 tw32(MAC_LED_CTRL, led_ctrl);
4988 udelay(40);
4989 }
4990
4991 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4992 if (tp->link_config.active_duplex == DUPLEX_HALF)
4993 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4994
4995 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4996 if (current_link_up &&
4997 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4998 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4999 else
5000 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5001 }
5002
5003 /* ??? Without this setting Netgear GA302T PHY does not
5004 * ??? send/receive packets...
5005 */
5006 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5007 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5008 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5009 tw32_f(MAC_MI_MODE, tp->mi_mode);
5010 udelay(80);
5011 }
5012
5013 tw32_f(MAC_MODE, tp->mac_mode);
5014 udelay(40);
5015
5016 tg3_phy_eee_adjust(tp, current_link_up);
5017
5018 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5019 /* Polled via timer. */
5020 tw32_f(MAC_EVENT, 0);
5021 } else {
5022 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5023 }
5024 udelay(40);
5025
5026 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5027 current_link_up &&
5028 tp->link_config.active_speed == SPEED_1000 &&
5029 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5030 udelay(120);
5031 tw32_f(MAC_STATUS,
5032 (MAC_STATUS_SYNC_CHANGED |
5033 MAC_STATUS_CFG_CHANGED));
5034 udelay(40);
5035 tg3_write_mem(tp,
5036 NIC_SRAM_FIRMWARE_MBOX,
5037 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5038 }
5039
5040 /* Prevent send BD corruption. */
5041 if (tg3_flag(tp, CLKREQ_BUG)) {
5042 if (tp->link_config.active_speed == SPEED_100 ||
5043 tp->link_config.active_speed == SPEED_10)
5044 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5045 PCI_EXP_LNKCTL_CLKREQ_EN);
5046 else
5047 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5048 PCI_EXP_LNKCTL_CLKREQ_EN);
5049 }
5050
5051 tg3_test_and_report_link_chg(tp, current_link_up);
5052
5053 return 0;
5054 }
5055
5056 struct tg3_fiber_aneginfo {
5057 int state;
5058 #define ANEG_STATE_UNKNOWN 0
5059 #define ANEG_STATE_AN_ENABLE 1
5060 #define ANEG_STATE_RESTART_INIT 2
5061 #define ANEG_STATE_RESTART 3
5062 #define ANEG_STATE_DISABLE_LINK_OK 4
5063 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5064 #define ANEG_STATE_ABILITY_DETECT 6
5065 #define ANEG_STATE_ACK_DETECT_INIT 7
5066 #define ANEG_STATE_ACK_DETECT 8
5067 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5068 #define ANEG_STATE_COMPLETE_ACK 10
5069 #define ANEG_STATE_IDLE_DETECT_INIT 11
5070 #define ANEG_STATE_IDLE_DETECT 12
5071 #define ANEG_STATE_LINK_OK 13
5072 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5073 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5074
5075 u32 flags;
5076 #define MR_AN_ENABLE 0x00000001
5077 #define MR_RESTART_AN 0x00000002
5078 #define MR_AN_COMPLETE 0x00000004
5079 #define MR_PAGE_RX 0x00000008
5080 #define MR_NP_LOADED 0x00000010
5081 #define MR_TOGGLE_TX 0x00000020
5082 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5083 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5084 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5085 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5086 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5087 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5088 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5089 #define MR_TOGGLE_RX 0x00002000
5090 #define MR_NP_RX 0x00004000
5091
5092 #define MR_LINK_OK 0x80000000
5093
5094 unsigned long link_time, cur_time;
5095
5096 u32 ability_match_cfg;
5097 int ability_match_count;
5098
5099 char ability_match, idle_match, ack_match;
5100
5101 u32 txconfig, rxconfig;
5102 #define ANEG_CFG_NP 0x00000080
5103 #define ANEG_CFG_ACK 0x00000040
5104 #define ANEG_CFG_RF2 0x00000020
5105 #define ANEG_CFG_RF1 0x00000010
5106 #define ANEG_CFG_PS2 0x00000001
5107 #define ANEG_CFG_PS1 0x00008000
5108 #define ANEG_CFG_HD 0x00004000
5109 #define ANEG_CFG_FD 0x00002000
5110 #define ANEG_CFG_INVAL 0x00001f06
5111
5112 };
5113 #define ANEG_OK 0
5114 #define ANEG_DONE 1
5115 #define ANEG_TIMER_ENAB 2
5116 #define ANEG_FAILED -1
5117
5118 #define ANEG_STATE_SETTLE_TIME 10000
5119
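/* One tick of the software 1000BASE-X autoneg state machine. The
 * caller (fiber_autoneg() below) invokes this roughly once per
 * microsecond, so ANEG_STATE_SETTLE_TIME corresponds to about 10 ms
 * of link settle time. Returns ANEG_OK to keep ticking,
 * ANEG_TIMER_ENAB while a timed wait is in progress, and ANEG_DONE or
 * ANEG_FAILED to stop.
 */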
5120 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5121 struct tg3_fiber_aneginfo *ap)
5122 {
5123 u16 flowctrl;
5124 unsigned long delta;
5125 u32 rx_cfg_reg;
5126 int ret;
5127
5128 if (ap->state == ANEG_STATE_UNKNOWN) {
5129 ap->rxconfig = 0;
5130 ap->link_time = 0;
5131 ap->cur_time = 0;
5132 ap->ability_match_cfg = 0;
5133 ap->ability_match_count = 0;
5134 ap->ability_match = 0;
5135 ap->idle_match = 0;
5136 ap->ack_match = 0;
5137 }
5138 ap->cur_time++;
5139
5140 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5141 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5142
5143 if (rx_cfg_reg != ap->ability_match_cfg) {
5144 ap->ability_match_cfg = rx_cfg_reg;
5145 ap->ability_match = 0;
5146 ap->ability_match_count = 0;
5147 } else {
5148 if (++ap->ability_match_count > 1) {
5149 ap->ability_match = 1;
5150 ap->ability_match_cfg = rx_cfg_reg;
5151 }
5152 }
5153 if (rx_cfg_reg & ANEG_CFG_ACK)
5154 ap->ack_match = 1;
5155 else
5156 ap->ack_match = 0;
5157
5158 ap->idle_match = 0;
5159 } else {
5160 ap->idle_match = 1;
5161 ap->ability_match_cfg = 0;
5162 ap->ability_match_count = 0;
5163 ap->ability_match = 0;
5164 ap->ack_match = 0;
5165
5166 rx_cfg_reg = 0;
5167 }
5168
5169 ap->rxconfig = rx_cfg_reg;
5170 ret = ANEG_OK;
5171
5172 switch (ap->state) {
5173 case ANEG_STATE_UNKNOWN:
5174 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5175 ap->state = ANEG_STATE_AN_ENABLE;
5176
5177 /* fallthru */
5178 case ANEG_STATE_AN_ENABLE:
5179 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5180 if (ap->flags & MR_AN_ENABLE) {
5181 ap->link_time = 0;
5182 ap->cur_time = 0;
5183 ap->ability_match_cfg = 0;
5184 ap->ability_match_count = 0;
5185 ap->ability_match = 0;
5186 ap->idle_match = 0;
5187 ap->ack_match = 0;
5188
5189 ap->state = ANEG_STATE_RESTART_INIT;
5190 } else {
5191 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5192 }
5193 break;
5194
5195 case ANEG_STATE_RESTART_INIT:
5196 ap->link_time = ap->cur_time;
5197 ap->flags &= ~(MR_NP_LOADED);
5198 ap->txconfig = 0;
5199 tw32(MAC_TX_AUTO_NEG, 0);
5200 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5201 tw32_f(MAC_MODE, tp->mac_mode);
5202 udelay(40);
5203
5204 ret = ANEG_TIMER_ENAB;
5205 ap->state = ANEG_STATE_RESTART;
5206
5207 /* fallthru */
5208 case ANEG_STATE_RESTART:
5209 delta = ap->cur_time - ap->link_time;
5210 if (delta > ANEG_STATE_SETTLE_TIME)
5211 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5212 else
5213 ret = ANEG_TIMER_ENAB;
5214 break;
5215
5216 case ANEG_STATE_DISABLE_LINK_OK:
5217 ret = ANEG_DONE;
5218 break;
5219
5220 case ANEG_STATE_ABILITY_DETECT_INIT:
5221 ap->flags &= ~(MR_TOGGLE_TX);
5222 ap->txconfig = ANEG_CFG_FD;
5223 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5224 if (flowctrl & ADVERTISE_1000XPAUSE)
5225 ap->txconfig |= ANEG_CFG_PS1;
5226 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5227 ap->txconfig |= ANEG_CFG_PS2;
5228 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5229 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5230 tw32_f(MAC_MODE, tp->mac_mode);
5231 udelay(40);
5232
5233 ap->state = ANEG_STATE_ABILITY_DETECT;
5234 break;
5235
5236 case ANEG_STATE_ABILITY_DETECT:
5237 if (ap->ability_match != 0 && ap->rxconfig != 0)
5238 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5239 break;
5240
5241 case ANEG_STATE_ACK_DETECT_INIT:
5242 ap->txconfig |= ANEG_CFG_ACK;
5243 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5244 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5245 tw32_f(MAC_MODE, tp->mac_mode);
5246 udelay(40);
5247
5248 ap->state = ANEG_STATE_ACK_DETECT;
5249
5250 /* fallthru */
5251 case ANEG_STATE_ACK_DETECT:
5252 if (ap->ack_match != 0) {
5253 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5254 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5255 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5256 } else {
5257 ap->state = ANEG_STATE_AN_ENABLE;
5258 }
5259 } else if (ap->ability_match != 0 &&
5260 ap->rxconfig == 0) {
5261 ap->state = ANEG_STATE_AN_ENABLE;
5262 }
5263 break;
5264
5265 case ANEG_STATE_COMPLETE_ACK_INIT:
5266 if (ap->rxconfig & ANEG_CFG_INVAL) {
5267 ret = ANEG_FAILED;
5268 break;
5269 }
5270 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5271 MR_LP_ADV_HALF_DUPLEX |
5272 MR_LP_ADV_SYM_PAUSE |
5273 MR_LP_ADV_ASYM_PAUSE |
5274 MR_LP_ADV_REMOTE_FAULT1 |
5275 MR_LP_ADV_REMOTE_FAULT2 |
5276 MR_LP_ADV_NEXT_PAGE |
5277 MR_TOGGLE_RX |
5278 MR_NP_RX);
5279 if (ap->rxconfig & ANEG_CFG_FD)
5280 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5281 if (ap->rxconfig & ANEG_CFG_HD)
5282 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5283 if (ap->rxconfig & ANEG_CFG_PS1)
5284 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5285 if (ap->rxconfig & ANEG_CFG_PS2)
5286 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5287 if (ap->rxconfig & ANEG_CFG_RF1)
5288 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5289 if (ap->rxconfig & ANEG_CFG_RF2)
5290 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5291 if (ap->rxconfig & ANEG_CFG_NP)
5292 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5293
5294 ap->link_time = ap->cur_time;
5295
5296 ap->flags ^= (MR_TOGGLE_TX);
5297 if (ap->rxconfig & 0x0008)
5298 ap->flags |= MR_TOGGLE_RX;
5299 if (ap->rxconfig & ANEG_CFG_NP)
5300 ap->flags |= MR_NP_RX;
5301 ap->flags |= MR_PAGE_RX;
5302
5303 ap->state = ANEG_STATE_COMPLETE_ACK;
5304 ret = ANEG_TIMER_ENAB;
5305 break;
5306
5307 case ANEG_STATE_COMPLETE_ACK:
5308 if (ap->ability_match != 0 &&
5309 ap->rxconfig == 0) {
5310 ap->state = ANEG_STATE_AN_ENABLE;
5311 break;
5312 }
5313 delta = ap->cur_time - ap->link_time;
5314 if (delta > ANEG_STATE_SETTLE_TIME) {
5315 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5316 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5317 } else {
5318 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5319 !(ap->flags & MR_NP_RX)) {
5320 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5321 } else {
5322 ret = ANEG_FAILED;
5323 }
5324 }
5325 }
5326 break;
5327
5328 case ANEG_STATE_IDLE_DETECT_INIT:
5329 ap->link_time = ap->cur_time;
5330 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5331 tw32_f(MAC_MODE, tp->mac_mode);
5332 udelay(40);
5333
5334 ap->state = ANEG_STATE_IDLE_DETECT;
5335 ret = ANEG_TIMER_ENAB;
5336 break;
5337
5338 case ANEG_STATE_IDLE_DETECT:
5339 if (ap->ability_match != 0 &&
5340 ap->rxconfig == 0) {
5341 ap->state = ANEG_STATE_AN_ENABLE;
5342 break;
5343 }
5344 delta = ap->cur_time - ap->link_time;
5345 if (delta > ANEG_STATE_SETTLE_TIME) {
5346 /* XXX another gem from the Broadcom driver :( */
5347 ap->state = ANEG_STATE_LINK_OK;
5348 }
5349 break;
5350
5351 case ANEG_STATE_LINK_OK:
5352 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5353 ret = ANEG_DONE;
5354 break;
5355
5356 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5357 /* ??? unimplemented */
5358 break;
5359
5360 case ANEG_STATE_NEXT_PAGE_WAIT:
5361 /* ??? unimplemented */
5362 break;
5363
5364 default:
5365 ret = ANEG_FAILED;
5366 break;
5367 }
5368
5369 return ret;
5370 }
5371
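/* Drive the state machine above to completion, bounded at 195000
 * ticks (roughly 195 ms given the udelay(1) per iteration). Returns 1
 * when the machine reports ANEG_DONE and any of MR_AN_COMPLETE,
 * MR_LINK_OK or MR_LP_ADV_FULL_DUPLEX is set; txflags receives the
 * transmitted config word and rxflags the MR_* result flags.
 */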
5372 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5373 {
5374 int res = 0;
5375 struct tg3_fiber_aneginfo aninfo;
5376 int status = ANEG_FAILED;
5377 unsigned int tick;
5378 u32 tmp;
5379
5380 tw32_f(MAC_TX_AUTO_NEG, 0);
5381
5382 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5383 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5384 udelay(40);
5385
5386 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5387 udelay(40);
5388
5389 memset(&aninfo, 0, sizeof(aninfo));
5390 aninfo.flags |= MR_AN_ENABLE;
5391 aninfo.state = ANEG_STATE_UNKNOWN;
5392 aninfo.cur_time = 0;
5393 tick = 0;
5394 while (++tick < 195000) {
5395 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5396 if (status == ANEG_DONE || status == ANEG_FAILED)
5397 break;
5398
5399 udelay(1);
5400 }
5401
5402 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5403 tw32_f(MAC_MODE, tp->mac_mode);
5404 udelay(40);
5405
5406 *txflags = aninfo.txconfig;
5407 *rxflags = aninfo.flags;
5408
5409 if (status == ANEG_DONE &&
5410 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5411 MR_LP_ADV_FULL_DUPLEX)))
5412 res = 1;
5413
5414 return res;
5415 }
5416
5417 static void tg3_init_bcm8002(struct tg3 *tp)
5418 {
5419 u32 mac_status = tr32(MAC_STATUS);
5420 int i;
5421
5422 /* Reset when initializing for the first time, or when we have a link. */
5423 if (tg3_flag(tp, INIT_COMPLETE) &&
5424 !(mac_status & MAC_STATUS_PCS_SYNCED))
5425 return;
5426
5427 /* Set PLL lock range. */
5428 tg3_writephy(tp, 0x16, 0x8007);
5429
5430 /* SW reset */
5431 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5432
5433 /* Wait for reset to complete. */
5434 /* XXX schedule_timeout() ... */
5435 for (i = 0; i < 500; i++)
5436 udelay(10);
5437
5438 /* Config mode; select PMA/Ch 1 regs. */
5439 tg3_writephy(tp, 0x10, 0x8411);
5440
5441 /* Enable auto-lock and comdet, select txclk for tx. */
5442 tg3_writephy(tp, 0x11, 0x0a10);
5443
5444 tg3_writephy(tp, 0x18, 0x00a0);
5445 tg3_writephy(tp, 0x16, 0x41ff);
5446
5447 /* Assert and deassert POR. */
5448 tg3_writephy(tp, 0x13, 0x0400);
5449 udelay(40);
5450 tg3_writephy(tp, 0x13, 0x0000);
5451
5452 tg3_writephy(tp, 0x11, 0x0a50);
5453 udelay(40);
5454 tg3_writephy(tp, 0x11, 0x0a10);
5455
5456 /* Wait for signal to stabilize */
5457 /* XXX schedule_timeout() ... */
5458 for (i = 0; i < 15000; i++)
5459 udelay(10);
5460
5461 /* Deselect the channel register so we can read the PHYID
5462 * later.
5463 */
5464 tg3_writephy(tp, 0x10, 0x8011);
5465 }
5466
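/* Fiber link setup using the hardware SG-DIG autoneg block (5704-class
 * serdes). Forced modes bypass the block entirely; otherwise the
 * expected SG_DIG_CTRL value is computed from the flow-control
 * settings and autoneg is (re)started, with a parallel-detect fallback
 * when the partner never sends config code words.
 */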
5467 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5468 {
5469 u16 flowctrl;
5470 bool current_link_up;
5471 u32 sg_dig_ctrl, sg_dig_status;
5472 u32 serdes_cfg, expected_sg_dig_ctrl;
5473 int workaround, port_a;
5474
5475 serdes_cfg = 0;
5476 expected_sg_dig_ctrl = 0;
5477 workaround = 0;
5478 port_a = 1;
5479 current_link_up = false;
5480
5481 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5482 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5483 workaround = 1;
5484 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5485 port_a = 0;
5486
5487 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5488 /* preserve bits 20-23 for voltage regulator */
5489 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5490 }
5491
5492 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5493
5494 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5495 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5496 if (workaround) {
5497 u32 val = serdes_cfg;
5498
5499 if (port_a)
5500 val |= 0xc010000;
5501 else
5502 val |= 0x4010000;
5503 tw32_f(MAC_SERDES_CFG, val);
5504 }
5505
5506 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5507 }
5508 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5509 tg3_setup_flow_control(tp, 0, 0);
5510 current_link_up = true;
5511 }
5512 goto out;
5513 }
5514
5515 /* Want auto-negotiation. */
5516 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5517
5518 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5519 if (flowctrl & ADVERTISE_1000XPAUSE)
5520 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5521 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5522 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5523
5524 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5525 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5526 tp->serdes_counter &&
5527 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5528 MAC_STATUS_RCVD_CFG)) ==
5529 MAC_STATUS_PCS_SYNCED)) {
5530 tp->serdes_counter--;
5531 current_link_up = true;
5532 goto out;
5533 }
5534 restart_autoneg:
5535 if (workaround)
5536 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5537 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5538 udelay(5);
5539 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5540
5541 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5542 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5543 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5544 MAC_STATUS_SIGNAL_DET)) {
5545 sg_dig_status = tr32(SG_DIG_STATUS);
5546 mac_status = tr32(MAC_STATUS);
5547
5548 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5549 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5550 u32 local_adv = 0, remote_adv = 0;
5551
5552 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5553 local_adv |= ADVERTISE_1000XPAUSE;
5554 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5555 local_adv |= ADVERTISE_1000XPSE_ASYM;
5556
5557 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5558 remote_adv |= LPA_1000XPAUSE;
5559 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5560 remote_adv |= LPA_1000XPAUSE_ASYM;
5561
5562 tp->link_config.rmt_adv =
5563 mii_adv_to_ethtool_adv_x(remote_adv);
5564
5565 tg3_setup_flow_control(tp, local_adv, remote_adv);
5566 current_link_up = true;
5567 tp->serdes_counter = 0;
5568 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5569 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5570 if (tp->serdes_counter)
5571 tp->serdes_counter--;
5572 else {
5573 if (workaround) {
5574 u32 val = serdes_cfg;
5575
5576 if (port_a)
5577 val |= 0xc010000;
5578 else
5579 val |= 0x4010000;
5580
5581 tw32_f(MAC_SERDES_CFG, val);
5582 }
5583
5584 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5585 udelay(40);
5586
5587 /* Link parallel detection - the link is up only
5588 * if we have PCS_SYNC and are not receiving
5589 * config code words. */
5590 mac_status = tr32(MAC_STATUS);
5591 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5592 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5593 tg3_setup_flow_control(tp, 0, 0);
5594 current_link_up = true;
5595 tp->phy_flags |=
5596 TG3_PHYFLG_PARALLEL_DETECT;
5597 tp->serdes_counter =
5598 SERDES_PARALLEL_DET_TIMEOUT;
5599 } else
5600 goto restart_autoneg;
5601 }
5602 }
5603 } else {
5604 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5605 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5606 }
5607
5608 out:
5609 return current_link_up;
5610 }
5611
5612 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5613 {
5614 bool current_link_up = false;
5615
5616 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5617 goto out;
5618
5619 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5620 u32 txflags, rxflags;
5621 int i;
5622
5623 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5624 u32 local_adv = 0, remote_adv = 0;
5625
5626 if (txflags & ANEG_CFG_PS1)
5627 local_adv |= ADVERTISE_1000XPAUSE;
5628 if (txflags & ANEG_CFG_PS2)
5629 local_adv |= ADVERTISE_1000XPSE_ASYM;
5630
5631 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5632 remote_adv |= LPA_1000XPAUSE;
5633 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5634 remote_adv |= LPA_1000XPAUSE_ASYM;
5635
5636 tp->link_config.rmt_adv =
5637 mii_adv_to_ethtool_adv_x(remote_adv);
5638
5639 tg3_setup_flow_control(tp, local_adv, remote_adv);
5640
5641 current_link_up = true;
5642 }
5643 for (i = 0; i < 30; i++) {
5644 udelay(20);
5645 tw32_f(MAC_STATUS,
5646 (MAC_STATUS_SYNC_CHANGED |
5647 MAC_STATUS_CFG_CHANGED));
5648 udelay(40);
5649 if ((tr32(MAC_STATUS) &
5650 (MAC_STATUS_SYNC_CHANGED |
5651 MAC_STATUS_CFG_CHANGED)) == 0)
5652 break;
5653 }
5654
5655 mac_status = tr32(MAC_STATUS);
5656 if (!current_link_up &&
5657 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5658 !(mac_status & MAC_STATUS_RCVD_CFG))
5659 current_link_up = true;
5660 } else {
5661 tg3_setup_flow_control(tp, 0, 0);
5662
5663 /* Forcing 1000FD link up. */
5664 current_link_up = true;
5665
5666 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5667 udelay(40);
5668
5669 tw32_f(MAC_MODE, tp->mac_mode);
5670 udelay(40);
5671 }
5672
5673 out:
5674 return current_link_up;
5675 }
5676
5677 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5678 {
5679 u32 orig_pause_cfg;
5680 u16 orig_active_speed;
5681 u8 orig_active_duplex;
5682 u32 mac_status;
5683 bool current_link_up;
5684 int i;
5685
5686 orig_pause_cfg = tp->link_config.active_flowctrl;
5687 orig_active_speed = tp->link_config.active_speed;
5688 orig_active_duplex = tp->link_config.active_duplex;
5689
5690 if (!tg3_flag(tp, HW_AUTONEG) &&
5691 tp->link_up &&
5692 tg3_flag(tp, INIT_COMPLETE)) {
5693 mac_status = tr32(MAC_STATUS);
5694 mac_status &= (MAC_STATUS_PCS_SYNCED |
5695 MAC_STATUS_SIGNAL_DET |
5696 MAC_STATUS_CFG_CHANGED |
5697 MAC_STATUS_RCVD_CFG);
5698 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5699 MAC_STATUS_SIGNAL_DET)) {
5700 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5701 MAC_STATUS_CFG_CHANGED));
5702 return 0;
5703 }
5704 }
5705
5706 tw32_f(MAC_TX_AUTO_NEG, 0);
5707
5708 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5709 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5710 tw32_f(MAC_MODE, tp->mac_mode);
5711 udelay(40);
5712
5713 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5714 tg3_init_bcm8002(tp);
5715
5716 /* Enable link change events even when polling the serdes. */
5717 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5718 udelay(40);
5719
5720 current_link_up = false;
5721 tp->link_config.rmt_adv = 0;
5722 mac_status = tr32(MAC_STATUS);
5723
5724 if (tg3_flag(tp, HW_AUTONEG))
5725 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5726 else
5727 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5728
5729 tp->napi[0].hw_status->status =
5730 (SD_STATUS_UPDATED |
5731 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5732
5733 for (i = 0; i < 100; i++) {
5734 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5735 MAC_STATUS_CFG_CHANGED));
5736 udelay(5);
5737 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5738 MAC_STATUS_CFG_CHANGED |
5739 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5740 break;
5741 }
5742
5743 mac_status = tr32(MAC_STATUS);
5744 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5745 current_link_up = false;
5746 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5747 tp->serdes_counter == 0) {
5748 tw32_f(MAC_MODE, (tp->mac_mode |
5749 MAC_MODE_SEND_CONFIGS));
5750 udelay(1);
5751 tw32_f(MAC_MODE, tp->mac_mode);
5752 }
5753 }
5754
5755 if (current_link_up) {
5756 tp->link_config.active_speed = SPEED_1000;
5757 tp->link_config.active_duplex = DUPLEX_FULL;
5758 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5759 LED_CTRL_LNKLED_OVERRIDE |
5760 LED_CTRL_1000MBPS_ON));
5761 } else {
5762 tp->link_config.active_speed = SPEED_UNKNOWN;
5763 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5764 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5765 LED_CTRL_LNKLED_OVERRIDE |
5766 LED_CTRL_TRAFFIC_OVERRIDE));
5767 }
5768
5769 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5770 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5771 if (orig_pause_cfg != now_pause_cfg ||
5772 orig_active_speed != tp->link_config.active_speed ||
5773 orig_active_duplex != tp->link_config.active_duplex)
5774 tg3_link_report(tp);
5775 }
5776
5777 return 0;
5778 }
5779
5780 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5781 {
5782 int err = 0;
5783 u32 bmsr, bmcr;
5784 u16 current_speed = SPEED_UNKNOWN;
5785 u8 current_duplex = DUPLEX_UNKNOWN;
5786 bool current_link_up = false;
5787 u32 local_adv, remote_adv, sgsr;
5788
5789 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5790 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5791 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5792 (sgsr & SERDES_TG3_SGMII_MODE)) {
5793
5794 if (force_reset)
5795 tg3_phy_reset(tp);
5796
5797 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5798
5799 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5800 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5801 } else {
5802 current_link_up = true;
5803 if (sgsr & SERDES_TG3_SPEED_1000) {
5804 current_speed = SPEED_1000;
5805 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5806 } else if (sgsr & SERDES_TG3_SPEED_100) {
5807 current_speed = SPEED_100;
5808 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5809 } else {
5810 current_speed = SPEED_10;
5811 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5812 }
5813
5814 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5815 current_duplex = DUPLEX_FULL;
5816 else
5817 current_duplex = DUPLEX_HALF;
5818 }
5819
5820 tw32_f(MAC_MODE, tp->mac_mode);
5821 udelay(40);
5822
5823 tg3_clear_mac_status(tp);
5824
5825 goto fiber_setup_done;
5826 }
5827
5828 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5829 tw32_f(MAC_MODE, tp->mac_mode);
5830 udelay(40);
5831
5832 tg3_clear_mac_status(tp);
5833
5834 if (force_reset)
5835 tg3_phy_reset(tp);
5836
5837 tp->link_config.rmt_adv = 0;
5838
5839 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5840 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5841 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5842 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5843 bmsr |= BMSR_LSTATUS;
5844 else
5845 bmsr &= ~BMSR_LSTATUS;
5846 }
5847
5848 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5849
5850 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5851 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5852 /* do nothing, just check for link up at the end */
5853 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5854 u32 adv, newadv;
5855
5856 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5857 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5858 ADVERTISE_1000XPAUSE |
5859 ADVERTISE_1000XPSE_ASYM |
5860 ADVERTISE_SLCT);
5861
5862 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5863 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5864
5865 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5866 tg3_writephy(tp, MII_ADVERTISE, newadv);
5867 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5868 tg3_writephy(tp, MII_BMCR, bmcr);
5869
5870 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5871 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5872 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5873
5874 return err;
5875 }
5876 } else {
5877 u32 new_bmcr;
5878
5879 bmcr &= ~BMCR_SPEED1000;
5880 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5881
5882 if (tp->link_config.duplex == DUPLEX_FULL)
5883 new_bmcr |= BMCR_FULLDPLX;
5884
5885 if (new_bmcr != bmcr) {
5886 /* BMCR_SPEED1000 is a reserved bit that needs
5887 * to be set on write.
5888 */
5889 new_bmcr |= BMCR_SPEED1000;
5890
5891 /* Force a linkdown */
5892 if (tp->link_up) {
5893 u32 adv;
5894
5895 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5896 adv &= ~(ADVERTISE_1000XFULL |
5897 ADVERTISE_1000XHALF |
5898 ADVERTISE_SLCT);
5899 tg3_writephy(tp, MII_ADVERTISE, adv);
5900 tg3_writephy(tp, MII_BMCR, bmcr |
5901 BMCR_ANRESTART |
5902 BMCR_ANENABLE);
5903 udelay(10);
5904 tg3_carrier_off(tp);
5905 }
5906 tg3_writephy(tp, MII_BMCR, new_bmcr);
5907 bmcr = new_bmcr;
5908 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5909 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5910 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5911 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5912 bmsr |= BMSR_LSTATUS;
5913 else
5914 bmsr &= ~BMSR_LSTATUS;
5915 }
5916 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5917 }
5918 }
5919
5920 if (bmsr & BMSR_LSTATUS) {
5921 current_speed = SPEED_1000;
5922 current_link_up = true;
5923 if (bmcr & BMCR_FULLDPLX)
5924 current_duplex = DUPLEX_FULL;
5925 else
5926 current_duplex = DUPLEX_HALF;
5927
5928 local_adv = 0;
5929 remote_adv = 0;
5930
5931 if (bmcr & BMCR_ANENABLE) {
5932 u32 common;
5933
5934 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5935 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5936 common = local_adv & remote_adv;
5937 if (common & (ADVERTISE_1000XHALF |
5938 ADVERTISE_1000XFULL)) {
5939 if (common & ADVERTISE_1000XFULL)
5940 current_duplex = DUPLEX_FULL;
5941 else
5942 current_duplex = DUPLEX_HALF;
5943
5944 tp->link_config.rmt_adv =
5945 mii_adv_to_ethtool_adv_x(remote_adv);
5946 } else if (!tg3_flag(tp, 5780_CLASS)) {
5947 /* Link is up via parallel detect */
5948 } else {
5949 current_link_up = false;
5950 }
5951 }
5952 }
5953
5954 fiber_setup_done:
5955 if (current_link_up && current_duplex == DUPLEX_FULL)
5956 tg3_setup_flow_control(tp, local_adv, remote_adv);
5957
5958 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5959 if (tp->link_config.active_duplex == DUPLEX_HALF)
5960 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5961
5962 tw32_f(MAC_MODE, tp->mac_mode);
5963 udelay(40);
5964
5965 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5966
5967 tp->link_config.active_speed = current_speed;
5968 tp->link_config.active_duplex = current_duplex;
5969
5970 tg3_test_and_report_link_chg(tp, current_link_up);
5971 return err;
5972 }
5973
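/* Invoked repeatedly (note the serdes_counter countdown) to handle
 * serdes parallel detection: force the link up when we see signal
 * detect without config code words, and re-enable autoneg as soon as
 * config code words appear again.
 */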
5974 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5975 {
5976 if (tp->serdes_counter) {
5977 /* Give autoneg time to complete. */
5978 tp->serdes_counter--;
5979 return;
5980 }
5981
5982 if (!tp->link_up &&
5983 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5984 u32 bmcr;
5985
5986 tg3_readphy(tp, MII_BMCR, &bmcr);
5987 if (bmcr & BMCR_ANENABLE) {
5988 u32 phy1, phy2;
5989
5990 /* Select shadow register 0x1f */
5991 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5992 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5993
5994 /* Select expansion interrupt status register */
5995 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5996 MII_TG3_DSP_EXP1_INT_STAT);
5997 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5998 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5999
6000 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6001 /* We have signal detect and are not receiving
6002 * config code words; the link is up by parallel
6003 * detection.
6004 */
6005
6006 bmcr &= ~BMCR_ANENABLE;
6007 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6008 tg3_writephy(tp, MII_BMCR, bmcr);
6009 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6010 }
6011 }
6012 } else if (tp->link_up &&
6013 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6014 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6015 u32 phy2;
6016
6017 /* Select expansion interrupt status register */
6018 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6019 MII_TG3_DSP_EXP1_INT_STAT);
6020 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6021 if (phy2 & 0x20) {
6022 u32 bmcr;
6023
6024 /* Config code words received, turn on autoneg. */
6025 tg3_readphy(tp, MII_BMCR, &bmcr);
6026 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6027
6028 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6029
6030 }
6031 }
6032 }
6033
6034 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6035 {
6036 u32 val;
6037 int err;
6038
6039 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6040 err = tg3_setup_fiber_phy(tp, force_reset);
6041 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6042 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6043 else
6044 err = tg3_setup_copper_phy(tp, force_reset);
6045
6046 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6047 u32 scale;
6048
6049 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6050 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6051 scale = 65;
6052 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6053 scale = 6;
6054 else
6055 scale = 12;
6056
6057 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6058 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6059 tw32(GRC_MISC_CFG, val);
6060 }
6061
6062 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6063 (6 << TX_LENGTHS_IPG_SHIFT);
6064 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6065 tg3_asic_rev(tp) == ASIC_REV_5762)
6066 val |= tr32(MAC_TX_LENGTHS) &
6067 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6068 TX_LENGTHS_CNT_DWN_VAL_MSK);
6069
6070 if (tp->link_config.active_speed == SPEED_1000 &&
6071 tp->link_config.active_duplex == DUPLEX_HALF)
6072 tw32(MAC_TX_LENGTHS, val |
6073 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6074 else
6075 tw32(MAC_TX_LENGTHS, val |
6076 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
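	/* The longer slot time (0xff) programmed above for 1000 Mb/s half
	 * duplex presumably accounts for carrier extension; all other
	 * speed/duplex combinations use the standard value of 32.
	 */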
6077
6078 if (!tg3_flag(tp, 5705_PLUS)) {
6079 if (tp->link_up) {
6080 tw32(HOSTCC_STAT_COAL_TICKS,
6081 tp->coal.stats_block_coalesce_usecs);
6082 } else {
6083 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6084 }
6085 }
6086
6087 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6088 val = tr32(PCIE_PWR_MGMT_THRESH);
6089 if (!tp->link_up)
6090 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6091 tp->pwrmgmt_thresh;
6092 else
6093 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6094 tw32(PCIE_PWR_MGMT_THRESH, val);
6095 }
6096
6097 return err;
6098 }
6099
6100 /* tp->lock must be held */
6101 static u64 tg3_refclk_read(struct tg3 *tp)
6102 {
6103 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6104 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6105 }
6106
6107 /* tp->lock must be held */
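/* Stopping the clock around the two 32-bit writes below ensures the
 * hardware never samples a torn 64-bit value while the halves are
 * being updated.
 */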
6108 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6109 {
6110 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6111
6112 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6113 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6114 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6115 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6116 }
6117
6118 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6119 static inline void tg3_full_unlock(struct tg3 *tp);
6120 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6121 {
6122 struct tg3 *tp = netdev_priv(dev);
6123
6124 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6125 SOF_TIMESTAMPING_RX_SOFTWARE |
6126 SOF_TIMESTAMPING_SOFTWARE;
6127
6128 if (tg3_flag(tp, PTP_CAPABLE)) {
6129 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6130 SOF_TIMESTAMPING_RX_HARDWARE |
6131 SOF_TIMESTAMPING_RAW_HARDWARE;
6132 }
6133
6134 if (tp->ptp_clock)
6135 info->phc_index = ptp_clock_index(tp->ptp_clock);
6136 else
6137 info->phc_index = -1;
6138
6139 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6140
6141 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6142 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6143 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6144 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6145 return 0;
6146 }
6147
6148 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6149 {
6150 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6151 bool neg_adj = false;
6152 u32 correction = 0;
6153
6154 if (ppb < 0) {
6155 neg_adj = true;
6156 ppb = -ppb;
6157 }
6158
6159 	/* Frequency adjustment is performed in hardware with a 24-bit
6160 	 * accumulator and a programmable correction value. On each clock, the
6161 * correction value gets added to the accumulator and when it
6162 * overflows, the time counter is incremented/decremented.
6163 *
6164 * So conversion from ppb to correction value is
6165 * ppb * (1 << 24) / 1000000000
6166 */
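	/* Worked example: a request of ppb = 120000 (+120 ppm) gives
	 *     correction = 120000 * 16777216 / 1000000000 = 2013 (truncated),
	 * so the accumulator overflows 2013 times every 2^24 clocks,
	 * adjusting the counter by 2013 / 2^24 ~= 120 ppm as requested.
	 */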
6167 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6168 TG3_EAV_REF_CLK_CORRECT_MASK;
6169
6170 tg3_full_lock(tp, 0);
6171
6172 if (correction)
6173 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6174 TG3_EAV_REF_CLK_CORRECT_EN |
6175 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6176 else
6177 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6178
6179 tg3_full_unlock(tp);
6180
6181 return 0;
6182 }
6183
6184 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6185 {
6186 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6187
6188 tg3_full_lock(tp, 0);
6189 tp->ptp_adjust += delta;
6190 tg3_full_unlock(tp);
6191
6192 return 0;
6193 }
6194
6195 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6196 {
6197 u64 ns;
6198 u32 remainder;
6199 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6200
6201 tg3_full_lock(tp, 0);
6202 ns = tg3_refclk_read(tp);
6203 ns += tp->ptp_adjust;
6204 tg3_full_unlock(tp);
6205
6206 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6207 ts->tv_nsec = remainder;
6208
6209 return 0;
6210 }
6211
6212 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6213 const struct timespec *ts)
6214 {
6215 u64 ns;
6216 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6217
6218 ns = timespec_to_ns(ts);
6219
6220 tg3_full_lock(tp, 0);
6221 tg3_refclk_write(tp, ns);
6222 tp->ptp_adjust = 0;
6223 tg3_full_unlock(tp);
6224
6225 return 0;
6226 }
6227
6228 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6229 struct ptp_clock_request *rq, int on)
6230 {
6231 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6232 u32 clock_ctl;
6233 int rval = 0;
6234
6235 switch (rq->type) {
6236 case PTP_CLK_REQ_PEROUT:
6237 if (rq->perout.index != 0)
6238 return -EINVAL;
6239
6240 tg3_full_lock(tp, 0);
6241 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6242 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6243
6244 if (on) {
6245 u64 nsec;
6246
6247 nsec = rq->perout.start.sec * 1000000000ULL +
6248 rq->perout.start.nsec;
6249
6250 if (rq->perout.period.sec || rq->perout.period.nsec) {
6251 netdev_warn(tp->dev,
6252 "Device supports only a one-shot timesync output, period must be 0\n");
6253 rval = -EINVAL;
6254 goto err_out;
6255 }
6256
6257 if (nsec & (1ULL << 63)) {
6258 netdev_warn(tp->dev,
6259 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6260 rval = -EINVAL;
6261 goto err_out;
6262 }
6263
6264 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6265 tw32(TG3_EAV_WATCHDOG0_MSB,
6266 TG3_EAV_WATCHDOG0_EN |
6267 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6268
6269 tw32(TG3_EAV_REF_CLCK_CTL,
6270 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6271 } else {
6272 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6273 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6274 }
6275
6276 err_out:
6277 tg3_full_unlock(tp);
6278 return rval;
6279
6280 default:
6281 break;
6282 }
6283
6284 return -EOPNOTSUPP;
6285 }
6286
6287 static const struct ptp_clock_info tg3_ptp_caps = {
6288 .owner = THIS_MODULE,
6289 .name = "tg3 clock",
6290 .max_adj = 250000000,
6291 .n_alarm = 0,
6292 .n_ext_ts = 0,
6293 .n_per_out = 1,
6294 .pps = 0,
6295 .adjfreq = tg3_ptp_adjfreq,
6296 .adjtime = tg3_ptp_adjtime,
6297 .gettime = tg3_ptp_gettime,
6298 .settime = tg3_ptp_settime,
6299 .enable = tg3_ptp_enable,
6300 };
6301
6302 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6303 struct skb_shared_hwtstamps *timestamp)
6304 {
6305 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6306 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6307 tp->ptp_adjust);
6308 }
6309
6310 /* tp->lock must be held */
6311 static void tg3_ptp_init(struct tg3 *tp)
6312 {
6313 if (!tg3_flag(tp, PTP_CAPABLE))
6314 return;
6315
6316 /* Initialize the hardware clock to the system time. */
6317 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6318 tp->ptp_adjust = 0;
6319 tp->ptp_info = tg3_ptp_caps;
6320 }
6321
6322 /* tp->lock must be held */
6323 static void tg3_ptp_resume(struct tg3 *tp)
6324 {
6325 if (!tg3_flag(tp, PTP_CAPABLE))
6326 return;
6327
6328 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6329 tp->ptp_adjust = 0;
6330 }
6331
6332 static void tg3_ptp_fini(struct tg3 *tp)
6333 {
6334 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6335 return;
6336
6337 ptp_clock_unregister(tp->ptp_clock);
6338 tp->ptp_clock = NULL;
6339 tp->ptp_adjust = 0;
6340 }
6341
6342 static inline int tg3_irq_sync(struct tg3 *tp)
6343 {
6344 return tp->irq_sync;
6345 }
6346
6347 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6348 {
6349 int i;
6350
6351 dst = (u32 *)((u8 *)dst + off);
6352 for (i = 0; i < len; i += sizeof(u32))
6353 *dst++ = tr32(off + i);
6354 }
6355
6356 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6357 {
6358 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6359 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6360 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6361 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6362 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6363 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6364 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6365 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6366 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6367 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6368 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6369 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6370 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6371 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6372 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6373 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6374 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6375 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6376 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6377
6378 if (tg3_flag(tp, SUPPORT_MSIX))
6379 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6380
6381 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6382 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6383 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6384 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6385 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6386 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6387 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6388 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6389
6390 if (!tg3_flag(tp, 5705_PLUS)) {
6391 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6392 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6393 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6394 }
6395
6396 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6397 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6398 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6399 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6400 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6401
6402 if (tg3_flag(tp, NVRAM))
6403 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6404 }
6405
6406 static void tg3_dump_state(struct tg3 *tp)
6407 {
6408 int i;
6409 u32 *regs;
6410
6411 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6412 if (!regs)
6413 return;
6414
6415 if (tg3_flag(tp, PCI_EXPRESS)) {
6416 /* Read up to but not including private PCI registers */
6417 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6418 regs[i / sizeof(u32)] = tr32(i);
6419 } else
6420 tg3_dump_legacy_regs(tp, regs);
6421
6422 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6423 if (!regs[i + 0] && !regs[i + 1] &&
6424 !regs[i + 2] && !regs[i + 3])
6425 continue;
6426
6427 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6428 i * 4,
6429 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6430 }
6431
6432 kfree(regs);
6433
6434 for (i = 0; i < tp->irq_cnt; i++) {
6435 struct tg3_napi *tnapi = &tp->napi[i];
6436
6437 /* SW status block */
6438 netdev_err(tp->dev,
6439 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6440 i,
6441 tnapi->hw_status->status,
6442 tnapi->hw_status->status_tag,
6443 tnapi->hw_status->rx_jumbo_consumer,
6444 tnapi->hw_status->rx_consumer,
6445 tnapi->hw_status->rx_mini_consumer,
6446 tnapi->hw_status->idx[0].rx_producer,
6447 tnapi->hw_status->idx[0].tx_consumer);
6448
6449 netdev_err(tp->dev,
6450 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6451 i,
6452 tnapi->last_tag, tnapi->last_irq_tag,
6453 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6454 tnapi->rx_rcb_ptr,
6455 tnapi->prodring.rx_std_prod_idx,
6456 tnapi->prodring.rx_std_cons_idx,
6457 tnapi->prodring.rx_jmb_prod_idx,
6458 tnapi->prodring.rx_jmb_cons_idx);
6459 }
6460 }
6461
6462 /* This is called whenever we suspect that the system chipset is re-
6463 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6464 * is bogus tx completions. We try to recover by setting the
6465 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6466 * in the workqueue.
6467 */
6468 static void tg3_tx_recover(struct tg3 *tp)
6469 {
6470 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6471 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6472
6473 netdev_warn(tp->dev,
6474 "The system may be re-ordering memory-mapped I/O "
6475 "cycles to the network device, attempting to recover. "
6476 "Please report the problem to the driver maintainer "
6477 "and include system chipset information.\n");
6478
6479 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6480 }
6481
6482 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6483 {
6484 /* Tell compiler to fetch tx indices from memory. */
6485 barrier();
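	/* Example of the availability computation below: with a 512-entry
	 * ring, tx_prod = 2 and tx_cons = 510, (2 - 510) & 511 = 4
	 * descriptors are in flight, so tx_pending - 4 slots remain free.
	 */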
6486 return tnapi->tx_pending -
6487 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6488 }
6489
6490 /* Tigon3 never reports partial packet sends. So we do not
6491 * need special logic to handle SKBs that have not had all
6492 * of their frags sent yet, like SunGEM does.
6493 */
6494 static void tg3_tx(struct tg3_napi *tnapi)
6495 {
6496 struct tg3 *tp = tnapi->tp;
6497 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6498 u32 sw_idx = tnapi->tx_cons;
6499 struct netdev_queue *txq;
6500 int index = tnapi - tp->napi;
6501 unsigned int pkts_compl = 0, bytes_compl = 0;
6502
6503 if (tg3_flag(tp, ENABLE_TSS))
6504 index--;
6505
6506 txq = netdev_get_tx_queue(tp->dev, index);
6507
6508 while (sw_idx != hw_idx) {
6509 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6510 struct sk_buff *skb = ri->skb;
6511 int i, tx_bug = 0;
6512
6513 if (unlikely(skb == NULL)) {
6514 tg3_tx_recover(tp);
6515 return;
6516 }
6517
6518 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6519 struct skb_shared_hwtstamps timestamp;
6520 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6521 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6522
6523 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6524
6525 skb_tstamp_tx(skb, &timestamp);
6526 }
6527
6528 pci_unmap_single(tp->pdev,
6529 dma_unmap_addr(ri, mapping),
6530 skb_headlen(skb),
6531 PCI_DMA_TODEVICE);
6532
6533 ri->skb = NULL;
6534
6535 while (ri->fragmented) {
6536 ri->fragmented = false;
6537 sw_idx = NEXT_TX(sw_idx);
6538 ri = &tnapi->tx_buffers[sw_idx];
6539 }
6540
6541 sw_idx = NEXT_TX(sw_idx);
6542
6543 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6544 ri = &tnapi->tx_buffers[sw_idx];
6545 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6546 tx_bug = 1;
6547
6548 pci_unmap_page(tp->pdev,
6549 dma_unmap_addr(ri, mapping),
6550 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6551 PCI_DMA_TODEVICE);
6552
6553 while (ri->fragmented) {
6554 ri->fragmented = false;
6555 sw_idx = NEXT_TX(sw_idx);
6556 ri = &tnapi->tx_buffers[sw_idx];
6557 }
6558
6559 sw_idx = NEXT_TX(sw_idx);
6560 }
6561
6562 pkts_compl++;
6563 bytes_compl += skb->len;
6564
6565 dev_kfree_skb(skb);
6566
6567 if (unlikely(tx_bug)) {
6568 tg3_tx_recover(tp);
6569 return;
6570 }
6571 }
6572
6573 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6574
6575 tnapi->tx_cons = sw_idx;
6576
6577 /* Need to make the tx_cons update visible to tg3_start_xmit()
6578 * before checking for netif_queue_stopped(). Without the
6579 * memory barrier, there is a small possibility that tg3_start_xmit()
6580 * will miss it and cause the queue to be stopped forever.
6581 */
6582 smp_mb();
6583
6584 if (unlikely(netif_tx_queue_stopped(txq) &&
6585 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6586 __netif_tx_lock(txq, smp_processor_id());
6587 if (netif_tx_queue_stopped(txq) &&
6588 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6589 netif_tx_wake_queue(txq);
6590 __netif_tx_unlock(txq);
6591 }
6592 }
6593
6594 static void tg3_frag_free(bool is_frag, void *data)
6595 {
6596 if (is_frag)
6597 put_page(virt_to_head_page(data));
6598 else
6599 kfree(data);
6600 }
6601
6602 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6603 {
6604 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6605 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6606
6607 if (!ri->data)
6608 return;
6609
6610 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6611 map_sz, PCI_DMA_FROMDEVICE);
6612 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6613 ri->data = NULL;
6614 }
6615
6616
6617 /* Returns size of skb allocated or < 0 on error.
6618 *
6619 * We only need to fill in the address because the other members
6620 * of the RX descriptor are invariant, see tg3_init_rings.
6621 *
6622 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6623 * posting buffers we only dirty the first cache line of the RX
6624 * descriptor (containing the address). Whereas for the RX status
6625 * buffers the cpu only reads the last cacheline of the RX descriptor
6626 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6627 */
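/* Allocation strategy: when the buffer plus the trailing skb_shared_info
 * fits within a page, it comes from the page-frag allocator and
 * *frag_size is set so tg3_rx() can build_skb() directly over the data;
 * otherwise the buffer is kmalloc'd and *frag_size is left 0, which
 * tells build_skb() the area was kmalloc'd (see tg3_frag_free).
 */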
6628 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6629 u32 opaque_key, u32 dest_idx_unmasked,
6630 unsigned int *frag_size)
6631 {
6632 struct tg3_rx_buffer_desc *desc;
6633 struct ring_info *map;
6634 u8 *data;
6635 dma_addr_t mapping;
6636 int skb_size, data_size, dest_idx;
6637
6638 switch (opaque_key) {
6639 case RXD_OPAQUE_RING_STD:
6640 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6641 desc = &tpr->rx_std[dest_idx];
6642 map = &tpr->rx_std_buffers[dest_idx];
6643 data_size = tp->rx_pkt_map_sz;
6644 break;
6645
6646 case RXD_OPAQUE_RING_JUMBO:
6647 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6648 desc = &tpr->rx_jmb[dest_idx].std;
6649 map = &tpr->rx_jmb_buffers[dest_idx];
6650 data_size = TG3_RX_JMB_MAP_SZ;
6651 break;
6652
6653 default:
6654 return -EINVAL;
6655 }
6656
6657 /* Do not overwrite any of the map or rp information
6658 * until we are sure we can commit to a new buffer.
6659 *
6660 * Callers depend upon this behavior and assume that
6661 * we leave everything unchanged if we fail.
6662 */
6663 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6664 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6665 if (skb_size <= PAGE_SIZE) {
6666 data = netdev_alloc_frag(skb_size);
6667 *frag_size = skb_size;
6668 } else {
6669 data = kmalloc(skb_size, GFP_ATOMIC);
6670 *frag_size = 0;
6671 }
6672 if (!data)
6673 return -ENOMEM;
6674
6675 mapping = pci_map_single(tp->pdev,
6676 data + TG3_RX_OFFSET(tp),
6677 data_size,
6678 PCI_DMA_FROMDEVICE);
6679 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6680 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6681 return -EIO;
6682 }
6683
6684 map->data = data;
6685 dma_unmap_addr_set(map, mapping, mapping);
6686
6687 desc->addr_hi = ((u64)mapping >> 32);
6688 desc->addr_lo = ((u64)mapping & 0xffffffff);
6689
6690 return data_size;
6691 }
6692
6693 /* We only need to move over in the address because the other
6694 * members of the RX descriptor are invariant. See notes above
6695 * tg3_alloc_rx_data for full details.
6696 */
6697 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6698 struct tg3_rx_prodring_set *dpr,
6699 u32 opaque_key, int src_idx,
6700 u32 dest_idx_unmasked)
6701 {
6702 struct tg3 *tp = tnapi->tp;
6703 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6704 struct ring_info *src_map, *dest_map;
6705 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6706 int dest_idx;
6707
6708 switch (opaque_key) {
6709 case RXD_OPAQUE_RING_STD:
6710 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6711 dest_desc = &dpr->rx_std[dest_idx];
6712 dest_map = &dpr->rx_std_buffers[dest_idx];
6713 src_desc = &spr->rx_std[src_idx];
6714 src_map = &spr->rx_std_buffers[src_idx];
6715 break;
6716
6717 case RXD_OPAQUE_RING_JUMBO:
6718 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6719 dest_desc = &dpr->rx_jmb[dest_idx].std;
6720 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6721 src_desc = &spr->rx_jmb[src_idx].std;
6722 src_map = &spr->rx_jmb_buffers[src_idx];
6723 break;
6724
6725 default:
6726 return;
6727 }
6728
6729 dest_map->data = src_map->data;
6730 dma_unmap_addr_set(dest_map, mapping,
6731 dma_unmap_addr(src_map, mapping));
6732 dest_desc->addr_hi = src_desc->addr_hi;
6733 dest_desc->addr_lo = src_desc->addr_lo;
6734
6735 /* Ensure that the update to the skb happens after the physical
6736 * addresses have been transferred to the new BD location.
6737 */
6738 smp_wmb();
6739
6740 src_map->data = NULL;
6741 }
6742
6743 /* The RX ring scheme is composed of multiple rings which post fresh
6744 * buffers to the chip, and one special ring the chip uses to report
6745 * status back to the host.
6746 *
6747 * The special ring reports the status of received packets to the
6748 * host. The chip does not write into the original descriptor the
6749 * RX buffer was obtained from. The chip simply takes the original
6750 * descriptor as provided by the host, updates the status and length
6751 * field, then writes this into the next status ring entry.
6752 *
6753 * Each ring the host uses to post buffers to the chip is described
6754 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6755 * it is first placed into the on-chip RAM. When the packet's length
6756 * is known, it walks down the TG3_BDINFO entries to select the ring.
6757 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6758 * whose MAXLEN covers the new packet's length is chosen.
6759 *
6760 * The "separate ring for rx status" scheme may sound odd, but it makes
6761 * sense from a cache coherency perspective. If only the host writes
6762 * to the buffer post rings, and only the chip writes to the rx status
6763 * rings, then cache lines never move beyond shared-modified state.
6764 * If both the host and chip were to write into the same ring, cache line
6765 * eviction could occur since both entities want it in an exclusive state.
6766 */
6767 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6768 {
6769 struct tg3 *tp = tnapi->tp;
6770 u32 work_mask, rx_std_posted = 0;
6771 u32 std_prod_idx, jmb_prod_idx;
6772 u32 sw_idx = tnapi->rx_rcb_ptr;
6773 u16 hw_idx;
6774 int received;
6775 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6776
6777 hw_idx = *(tnapi->rx_rcb_prod_idx);
6778 /*
6779 * We need to order the read of hw_idx and the read of
6780 * the opaque cookie.
6781 */
6782 rmb();
6783 work_mask = 0;
6784 received = 0;
6785 std_prod_idx = tpr->rx_std_prod_idx;
6786 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6787 while (sw_idx != hw_idx && budget > 0) {
6788 struct ring_info *ri;
6789 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6790 unsigned int len;
6791 struct sk_buff *skb;
6792 dma_addr_t dma_addr;
6793 u32 opaque_key, desc_idx, *post_ptr;
6794 u8 *data;
6795 u64 tstamp = 0;
6796
6797 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6798 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6799 if (opaque_key == RXD_OPAQUE_RING_STD) {
6800 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6801 dma_addr = dma_unmap_addr(ri, mapping);
6802 data = ri->data;
6803 post_ptr = &std_prod_idx;
6804 rx_std_posted++;
6805 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6806 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6807 dma_addr = dma_unmap_addr(ri, mapping);
6808 data = ri->data;
6809 post_ptr = &jmb_prod_idx;
6810 } else
6811 goto next_pkt_nopost;
6812
6813 work_mask |= opaque_key;
6814
6815 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6816 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6817 drop_it:
6818 tg3_recycle_rx(tnapi, tpr, opaque_key,
6819 desc_idx, *post_ptr);
6820 drop_it_no_recycle:
6821 			/* Other statistics are tracked by the card. */
6822 tp->rx_dropped++;
6823 goto next_pkt;
6824 }
6825
6826 prefetch(data + TG3_RX_OFFSET(tp));
6827 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6828 ETH_FCS_LEN;
6829
6830 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6831 RXD_FLAG_PTPSTAT_PTPV1 ||
6832 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6833 RXD_FLAG_PTPSTAT_PTPV2) {
6834 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6835 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6836 }
6837
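		/* Two receive paths below: large packets are handed off
		 * zero-copy via build_skb() and a fresh buffer is posted in
		 * their place; small packets are copied into a newly
		 * allocated skb so the original buffer can be recycled back
		 * onto the producer ring.
		 */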
6838 if (len > TG3_RX_COPY_THRESH(tp)) {
6839 int skb_size;
6840 unsigned int frag_size;
6841
6842 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6843 *post_ptr, &frag_size);
6844 if (skb_size < 0)
6845 goto drop_it;
6846
6847 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6848 PCI_DMA_FROMDEVICE);
6849
6850 skb = build_skb(data, frag_size);
6851 if (!skb) {
6852 tg3_frag_free(frag_size != 0, data);
6853 goto drop_it_no_recycle;
6854 }
6855 skb_reserve(skb, TG3_RX_OFFSET(tp));
6856 /* Ensure that the update to the data happens
6857 * after the usage of the old DMA mapping.
6858 */
6859 smp_wmb();
6860
6861 ri->data = NULL;
6862
6863 } else {
6864 tg3_recycle_rx(tnapi, tpr, opaque_key,
6865 desc_idx, *post_ptr);
6866
6867 skb = netdev_alloc_skb(tp->dev,
6868 len + TG3_RAW_IP_ALIGN);
6869 if (skb == NULL)
6870 goto drop_it_no_recycle;
6871
6872 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6873 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6874 memcpy(skb->data,
6875 data + TG3_RX_OFFSET(tp),
6876 len);
6877 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6878 }
6879
6880 skb_put(skb, len);
6881 if (tstamp)
6882 tg3_hwclock_to_timestamp(tp, tstamp,
6883 skb_hwtstamps(skb));
6884
6885 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6886 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6887 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6888 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6889 skb->ip_summed = CHECKSUM_UNNECESSARY;
6890 else
6891 skb_checksum_none_assert(skb);
6892
6893 skb->protocol = eth_type_trans(skb, tp->dev);
6894
6895 if (len > (tp->dev->mtu + ETH_HLEN) &&
6896 skb->protocol != htons(ETH_P_8021Q)) {
6897 dev_kfree_skb(skb);
6898 goto drop_it_no_recycle;
6899 }
6900
6901 if (desc->type_flags & RXD_FLAG_VLAN &&
6902 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6903 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6904 desc->err_vlan & RXD_VLAN_MASK);
6905
6906 napi_gro_receive(&tnapi->napi, skb);
6907
6908 received++;
6909 budget--;
6910
6911 next_pkt:
6912 (*post_ptr)++;
6913
6914 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6915 tpr->rx_std_prod_idx = std_prod_idx &
6916 tp->rx_std_ring_mask;
6917 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6918 tpr->rx_std_prod_idx);
6919 work_mask &= ~RXD_OPAQUE_RING_STD;
6920 rx_std_posted = 0;
6921 }
6922 next_pkt_nopost:
6923 sw_idx++;
6924 sw_idx &= tp->rx_ret_ring_mask;
6925
6926 /* Refresh hw_idx to see if there is new work */
6927 if (sw_idx == hw_idx) {
6928 hw_idx = *(tnapi->rx_rcb_prod_idx);
6929 rmb();
6930 }
6931 }
6932
6933 /* ACK the status ring. */
6934 tnapi->rx_rcb_ptr = sw_idx;
6935 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6936
6937 /* Refill RX ring(s). */
6938 if (!tg3_flag(tp, ENABLE_RSS)) {
6939 /* Sync BD data before updating mailbox */
6940 wmb();
6941
6942 if (work_mask & RXD_OPAQUE_RING_STD) {
6943 tpr->rx_std_prod_idx = std_prod_idx &
6944 tp->rx_std_ring_mask;
6945 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6946 tpr->rx_std_prod_idx);
6947 }
6948 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6949 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6950 tp->rx_jmb_ring_mask;
6951 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6952 tpr->rx_jmb_prod_idx);
6953 }
6954 mmiowb();
6955 } else if (work_mask) {
6956 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6957 * updated before the producer indices can be updated.
6958 */
6959 smp_wmb();
6960
6961 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6962 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6963
6964 if (tnapi != &tp->napi[1]) {
6965 tp->rx_refill = true;
6966 napi_schedule(&tp->napi[1].napi);
6967 }
6968 }
6969
6970 return received;
6971 }
6972
6973 static void tg3_poll_link(struct tg3 *tp)
6974 {
6975 /* handle link change and other phy events */
6976 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6977 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6978
6979 if (sblk->status & SD_STATUS_LINK_CHG) {
6980 sblk->status = SD_STATUS_UPDATED |
6981 (sblk->status & ~SD_STATUS_LINK_CHG);
6982 spin_lock(&tp->lock);
6983 if (tg3_flag(tp, USE_PHYLIB)) {
6984 tw32_f(MAC_STATUS,
6985 (MAC_STATUS_SYNC_CHANGED |
6986 MAC_STATUS_CFG_CHANGED |
6987 MAC_STATUS_MI_COMPLETION |
6988 MAC_STATUS_LNKSTATE_CHANGED));
6989 udelay(40);
6990 } else
6991 tg3_setup_phy(tp, false);
6992 spin_unlock(&tp->lock);
6993 }
6994 }
6995 }
6996
6997 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6998 struct tg3_rx_prodring_set *dpr,
6999 struct tg3_rx_prodring_set *spr)
7000 {
7001 u32 si, di, cpycnt, src_prod_idx;
7002 int i, err = 0;
7003
7004 while (1) {
7005 src_prod_idx = spr->rx_std_prod_idx;
7006
7007 /* Make sure updates to the rx_std_buffers[] entries and the
7008 * standard producer index are seen in the correct order.
7009 */
7010 smp_rmb();
7011
7012 if (spr->rx_std_cons_idx == src_prod_idx)
7013 break;
7014
7015 if (spr->rx_std_cons_idx < src_prod_idx)
7016 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7017 else
7018 cpycnt = tp->rx_std_ring_mask + 1 -
7019 spr->rx_std_cons_idx;
7020
7021 cpycnt = min(cpycnt,
7022 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
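		/* Example: with a 512-entry ring (mask 511), cons_idx = 510
		 * and prod_idx = 4, the producer has wrapped, so the
		 * cpycnt = 512 - 510 = 2 tail entries are copied first; the
		 * next pass around the outer loop picks up the remaining
		 * four entries at the start of the ring.
		 */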
7023
7024 si = spr->rx_std_cons_idx;
7025 di = dpr->rx_std_prod_idx;
7026
7027 for (i = di; i < di + cpycnt; i++) {
7028 if (dpr->rx_std_buffers[i].data) {
7029 cpycnt = i - di;
7030 err = -ENOSPC;
7031 break;
7032 }
7033 }
7034
7035 if (!cpycnt)
7036 break;
7037
7038 /* Ensure that updates to the rx_std_buffers ring and the
7039 * shadowed hardware producer ring from tg3_recycle_skb() are
7040 * ordered correctly WRT the skb check above.
7041 */
7042 smp_rmb();
7043
7044 memcpy(&dpr->rx_std_buffers[di],
7045 &spr->rx_std_buffers[si],
7046 cpycnt * sizeof(struct ring_info));
7047
7048 for (i = 0; i < cpycnt; i++, di++, si++) {
7049 struct tg3_rx_buffer_desc *sbd, *dbd;
7050 sbd = &spr->rx_std[si];
7051 dbd = &dpr->rx_std[di];
7052 dbd->addr_hi = sbd->addr_hi;
7053 dbd->addr_lo = sbd->addr_lo;
7054 }
7055
7056 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7057 tp->rx_std_ring_mask;
7058 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7059 tp->rx_std_ring_mask;
7060 }
7061
7062 while (1) {
7063 src_prod_idx = spr->rx_jmb_prod_idx;
7064
7065 /* Make sure updates to the rx_jmb_buffers[] entries and
7066 * the jumbo producer index are seen in the correct order.
7067 */
7068 smp_rmb();
7069
7070 if (spr->rx_jmb_cons_idx == src_prod_idx)
7071 break;
7072
7073 if (spr->rx_jmb_cons_idx < src_prod_idx)
7074 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7075 else
7076 cpycnt = tp->rx_jmb_ring_mask + 1 -
7077 spr->rx_jmb_cons_idx;
7078
7079 cpycnt = min(cpycnt,
7080 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7081
7082 si = spr->rx_jmb_cons_idx;
7083 di = dpr->rx_jmb_prod_idx;
7084
7085 for (i = di; i < di + cpycnt; i++) {
7086 if (dpr->rx_jmb_buffers[i].data) {
7087 cpycnt = i - di;
7088 err = -ENOSPC;
7089 break;
7090 }
7091 }
7092
7093 if (!cpycnt)
7094 break;
7095
7096 /* Ensure that updates to the rx_jmb_buffers ring and the
7097 * shadowed hardware producer ring from tg3_recycle_skb() are
7098 * ordered correctly WRT the skb check above.
7099 */
7100 smp_rmb();
7101
7102 memcpy(&dpr->rx_jmb_buffers[di],
7103 &spr->rx_jmb_buffers[si],
7104 cpycnt * sizeof(struct ring_info));
7105
7106 for (i = 0; i < cpycnt; i++, di++, si++) {
7107 struct tg3_rx_buffer_desc *sbd, *dbd;
7108 sbd = &spr->rx_jmb[si].std;
7109 dbd = &dpr->rx_jmb[di].std;
7110 dbd->addr_hi = sbd->addr_hi;
7111 dbd->addr_lo = sbd->addr_lo;
7112 }
7113
7114 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7115 tp->rx_jmb_ring_mask;
7116 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7117 tp->rx_jmb_ring_mask;
7118 }
7119
7120 return err;
7121 }
7122
7123 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7124 {
7125 struct tg3 *tp = tnapi->tp;
7126
7127 /* run TX completion thread */
7128 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7129 tg3_tx(tnapi);
7130 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7131 return work_done;
7132 }
7133
7134 if (!tnapi->rx_rcb_prod_idx)
7135 return work_done;
7136
7137 /* run RX thread, within the bounds set by NAPI.
7138 * All RX "locking" is done by ensuring outside
7139 * code synchronizes with tg3->napi.poll()
7140 */
7141 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7142 work_done += tg3_rx(tnapi, budget - work_done);
7143
7144 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7145 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7146 int i, err = 0;
7147 u32 std_prod_idx = dpr->rx_std_prod_idx;
7148 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7149
7150 tp->rx_refill = false;
7151 for (i = 1; i <= tp->rxq_cnt; i++)
7152 err |= tg3_rx_prodring_xfer(tp, dpr,
7153 &tp->napi[i].prodring);
7154
7155 wmb();
7156
7157 if (std_prod_idx != dpr->rx_std_prod_idx)
7158 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7159 dpr->rx_std_prod_idx);
7160
7161 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7162 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7163 dpr->rx_jmb_prod_idx);
7164
7165 mmiowb();
7166
7167 if (err)
7168 tw32_f(HOSTCC_MODE, tp->coal_now);
7169 }
7170
7171 return work_done;
7172 }
7173
7174 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7175 {
7176 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7177 schedule_work(&tp->reset_task);
7178 }
7179
7180 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7181 {
7182 cancel_work_sync(&tp->reset_task);
7183 tg3_flag_clear(tp, RESET_TASK_PENDING);
7184 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7185 }
7186
7187 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7188 {
7189 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7190 struct tg3 *tp = tnapi->tp;
7191 int work_done = 0;
7192 struct tg3_hw_status *sblk = tnapi->hw_status;
7193
7194 while (1) {
7195 work_done = tg3_poll_work(tnapi, work_done, budget);
7196
7197 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7198 goto tx_recovery;
7199
7200 if (unlikely(work_done >= budget))
7201 break;
7202
7203 /* tp->last_tag is used in tg3_int_reenable() below
7204 * to tell the hw how much work has been processed,
7205 * so we must read it before checking for more work.
7206 */
7207 tnapi->last_tag = sblk->status_tag;
7208 tnapi->last_irq_tag = tnapi->last_tag;
7209 rmb();
7210
7211 /* check for RX/TX work to do */
7212 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7213 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7214
7215 			/* This test is not race-free, but looping again
7216 			 * reduces the number of interrupts.
7217 */
7218 if (tnapi == &tp->napi[1] && tp->rx_refill)
7219 continue;
7220
7221 napi_complete(napi);
7222 /* Reenable interrupts. */
7223 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7224
7225 			/* This test is synchronized by napi_schedule()
7226 			 * and napi_complete() to close the race condition.
7227 */
7228 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7229 tw32(HOSTCC_MODE, tp->coalesce_mode |
7230 HOSTCC_MODE_ENABLE |
7231 tnapi->coal_now);
7232 }
7233 mmiowb();
7234 break;
7235 }
7236 }
7237
7238 return work_done;
7239
7240 tx_recovery:
7241 /* work_done is guaranteed to be less than budget. */
7242 napi_complete(napi);
7243 tg3_reset_task_schedule(tp);
7244 return work_done;
7245 }
7246
7247 static void tg3_process_error(struct tg3 *tp)
7248 {
7249 u32 val;
7250 bool real_error = false;
7251
7252 if (tg3_flag(tp, ERROR_PROCESSED))
7253 return;
7254
7255 /* Check Flow Attention register */
7256 val = tr32(HOSTCC_FLOW_ATTN);
7257 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7258 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7259 real_error = true;
7260 }
7261
7262 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7263 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7264 real_error = true;
7265 }
7266
7267 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7268 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7269 real_error = true;
7270 }
7271
7272 if (!real_error)
7273 return;
7274
7275 tg3_dump_state(tp);
7276
7277 tg3_flag_set(tp, ERROR_PROCESSED);
7278 tg3_reset_task_schedule(tp);
7279 }
7280
7281 static int tg3_poll(struct napi_struct *napi, int budget)
7282 {
7283 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7284 struct tg3 *tp = tnapi->tp;
7285 int work_done = 0;
7286 struct tg3_hw_status *sblk = tnapi->hw_status;
7287
7288 while (1) {
7289 if (sblk->status & SD_STATUS_ERROR)
7290 tg3_process_error(tp);
7291
7292 tg3_poll_link(tp);
7293
7294 work_done = tg3_poll_work(tnapi, work_done, budget);
7295
7296 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7297 goto tx_recovery;
7298
7299 if (unlikely(work_done >= budget))
7300 break;
7301
7302 if (tg3_flag(tp, TAGGED_STATUS)) {
7303 /* tp->last_tag is used in tg3_int_reenable() below
7304 * to tell the hw how much work has been processed,
7305 * so we must read it before checking for more work.
7306 */
7307 tnapi->last_tag = sblk->status_tag;
7308 tnapi->last_irq_tag = tnapi->last_tag;
7309 rmb();
7310 } else
7311 sblk->status &= ~SD_STATUS_UPDATED;
7312
7313 if (likely(!tg3_has_work(tnapi))) {
7314 napi_complete(napi);
7315 tg3_int_reenable(tnapi);
7316 break;
7317 }
7318 }
7319
7320 return work_done;
7321
7322 tx_recovery:
7323 /* work_done is guaranteed to be less than budget. */
7324 napi_complete(napi);
7325 tg3_reset_task_schedule(tp);
7326 return work_done;
7327 }
7328
7329 static void tg3_napi_disable(struct tg3 *tp)
7330 {
7331 int i;
7332
7333 for (i = tp->irq_cnt - 1; i >= 0; i--)
7334 napi_disable(&tp->napi[i].napi);
7335 }
7336
7337 static void tg3_napi_enable(struct tg3 *tp)
7338 {
7339 int i;
7340
7341 for (i = 0; i < tp->irq_cnt; i++)
7342 napi_enable(&tp->napi[i].napi);
7343 }
7344
7345 static void tg3_napi_init(struct tg3 *tp)
7346 {
7347 int i;
7348
7349 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7350 for (i = 1; i < tp->irq_cnt; i++)
7351 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7352 }
7353
7354 static void tg3_napi_fini(struct tg3 *tp)
7355 {
7356 int i;
7357
7358 for (i = 0; i < tp->irq_cnt; i++)
7359 netif_napi_del(&tp->napi[i].napi);
7360 }
7361
7362 static inline void tg3_netif_stop(struct tg3 *tp)
7363 {
7364 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7365 tg3_napi_disable(tp);
7366 netif_carrier_off(tp->dev);
7367 netif_tx_disable(tp->dev);
7368 }
7369
7370 /* tp->lock must be held */
7371 static inline void tg3_netif_start(struct tg3 *tp)
7372 {
7373 tg3_ptp_resume(tp);
7374
7375 /* NOTE: unconditional netif_tx_wake_all_queues is only
7376 * appropriate so long as all callers are assured to
7377 * have free tx slots (such as after tg3_init_hw)
7378 */
7379 netif_tx_wake_all_queues(tp->dev);
7380
7381 if (tp->link_up)
7382 netif_carrier_on(tp->dev);
7383
7384 tg3_napi_enable(tp);
7385 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7386 tg3_enable_ints(tp);
7387 }
7388
7389 static void tg3_irq_quiesce(struct tg3 *tp)
7390 {
7391 int i;
7392
7393 BUG_ON(tp->irq_sync);
7394
7395 tp->irq_sync = 1;
7396 smp_mb();
7397
7398 for (i = 0; i < tp->irq_cnt; i++)
7399 synchronize_irq(tp->napi[i].irq_vec);
7400 }
7401
7402 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7403 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7404 * with as well. Most of the time, this is not necessary except when
7405 * shutting down the device.
7406 */
7407 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7408 {
7409 spin_lock_bh(&tp->lock);
7410 if (irq_sync)
7411 tg3_irq_quiesce(tp);
7412 }
7413
7414 static inline void tg3_full_unlock(struct tg3 *tp)
7415 {
7416 spin_unlock_bh(&tp->lock);
7417 }
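/* Typical usage, as in the PTP callbacks above:
 *
 *	tg3_full_lock(tp, 0);
 *	... touch registers or shared state ...
 *	tg3_full_unlock(tp);
 *
 * Passing irq_sync = 1 additionally waits for any in-flight interrupt
 * handlers and is only needed on shutdown-style paths.
 */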
7418
7419 /* One-shot MSI handler - Chip automatically disables interrupt
7420 * after sending MSI so driver doesn't have to do it.
7421 */
7422 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7423 {
7424 struct tg3_napi *tnapi = dev_id;
7425 struct tg3 *tp = tnapi->tp;
7426
7427 prefetch(tnapi->hw_status);
7428 if (tnapi->rx_rcb)
7429 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7430
7431 if (likely(!tg3_irq_sync(tp)))
7432 napi_schedule(&tnapi->napi);
7433
7434 return IRQ_HANDLED;
7435 }
7436
7437 /* MSI ISR - No need to check for interrupt sharing and no need to
7438 * flush status block and interrupt mailbox. PCI ordering rules
7439 * guarantee that MSI will arrive after the status block.
7440 */
7441 static irqreturn_t tg3_msi(int irq, void *dev_id)
7442 {
7443 struct tg3_napi *tnapi = dev_id;
7444 struct tg3 *tp = tnapi->tp;
7445
7446 prefetch(tnapi->hw_status);
7447 if (tnapi->rx_rcb)
7448 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7449 /*
7450 * Writing any value to intr-mbox-0 clears PCI INTA# and
7451 * chip-internal interrupt pending events.
7452 	 * Writing non-zero to intr-mbox-0 additionally tells the
7453 * NIC to stop sending us irqs, engaging "in-intr-handler"
7454 * event coalescing.
7455 */
7456 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7457 if (likely(!tg3_irq_sync(tp)))
7458 napi_schedule(&tnapi->napi);
7459
7460 return IRQ_RETVAL(1);
7461 }
7462
7463 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7464 {
7465 struct tg3_napi *tnapi = dev_id;
7466 struct tg3 *tp = tnapi->tp;
7467 struct tg3_hw_status *sblk = tnapi->hw_status;
7468 unsigned int handled = 1;
7469
7470 /* In INTx mode, it is possible for the interrupt to arrive at
7471 * the CPU before the status block posted prior to the interrupt.
7472 * Reading the PCI State register will confirm whether the
7473 * interrupt is ours and will flush the status block.
7474 */
7475 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7476 if (tg3_flag(tp, CHIP_RESETTING) ||
7477 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7478 handled = 0;
7479 goto out;
7480 }
7481 }
7482
7483 /*
7484 * Writing any value to intr-mbox-0 clears PCI INTA# and
7485 * chip-internal interrupt pending events.
7486 	 * Writing non-zero to intr-mbox-0 additionally tells the
7487 * NIC to stop sending us irqs, engaging "in-intr-handler"
7488 * event coalescing.
7489 *
7490 * Flush the mailbox to de-assert the IRQ immediately to prevent
7491 * spurious interrupts. The flush impacts performance but
7492 * excessive spurious interrupts can be worse in some cases.
7493 */
7494 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7495 if (tg3_irq_sync(tp))
7496 goto out;
7497 sblk->status &= ~SD_STATUS_UPDATED;
7498 if (likely(tg3_has_work(tnapi))) {
7499 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7500 napi_schedule(&tnapi->napi);
7501 } else {
7502 /* No work, shared interrupt perhaps? re-enable
7503 * interrupts, and flush that PCI write
7504 */
7505 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7506 0x00000000);
7507 }
7508 out:
7509 return IRQ_RETVAL(handled);
7510 }
7511
7512 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7513 {
7514 struct tg3_napi *tnapi = dev_id;
7515 struct tg3 *tp = tnapi->tp;
7516 struct tg3_hw_status *sblk = tnapi->hw_status;
7517 unsigned int handled = 1;
7518
7519 /* In INTx mode, it is possible for the interrupt to arrive at
7520 * the CPU before the status block posted prior to the interrupt.
7521 * Reading the PCI State register will confirm whether the
7522 * interrupt is ours and will flush the status block.
7523 */
7524 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7525 if (tg3_flag(tp, CHIP_RESETTING) ||
7526 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7527 handled = 0;
7528 goto out;
7529 }
7530 }
7531
7532 /*
7533 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7534 	 * chip-internal interrupt pending events.
7535 	 * Writing non-zero to intr-mbox-0 additionally tells the
7536 * NIC to stop sending us irqs, engaging "in-intr-handler"
7537 * event coalescing.
7538 *
7539 * Flush the mailbox to de-assert the IRQ immediately to prevent
7540 * spurious interrupts. The flush impacts performance but
7541 * excessive spurious interrupts can be worse in some cases.
7542 */
7543 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7544
7545 /*
7546 * In a shared interrupt configuration, sometimes other devices'
7547 * interrupts will scream. We record the current status tag here
7548 * so that the above check can report that the screaming interrupts
7549 * are unhandled. Eventually they will be silenced.
7550 */
7551 tnapi->last_irq_tag = sblk->status_tag;
7552
7553 if (tg3_irq_sync(tp))
7554 goto out;
7555
7556 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7557
7558 napi_schedule(&tnapi->napi);
7559
7560 out:
7561 return IRQ_RETVAL(handled);
7562 }
7563
7564 /* ISR for interrupt test */
7565 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7566 {
7567 struct tg3_napi *tnapi = dev_id;
7568 struct tg3 *tp = tnapi->tp;
7569 struct tg3_hw_status *sblk = tnapi->hw_status;
7570
7571 if ((sblk->status & SD_STATUS_UPDATED) ||
7572 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7573 tg3_disable_ints(tp);
7574 return IRQ_RETVAL(1);
7575 }
7576 return IRQ_RETVAL(0);
7577 }
7578
7579 #ifdef CONFIG_NET_POLL_CONTROLLER
7580 static void tg3_poll_controller(struct net_device *dev)
7581 {
7582 int i;
7583 struct tg3 *tp = netdev_priv(dev);
7584
7585 if (tg3_irq_sync(tp))
7586 return;
7587
7588 for (i = 0; i < tp->irq_cnt; i++)
7589 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7590 }
7591 #endif
7592
7593 static void tg3_tx_timeout(struct net_device *dev)
7594 {
7595 struct tg3 *tp = netdev_priv(dev);
7596
7597 if (netif_msg_tx_err(tp)) {
7598 netdev_err(dev, "transmit timed out, resetting\n");
7599 tg3_dump_state(tp);
7600 }
7601
7602 tg3_reset_task_schedule(tp);
7603 }
7604
7605 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7606 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7607 {
7608 u32 base = (u32) mapping & 0xffffffff;
7609
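	/* Example: base = 0xffffff00 and len = 0x1f8 gives
	 * base + len + 8 = 0x100000100, which truncates to 0x100 < base, so
	 * the buffer straddles the 4 GB boundary. The base > 0xffffdcc0
	 * pre-check cheaply rules out addresses more than ~9 KB below a
	 * boundary, which cannot wrap for a maximum-sized frame.
	 */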
7610 return (base > 0xffffdcc0) && (base + len + 8 < base);
7611 }
7612
7613 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7614 * of any 4GB boundaries: 4G, 8G, etc
7615 */
7616 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7617 u32 len, u32 mss)
7618 {
7619 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7620 u32 base = (u32) mapping & 0xffffffff;
7621
7622 return ((base + len + (mss & 0x3fff)) < base);
7623 }
7624 return 0;
7625 }
7626
7627 /* Test for DMA addresses > 40-bit */
7628 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7629 int len)
7630 {
7631 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7632 if (tg3_flag(tp, 40BIT_DMA_BUG))
7633 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7634 return 0;
7635 #else
7636 return 0;
7637 #endif
7638 }
7639
7640 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7641 dma_addr_t mapping, u32 len, u32 flags,
7642 u32 mss, u32 vlan)
7643 {
7644 txbd->addr_hi = ((u64) mapping >> 32);
7645 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7646 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7647 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
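	/* len_flags thus packs the DMA length above the 16-bit TXD_FLAG_*
	 * field (note the 0x0000ffff mask), and vlan_tag likewise packs the
	 * MSS above the VLAN tag.
	 */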
7648 }
7649
7650 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7651 dma_addr_t map, u32 len, u32 flags,
7652 u32 mss, u32 vlan)
7653 {
7654 struct tg3 *tp = tnapi->tp;
7655 bool hwbug = false;
7656
7657 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7658 hwbug = true;
7659
7660 if (tg3_4g_overflow_test(map, len))
7661 hwbug = true;
7662
7663 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7664 hwbug = true;
7665
7666 if (tg3_40bit_overflow_test(tp, map, len))
7667 hwbug = true;
7668
7669 if (tp->dma_limit) {
7670 u32 prvidx = *entry;
7671 u32 tmp_flag = flags & ~TXD_FLAG_END;
7672 while (len > tp->dma_limit && *budget) {
7673 u32 frag_len = tp->dma_limit;
7674 len -= tp->dma_limit;
7675
7676 			/* Avoid the 8-byte DMA problem */
7677 if (len <= 8) {
7678 len += tp->dma_limit / 2;
7679 frag_len = tp->dma_limit / 2;
7680 }
7681
7682 tnapi->tx_buffers[*entry].fragmented = true;
7683
7684 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7685 frag_len, tmp_flag, mss, vlan);
7686 *budget -= 1;
7687 prvidx = *entry;
7688 *entry = NEXT_TX(*entry);
7689
7690 map += frag_len;
7691 }
7692
7693 if (len) {
7694 if (*budget) {
7695 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7696 len, flags, mss, vlan);
7697 *budget -= 1;
7698 *entry = NEXT_TX(*entry);
7699 } else {
7700 hwbug = true;
7701 tnapi->tx_buffers[prvidx].fragmented = false;
7702 }
7703 }
7704 } else {
7705 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7706 len, flags, mss, vlan);
7707 *entry = NEXT_TX(*entry);
7708 }
7709
7710 return hwbug;
7711 }
7712
7713 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7714 {
7715 int i;
7716 struct sk_buff *skb;
7717 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7718
7719 skb = txb->skb;
7720 txb->skb = NULL;
7721
7722 pci_unmap_single(tnapi->tp->pdev,
7723 dma_unmap_addr(txb, mapping),
7724 skb_headlen(skb),
7725 PCI_DMA_TODEVICE);
7726
7727 while (txb->fragmented) {
7728 txb->fragmented = false;
7729 entry = NEXT_TX(entry);
7730 txb = &tnapi->tx_buffers[entry];
7731 }
7732
7733 for (i = 0; i <= last; i++) {
7734 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7735
7736 entry = NEXT_TX(entry);
7737 txb = &tnapi->tx_buffers[entry];
7738
7739 pci_unmap_page(tnapi->tp->pdev,
7740 dma_unmap_addr(txb, mapping),
7741 skb_frag_size(frag), PCI_DMA_TODEVICE);
7742
7743 while (txb->fragmented) {
7744 txb->fragmented = false;
7745 entry = NEXT_TX(entry);
7746 txb = &tnapi->tx_buffers[entry];
7747 }
7748 }
7749 }
7750
7751 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7752 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7753 struct sk_buff **pskb,
7754 u32 *entry, u32 *budget,
7755 u32 base_flags, u32 mss, u32 vlan)
7756 {
7757 struct tg3 *tp = tnapi->tp;
7758 struct sk_buff *new_skb, *skb = *pskb;
7759 dma_addr_t new_addr = 0;
7760 int ret = 0;
7761
7762 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7763 new_skb = skb_copy(skb, GFP_ATOMIC);
7764 else {
7765 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7766
7767 new_skb = skb_copy_expand(skb,
7768 skb_headroom(skb) + more_headroom,
7769 skb_tailroom(skb), GFP_ATOMIC);
7770 }
7771
7772 if (!new_skb) {
7773 ret = -1;
7774 } else {
7775 /* New SKB is guaranteed to be linear. */
7776 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7777 PCI_DMA_TODEVICE);
7778 /* Make sure the mapping succeeded */
7779 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7780 dev_kfree_skb(new_skb);
7781 ret = -1;
7782 } else {
7783 u32 save_entry = *entry;
7784
7785 base_flags |= TXD_FLAG_END;
7786
7787 tnapi->tx_buffers[*entry].skb = new_skb;
7788 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7789 mapping, new_addr);
7790
7791 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7792 new_skb->len, base_flags,
7793 mss, vlan)) {
7794 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7795 dev_kfree_skb(new_skb);
7796 ret = -1;
7797 }
7798 }
7799 }
7800
7801 dev_kfree_skb(skb);
7802 *pskb = new_skb;
7803 return ret;
7804 }
7805
7806 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7807
7808 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7809 * TSO header is greater than 80 bytes.
7810 */
7811 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7812 {
7813 struct sk_buff *segs, *nskb;
7814 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7815
7816 /* Estimate the number of fragments in the worst case */
7817 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7818 netif_stop_queue(tp->dev);
7819
7820 		/* netif_tx_stop_queue() must be done before checking
7821 		 * tx index in tg3_tx_avail() below, because in
7822 * tg3_tx(), we update tx index before checking for
7823 * netif_tx_queue_stopped().
7824 */
7825 smp_mb();
7826 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7827 return NETDEV_TX_BUSY;
7828
7829 netif_wake_queue(tp->dev);
7830 }
7831
7832 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7833 if (IS_ERR(segs))
7834 goto tg3_tso_bug_end;
7835
7836 do {
7837 nskb = segs;
7838 segs = segs->next;
7839 nskb->next = NULL;
7840 tg3_start_xmit(nskb, tp->dev);
7841 } while (segs);
7842
7843 tg3_tso_bug_end:
7844 dev_kfree_skb(skb);
7845
7846 return NETDEV_TX_OK;
7847 }
7848
7849 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7850 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7851 */
7852 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7853 {
7854 struct tg3 *tp = netdev_priv(dev);
7855 u32 len, entry, base_flags, mss, vlan = 0;
7856 u32 budget;
7857 int i = -1, would_hit_hwbug;
7858 dma_addr_t mapping;
7859 struct tg3_napi *tnapi;
7860 struct netdev_queue *txq;
7861 unsigned int last;
7862
7863 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7864 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7865 if (tg3_flag(tp, ENABLE_TSS))
7866 tnapi++;
7867
7868 budget = tg3_tx_avail(tnapi);
7869
7870 /* We are running in BH disabled context with netif_tx_lock
7871 * and TX reclaim runs via tp->napi.poll inside of a software
7872 * interrupt. Furthermore, IRQ processing runs lockless so we have
7873 * no IRQ context deadlocks to worry about either. Rejoice!
7874 */
7875 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7876 if (!netif_tx_queue_stopped(txq)) {
7877 netif_tx_stop_queue(txq);
7878
7879 /* This is a hard error, log it. */
7880 netdev_err(dev,
7881 "BUG! Tx Ring full when queue awake!\n");
7882 }
7883 return NETDEV_TX_BUSY;
7884 }
7885
7886 entry = tnapi->tx_prod;
7887 base_flags = 0;
7888 if (skb->ip_summed == CHECKSUM_PARTIAL)
7889 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7890
7891 mss = skb_shinfo(skb)->gso_size;
7892 if (mss) {
7893 struct iphdr *iph;
7894 u32 tcp_opt_len, hdr_len;
7895
7896 if (skb_header_cloned(skb) &&
7897 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7898 goto drop;
7899
7900 iph = ip_hdr(skb);
7901 tcp_opt_len = tcp_optlen(skb);
7902
7903 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7904
7905 if (!skb_is_gso_v6(skb)) {
7906 iph->check = 0;
7907 iph->tot_len = htons(mss + hdr_len);
7908 }
7909
7910 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7911 tg3_flag(tp, TSO_BUG))
7912 return tg3_tso_bug(tp, skb);
7913
7914 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7915 TXD_FLAG_CPU_POST_DMA);
7916
7917 if (tg3_flag(tp, HW_TSO_1) ||
7918 tg3_flag(tp, HW_TSO_2) ||
7919 tg3_flag(tp, HW_TSO_3)) {
7920 tcp_hdr(skb)->check = 0;
7921 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7922 } else
7923 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7924 iph->daddr, 0,
7925 IPPROTO_TCP,
7926 0);
7927
7928 if (tg3_flag(tp, HW_TSO_3)) {
7929 mss |= (hdr_len & 0xc) << 12;
7930 if (hdr_len & 0x10)
7931 base_flags |= 0x00000010;
7932 base_flags |= (hdr_len & 0x3e0) << 5;
7933 } else if (tg3_flag(tp, HW_TSO_2))
7934 mss |= hdr_len << 9;
7935 else if (tg3_flag(tp, HW_TSO_1) ||
7936 tg3_asic_rev(tp) == ASIC_REV_5705) {
7937 if (tcp_opt_len || iph->ihl > 5) {
7938 int tsflags;
7939
7940 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7941 mss |= (tsflags << 11);
7942 }
7943 } else {
7944 if (tcp_opt_len || iph->ihl > 5) {
7945 int tsflags;
7946
7947 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7948 base_flags |= tsflags << 12;
7949 }
7950 }
7951 }
7952
7953 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7954 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7955 base_flags |= TXD_FLAG_JMB_PKT;
7956
7957 if (vlan_tx_tag_present(skb)) {
7958 base_flags |= TXD_FLAG_VLAN;
7959 vlan = vlan_tx_tag_get(skb);
7960 }
7961
7962 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7963 tg3_flag(tp, TX_TSTAMP_EN)) {
7964 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7965 base_flags |= TXD_FLAG_HWTSTAMP;
7966 }
7967
7968 len = skb_headlen(skb);
7969
7970 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7971 if (pci_dma_mapping_error(tp->pdev, mapping))
7972 goto drop;
7973
7974
7975 tnapi->tx_buffers[entry].skb = skb;
7976 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7977
7978 would_hit_hwbug = 0;
7979
7980 if (tg3_flag(tp, 5701_DMA_BUG))
7981 would_hit_hwbug = 1;
7982
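/* tg3_tx_frag_set() returns true when this descriptor would trip
 * a known DMA erratum (e.g. a buffer crossing a 4GB boundary), in
 * which case we take the workaround path below.
 */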
7983 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7984 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7985 mss, vlan)) {
7986 would_hit_hwbug = 1;
7987 } else if (skb_shinfo(skb)->nr_frags > 0) {
7988 u32 tmp_mss = mss;
7989
7990 if (!tg3_flag(tp, HW_TSO_1) &&
7991 !tg3_flag(tp, HW_TSO_2) &&
7992 !tg3_flag(tp, HW_TSO_3))
7993 tmp_mss = 0;
7994
7995 /* Now loop through additional data
7996 * fragments, and queue them.
7997 */
7998 last = skb_shinfo(skb)->nr_frags - 1;
7999 for (i = 0; i <= last; i++) {
8000 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8001
8002 len = skb_frag_size(frag);
8003 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8004 len, DMA_TO_DEVICE);
8005
8006 tnapi->tx_buffers[entry].skb = NULL;
8007 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8008 mapping);
8009 if (dma_mapping_error(&tp->pdev->dev, mapping))
8010 goto dma_error;
8011
8012 if (!budget ||
8013 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8014 len, base_flags |
8015 ((i == last) ? TXD_FLAG_END : 0),
8016 tmp_mss, vlan)) {
8017 would_hit_hwbug = 1;
8018 break;
8019 }
8020 }
8021 }
8022
8023 if (would_hit_hwbug) {
8024 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8025
8026 /* If the workaround fails due to memory/mapping
8027 * failure, silently drop this packet.
8028 */
8029 entry = tnapi->tx_prod;
8030 budget = tg3_tx_avail(tnapi);
8031 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8032 base_flags, mss, vlan))
8033 goto drop_nofree;
8034 }
8035
8036 skb_tx_timestamp(skb);
8037 netdev_tx_sent_queue(txq, skb->len);
8038
8039 /* Sync BD data before updating mailbox */
8040 wmb();
8041
8042 /* Packets are ready, update Tx producer idx local and on card. */
8043 tw32_tx_mbox(tnapi->prodmbox, entry);
8044
8045 tnapi->tx_prod = entry;
8046 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8047 netif_tx_stop_queue(txq);
8048
8049 /* netif_tx_stop_queue() must be done before
8050 * checking tx index in tg3_tx_avail() below, because in
8051 * tg3_tx(), we update tx index before checking for
8052 * netif_tx_queue_stopped().
8053 */
8054 smp_mb();
8055 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8056 netif_tx_wake_queue(txq);
8057 }
8058
8059 mmiowb();
8060 return NETDEV_TX_OK;
8061
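/* Unwind every DMA mapping set up so far, then fall through to
 * drop the packet.
 */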
8062 dma_error:
8063 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8064 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8065 drop:
8066 dev_kfree_skb(skb);
8067 drop_nofree:
8068 tp->tx_dropped++;
8069 return NETDEV_TX_OK;
8070 }
8071
8072 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8073 {
8074 if (enable) {
8075 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8076 MAC_MODE_PORT_MODE_MASK);
8077
8078 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8079
8080 if (!tg3_flag(tp, 5705_PLUS))
8081 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8082
8083 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8084 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8085 else
8086 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8087 } else {
8088 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8089
8090 if (tg3_flag(tp, 5705_PLUS) ||
8091 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8092 tg3_asic_rev(tp) == ASIC_REV_5700)
8093 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8094 }
8095
8096 tw32(MAC_MODE, tp->mac_mode);
8097 udelay(40);
8098 }
8099
8100 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8101 {
8102 u32 val, bmcr, mac_mode, ptest = 0;
8103
8104 tg3_phy_toggle_apd(tp, false);
8105 tg3_phy_toggle_automdix(tp, false);
8106
8107 if (extlpbk && tg3_phy_set_extloopbk(tp))
8108 return -EIO;
8109
8110 bmcr = BMCR_FULLDPLX;
8111 switch (speed) {
8112 case SPEED_10:
8113 break;
8114 case SPEED_100:
8115 bmcr |= BMCR_SPEED100;
8116 break;
8117 case SPEED_1000:
8118 default:
8119 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8120 speed = SPEED_100;
8121 bmcr |= BMCR_SPEED100;
8122 } else {
8123 speed = SPEED_1000;
8124 bmcr |= BMCR_SPEED1000;
8125 }
8126 }
8127
8128 if (extlpbk) {
8129 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8130 tg3_readphy(tp, MII_CTRL1000, &val);
8131 val |= CTL1000_AS_MASTER |
8132 CTL1000_ENABLE_MASTER;
8133 tg3_writephy(tp, MII_CTRL1000, val);
8134 } else {
8135 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8136 MII_TG3_FET_PTEST_TRIM_2;
8137 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8138 }
8139 } else
8140 bmcr |= BMCR_LOOPBACK;
8141
8142 tg3_writephy(tp, MII_BMCR, bmcr);
8143
8144 /* The write needs to be flushed for the FETs */
8145 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8146 tg3_readphy(tp, MII_BMCR, &bmcr);
8147
8148 udelay(40);
8149
8150 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8151 tg3_asic_rev(tp) == ASIC_REV_5785) {
8152 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8153 MII_TG3_FET_PTEST_FRC_TX_LINK |
8154 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8155
8156 /* The write needs to be flushed for the AC131 */
8157 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8158 }
8159
8160 /* Reset to prevent losing 1st rx packet intermittently */
8161 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8162 tg3_flag(tp, 5780_CLASS)) {
8163 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8164 udelay(10);
8165 tw32_f(MAC_RX_MODE, tp->rx_mode);
8166 }
8167
8168 mac_mode = tp->mac_mode &
8169 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8170 if (speed == SPEED_1000)
8171 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8172 else
8173 mac_mode |= MAC_MODE_PORT_MODE_MII;
8174
8175 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8176 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8177
8178 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8179 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8180 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8181 mac_mode |= MAC_MODE_LINK_POLARITY;
8182
8183 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8184 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8185 }
8186
8187 tw32(MAC_MODE, mac_mode);
8188 udelay(40);
8189
8190 return 0;
8191 }
8192
8193 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8194 {
8195 struct tg3 *tp = netdev_priv(dev);
8196
8197 if (features & NETIF_F_LOOPBACK) {
8198 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8199 return;
8200
8201 spin_lock_bh(&tp->lock);
8202 tg3_mac_loopback(tp, true);
8203 netif_carrier_on(tp->dev);
8204 spin_unlock_bh(&tp->lock);
8205 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8206 } else {
8207 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8208 return;
8209
8210 spin_lock_bh(&tp->lock);
8211 tg3_mac_loopback(tp, false);
8212 /* Force link status check */
8213 tg3_setup_phy(tp, true);
8214 spin_unlock_bh(&tp->lock);
8215 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8216 }
8217 }
8218
8219 static netdev_features_t tg3_fix_features(struct net_device *dev,
8220 netdev_features_t features)
8221 {
8222 struct tg3 *tp = netdev_priv(dev);
8223
8224 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8225 features &= ~NETIF_F_ALL_TSO;
8226
8227 return features;
8228 }
8229
8230 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8231 {
8232 netdev_features_t changed = dev->features ^ features;
8233
8234 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8235 tg3_set_loopback(dev, features);
8236
8237 return 0;
8238 }
8239
8240 static void tg3_rx_prodring_free(struct tg3 *tp,
8241 struct tg3_rx_prodring_set *tpr)
8242 {
8243 int i;
8244
8245 if (tpr != &tp->napi[0].prodring) {
8246 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8247 i = (i + 1) & tp->rx_std_ring_mask)
8248 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8249 tp->rx_pkt_map_sz);
8250
8251 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8252 for (i = tpr->rx_jmb_cons_idx;
8253 i != tpr->rx_jmb_prod_idx;
8254 i = (i + 1) & tp->rx_jmb_ring_mask) {
8255 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8256 TG3_RX_JMB_MAP_SZ);
8257 }
8258 }
8259
8260 return;
8261 }
8262
8263 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8264 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8265 tp->rx_pkt_map_sz);
8266
8267 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8268 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8269 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8270 TG3_RX_JMB_MAP_SZ);
8271 }
8272 }
8273
8274 /* Initialize rx rings for packet processing.
8275 *
8276 * The chip has been shut down and the driver detached from
8277 * the networking core, so no interrupts or new tx packets will
8278 * end up in the driver. tp->{tx,}lock are held and thus
8279 * we may not sleep.
8280 */
8281 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8282 struct tg3_rx_prodring_set *tpr)
8283 {
8284 u32 i, rx_pkt_dma_sz;
8285
8286 tpr->rx_std_cons_idx = 0;
8287 tpr->rx_std_prod_idx = 0;
8288 tpr->rx_jmb_cons_idx = 0;
8289 tpr->rx_jmb_prod_idx = 0;
8290
8291 if (tpr != &tp->napi[0].prodring) {
8292 memset(&tpr->rx_std_buffers[0], 0,
8293 TG3_RX_STD_BUFF_RING_SIZE(tp));
8294 if (tpr->rx_jmb_buffers)
8295 memset(&tpr->rx_jmb_buffers[0], 0,
8296 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8297 goto done;
8298 }
8299
8300 /* Zero out all descriptors. */
8301 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8302
8303 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8304 if (tg3_flag(tp, 5780_CLASS) &&
8305 tp->dev->mtu > ETH_DATA_LEN)
8306 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8307 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8308
8309 /* Initialize invariants of the rings; we only set this
8310 * stuff once. This works because the card does not
8311 * write into the rx buffer posting rings.
8312 */
8313 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8314 struct tg3_rx_buffer_desc *rxd;
8315
8316 rxd = &tpr->rx_std[i];
8317 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8318 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8319 rxd->opaque = (RXD_OPAQUE_RING_STD |
8320 (i << RXD_OPAQUE_INDEX_SHIFT));
8321 }
8322
8323 /* Now allocate fresh SKBs for each rx ring. */
8324 for (i = 0; i < tp->rx_pending; i++) {
8325 unsigned int frag_size;
8326
8327 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8328 &frag_size) < 0) {
8329 netdev_warn(tp->dev,
8330 "Using a smaller RX standard ring. Only "
8331 "%d out of %d buffers were allocated "
8332 "successfully\n", i, tp->rx_pending);
8333 if (i == 0)
8334 goto initfail;
8335 tp->rx_pending = i;
8336 break;
8337 }
8338 }
8339
8340 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8341 goto done;
8342
8343 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8344
8345 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8346 goto done;
8347
8348 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8349 struct tg3_rx_buffer_desc *rxd;
8350
8351 rxd = &tpr->rx_jmb[i].std;
8352 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8353 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8354 RXD_FLAG_JUMBO;
8355 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8356 (i << RXD_OPAQUE_INDEX_SHIFT));
8357 }
8358
8359 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8360 unsigned int frag_size;
8361
8362 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8363 &frag_size) < 0) {
8364 netdev_warn(tp->dev,
8365 "Using a smaller RX jumbo ring. Only %d "
8366 "out of %d buffers were allocated "
8367 "successfully\n", i, tp->rx_jumbo_pending);
8368 if (i == 0)
8369 goto initfail;
8370 tp->rx_jumbo_pending = i;
8371 break;
8372 }
8373 }
8374
8375 done:
8376 return 0;
8377
8378 initfail:
8379 tg3_rx_prodring_free(tp, tpr);
8380 return -ENOMEM;
8381 }
8382
8383 static void tg3_rx_prodring_fini(struct tg3 *tp,
8384 struct tg3_rx_prodring_set *tpr)
8385 {
8386 kfree(tpr->rx_std_buffers);
8387 tpr->rx_std_buffers = NULL;
8388 kfree(tpr->rx_jmb_buffers);
8389 tpr->rx_jmb_buffers = NULL;
8390 if (tpr->rx_std) {
8391 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8392 tpr->rx_std, tpr->rx_std_mapping);
8393 tpr->rx_std = NULL;
8394 }
8395 if (tpr->rx_jmb) {
8396 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8397 tpr->rx_jmb, tpr->rx_jmb_mapping);
8398 tpr->rx_jmb = NULL;
8399 }
8400 }
8401
8402 static int tg3_rx_prodring_init(struct tg3 *tp,
8403 struct tg3_rx_prodring_set *tpr)
8404 {
8405 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8406 GFP_KERNEL);
8407 if (!tpr->rx_std_buffers)
8408 return -ENOMEM;
8409
8410 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8411 TG3_RX_STD_RING_BYTES(tp),
8412 &tpr->rx_std_mapping,
8413 GFP_KERNEL);
8414 if (!tpr->rx_std)
8415 goto err_out;
8416
8417 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8418 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8419 GFP_KERNEL);
8420 if (!tpr->rx_jmb_buffers)
8421 goto err_out;
8422
8423 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8424 TG3_RX_JMB_RING_BYTES(tp),
8425 &tpr->rx_jmb_mapping,
8426 GFP_KERNEL);
8427 if (!tpr->rx_jmb)
8428 goto err_out;
8429 }
8430
8431 return 0;
8432
8433 err_out:
8434 tg3_rx_prodring_fini(tp, tpr);
8435 return -ENOMEM;
8436 }
8437
8438 /* Free up pending packets in all rx/tx rings.
8439 *
8440 * The chip has been shut down and the driver detached from
8441 * the networking core, so no interrupts or new tx packets will
8442 * end up in the driver. tp->{tx,}lock is not held and we are not
8443 * in an interrupt context and thus may sleep.
8444 */
8445 static void tg3_free_rings(struct tg3 *tp)
8446 {
8447 int i, j;
8448
8449 for (j = 0; j < tp->irq_cnt; j++) {
8450 struct tg3_napi *tnapi = &tp->napi[j];
8451
8452 tg3_rx_prodring_free(tp, &tnapi->prodring);
8453
8454 if (!tnapi->tx_buffers)
8455 continue;
8456
8457 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8458 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8459
8460 if (!skb)
8461 continue;
8462
8463 tg3_tx_skb_unmap(tnapi, i,
8464 skb_shinfo(skb)->nr_frags - 1);
8465
8466 dev_kfree_skb_any(skb);
8467 }
8468 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8469 }
8470 }
8471
8472 /* Initialize tx/rx rings for packet processing.
8473 *
8474 * The chip has been shut down and the driver detached from
8475 * the networking core, so no interrupts or new tx packets will
8476 * end up in the driver. tp->{tx,}lock are held and thus
8477 * we may not sleep.
8478 */
8479 static int tg3_init_rings(struct tg3 *tp)
8480 {
8481 int i;
8482
8483 /* Free up all the SKBs. */
8484 tg3_free_rings(tp);
8485
8486 for (i = 0; i < tp->irq_cnt; i++) {
8487 struct tg3_napi *tnapi = &tp->napi[i];
8488
8489 tnapi->last_tag = 0;
8490 tnapi->last_irq_tag = 0;
8491 tnapi->hw_status->status = 0;
8492 tnapi->hw_status->status_tag = 0;
8493 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8494
8495 tnapi->tx_prod = 0;
8496 tnapi->tx_cons = 0;
8497 if (tnapi->tx_ring)
8498 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8499
8500 tnapi->rx_rcb_ptr = 0;
8501 if (tnapi->rx_rcb)
8502 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8503
8504 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8505 tg3_free_rings(tp);
8506 return -ENOMEM;
8507 }
8508 }
8509
8510 return 0;
8511 }
8512
8513 static void tg3_mem_tx_release(struct tg3 *tp)
8514 {
8515 int i;
8516
8517 for (i = 0; i < tp->irq_max; i++) {
8518 struct tg3_napi *tnapi = &tp->napi[i];
8519
8520 if (tnapi->tx_ring) {
8521 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8522 tnapi->tx_ring, tnapi->tx_desc_mapping);
8523 tnapi->tx_ring = NULL;
8524 }
8525
8526 kfree(tnapi->tx_buffers);
8527 tnapi->tx_buffers = NULL;
8528 }
8529 }
8530
8531 static int tg3_mem_tx_acquire(struct tg3 *tp)
8532 {
8533 int i;
8534 struct tg3_napi *tnapi = &tp->napi[0];
8535
8536 /* If multivector TSS is enabled, vector 0 does not handle
8537 * tx interrupts. Don't allocate any resources for it.
8538 */
8539 if (tg3_flag(tp, ENABLE_TSS))
8540 tnapi++;
8541
8542 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8543 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8544 TG3_TX_RING_SIZE, GFP_KERNEL);
8545 if (!tnapi->tx_buffers)
8546 goto err_out;
8547
8548 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8549 TG3_TX_RING_BYTES,
8550 &tnapi->tx_desc_mapping,
8551 GFP_KERNEL);
8552 if (!tnapi->tx_ring)
8553 goto err_out;
8554 }
8555
8556 return 0;
8557
8558 err_out:
8559 tg3_mem_tx_release(tp);
8560 return -ENOMEM;
8561 }
8562
8563 static void tg3_mem_rx_release(struct tg3 *tp)
8564 {
8565 int i;
8566
8567 for (i = 0; i < tp->irq_max; i++) {
8568 struct tg3_napi *tnapi = &tp->napi[i];
8569
8570 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8571
8572 if (!tnapi->rx_rcb)
8573 continue;
8574
8575 dma_free_coherent(&tp->pdev->dev,
8576 TG3_RX_RCB_RING_BYTES(tp),
8577 tnapi->rx_rcb,
8578 tnapi->rx_rcb_mapping);
8579 tnapi->rx_rcb = NULL;
8580 }
8581 }
8582
8583 static int tg3_mem_rx_acquire(struct tg3 *tp)
8584 {
8585 unsigned int i, limit;
8586
8587 limit = tp->rxq_cnt;
8588
8589 /* If RSS is enabled, we need a (dummy) producer ring
8590 * set on vector zero. This is the true hw prodring.
8591 */
8592 if (tg3_flag(tp, ENABLE_RSS))
8593 limit++;
8594
8595 for (i = 0; i < limit; i++) {
8596 struct tg3_napi *tnapi = &tp->napi[i];
8597
8598 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8599 goto err_out;
8600
8601 /* If multivector RSS is enabled, vector 0
8602 * does not handle rx or tx interrupts.
8603 * Don't allocate any resources for it.
8604 */
8605 if (!i && tg3_flag(tp, ENABLE_RSS))
8606 continue;
8607
8608 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8609 TG3_RX_RCB_RING_BYTES(tp),
8610 &tnapi->rx_rcb_mapping,
8611 GFP_KERNEL);
8612 if (!tnapi->rx_rcb)
8613 goto err_out;
8614 }
8615
8616 return 0;
8617
8618 err_out:
8619 tg3_mem_rx_release(tp);
8620 return -ENOMEM;
8621 }
8622
8623 /*
8624 * Must not be invoked with interrupt sources disabled and
8625 * the hardware shut down.
8626 */
8627 static void tg3_free_consistent(struct tg3 *tp)
8628 {
8629 int i;
8630
8631 for (i = 0; i < tp->irq_cnt; i++) {
8632 struct tg3_napi *tnapi = &tp->napi[i];
8633
8634 if (tnapi->hw_status) {
8635 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8636 tnapi->hw_status,
8637 tnapi->status_mapping);
8638 tnapi->hw_status = NULL;
8639 }
8640 }
8641
8642 tg3_mem_rx_release(tp);
8643 tg3_mem_tx_release(tp);
8644
8645 if (tp->hw_stats) {
8646 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8647 tp->hw_stats, tp->stats_mapping);
8648 tp->hw_stats = NULL;
8649 }
8650 }
8651
8652 /*
8653 * Must not be invoked with interrupt sources disabled and
8654 * the hardware shut down. Can sleep.
8655 */
8656 static int tg3_alloc_consistent(struct tg3 *tp)
8657 {
8658 int i;
8659
8660 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8661 sizeof(struct tg3_hw_stats),
8662 &tp->stats_mapping, GFP_KERNEL);
8663 if (!tp->hw_stats)
8664 goto err_out;
8665
8666 for (i = 0; i < tp->irq_cnt; i++) {
8667 struct tg3_napi *tnapi = &tp->napi[i];
8668 struct tg3_hw_status *sblk;
8669
8670 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8671 TG3_HW_STATUS_SIZE,
8672 &tnapi->status_mapping,
8673 GFP_KERNEL);
8674 if (!tnapi->hw_status)
8675 goto err_out;
8676
8677 sblk = tnapi->hw_status;
8678
8679 if (tg3_flag(tp, ENABLE_RSS)) {
8680 u16 *prodptr = NULL;
8681
8682 /*
8683 * When RSS is enabled, the status block format changes
8684 * slightly. The "rx_jumbo_consumer", "reserved",
8685 * and "rx_mini_consumer" members get mapped to the
8686 * other three rx return ring producer indexes.
8687 */
8688 switch (i) {
8689 case 1:
8690 prodptr = &sblk->idx[0].rx_producer;
8691 break;
8692 case 2:
8693 prodptr = &sblk->rx_jumbo_consumer;
8694 break;
8695 case 3:
8696 prodptr = &sblk->reserved;
8697 break;
8698 case 4:
8699 prodptr = &sblk->rx_mini_consumer;
8700 break;
8701 }
8702 tnapi->rx_rcb_prod_idx = prodptr;
8703 } else {
8704 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8705 }
8706 }
8707
8708 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8709 goto err_out;
8710
8711 return 0;
8712
8713 err_out:
8714 tg3_free_consistent(tp);
8715 return -ENOMEM;
8716 }
8717
8718 #define MAX_WAIT_CNT 1000
8719
8720 /* To stop a block, clear the enable bit and poll till it
8721 * clears. tp->lock is held.
8722 */
8723 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8724 {
8725 unsigned int i;
8726 u32 val;
8727
8728 if (tg3_flag(tp, 5705_PLUS)) {
8729 switch (ofs) {
8730 case RCVLSC_MODE:
8731 case DMAC_MODE:
8732 case MBFREE_MODE:
8733 case BUFMGR_MODE:
8734 case MEMARB_MODE:
8735 /* We can't enable/disable these bits on the
8736 * 5705/5750, so just report success.
8737 */
8738 return 0;
8739
8740 default:
8741 break;
8742 }
8743 }
8744
8745 val = tr32(ofs);
8746 val &= ~enable_bit;
8747 tw32_f(ofs, val);
8748
8749 for (i = 0; i < MAX_WAIT_CNT; i++) {
8750 if (pci_channel_offline(tp->pdev)) {
8751 dev_err(&tp->pdev->dev,
8752 "tg3_stop_block device offline, "
8753 "ofs=%lx enable_bit=%x\n",
8754 ofs, enable_bit);
8755 return -ENODEV;
8756 }
8757
8758 udelay(100);
8759 val = tr32(ofs);
8760 if ((val & enable_bit) == 0)
8761 break;
8762 }
8763
8764 if (i == MAX_WAIT_CNT && !silent) {
8765 dev_err(&tp->pdev->dev,
8766 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8767 ofs, enable_bit);
8768 return -ENODEV;
8769 }
8770
8771 return 0;
8772 }
8773
8774 /* tp->lock is held. */
8775 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8776 {
8777 int i, err;
8778
8779 tg3_disable_ints(tp);
8780
8781 if (pci_channel_offline(tp->pdev)) {
8782 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8783 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8784 err = -ENODEV;
8785 goto err_no_dev;
8786 }
8787
8788 tp->rx_mode &= ~RX_MODE_ENABLE;
8789 tw32_f(MAC_RX_MODE, tp->rx_mode);
8790 udelay(10);
8791
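/* Quiesce the receive-side blocks first, then the send side,
 * roughly following the data path; the buffer manager and memory
 * arbiter are stopped last, below.
 */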
8792 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8793 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8794 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8795 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8796 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8797 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8798
8799 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8800 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8801 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8802 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8803 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8804 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8805 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8806
8807 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8808 tw32_f(MAC_MODE, tp->mac_mode);
8809 udelay(40);
8810
8811 tp->tx_mode &= ~TX_MODE_ENABLE;
8812 tw32_f(MAC_TX_MODE, tp->tx_mode);
8813
8814 for (i = 0; i < MAX_WAIT_CNT; i++) {
8815 udelay(100);
8816 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8817 break;
8818 }
8819 if (i >= MAX_WAIT_CNT) {
8820 dev_err(&tp->pdev->dev,
8821 "%s timed out, TX_MODE_ENABLE will not clear "
8822 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8823 err |= -ENODEV;
8824 }
8825
8826 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8827 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8828 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8829
8830 tw32(FTQ_RESET, 0xffffffff);
8831 tw32(FTQ_RESET, 0x00000000);
8832
8833 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8834 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8835
8836 err_no_dev:
8837 for (i = 0; i < tp->irq_cnt; i++) {
8838 struct tg3_napi *tnapi = &tp->napi[i];
8839 if (tnapi->hw_status)
8840 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8841 }
8842
8843 return err;
8844 }
8845
8846 /* Save PCI command register before chip reset */
8847 static void tg3_save_pci_state(struct tg3 *tp)
8848 {
8849 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8850 }
8851
8852 /* Restore PCI state after chip reset */
8853 static void tg3_restore_pci_state(struct tg3 *tp)
8854 {
8855 u32 val;
8856
8857 /* Re-enable indirect register accesses. */
8858 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8859 tp->misc_host_ctrl);
8860
8861 /* Set MAX PCI retry to zero. */
8862 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8863 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8864 tg3_flag(tp, PCIX_MODE))
8865 val |= PCISTATE_RETRY_SAME_DMA;
8866 /* Allow reads and writes to the APE register and memory space. */
8867 if (tg3_flag(tp, ENABLE_APE))
8868 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8869 PCISTATE_ALLOW_APE_SHMEM_WR |
8870 PCISTATE_ALLOW_APE_PSPACE_WR;
8871 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8872
8873 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8874
8875 if (!tg3_flag(tp, PCI_EXPRESS)) {
8876 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8877 tp->pci_cacheline_sz);
8878 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8879 tp->pci_lat_timer);
8880 }
8881
8882 /* Make sure PCI-X relaxed ordering bit is clear. */
8883 if (tg3_flag(tp, PCIX_MODE)) {
8884 u16 pcix_cmd;
8885
8886 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8887 &pcix_cmd);
8888 pcix_cmd &= ~PCI_X_CMD_ERO;
8889 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8890 pcix_cmd);
8891 }
8892
8893 if (tg3_flag(tp, 5780_CLASS)) {
8894
8895 /* Chip reset on 5780 will reset MSI enable bit,
8896 * so we need to restore it.
8897 */
8898 if (tg3_flag(tp, USING_MSI)) {
8899 u16 ctrl;
8900
8901 pci_read_config_word(tp->pdev,
8902 tp->msi_cap + PCI_MSI_FLAGS,
8903 &ctrl);
8904 pci_write_config_word(tp->pdev,
8905 tp->msi_cap + PCI_MSI_FLAGS,
8906 ctrl | PCI_MSI_FLAGS_ENABLE);
8907 val = tr32(MSGINT_MODE);
8908 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8909 }
8910 }
8911 }
8912
8913 /* tp->lock is held. */
8914 static int tg3_chip_reset(struct tg3 *tp)
8915 {
8916 u32 val;
8917 void (*write_op)(struct tg3 *, u32, u32);
8918 int i, err;
8919
8920 tg3_nvram_lock(tp);
8921
8922 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8923
8924 /* No matching tg3_nvram_unlock() after this because
8925 * chip reset below will undo the nvram lock.
8926 */
8927 tp->nvram_lock_cnt = 0;
8928
8929 /* GRC_MISC_CFG core clock reset will clear the memory
8930 * enable bit in PCI register 4 and the MSI enable bit
8931 * on some chips, so we save relevant registers here.
8932 */
8933 tg3_save_pci_state(tp);
8934
8935 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8936 tg3_flag(tp, 5755_PLUS))
8937 tw32(GRC_FASTBOOT_PC, 0);
8938
8939 /*
8940 * We must avoid the readl() that normally takes place.
8941 * It locks machines, causes machine checks, and other
8942 * fun things. So, temporarily disable the 5701
8943 * hardware workaround, while we do the reset.
8944 */
8945 write_op = tp->write32;
8946 if (write_op == tg3_write_flush_reg32)
8947 tp->write32 = tg3_write32;
8948
8949 /* Prevent the irq handler from reading or writing PCI registers
8950 * during chip reset when the memory enable bit in the PCI command
8951 * register may be cleared. The chip does not generate interrupt
8952 * at this time, but the irq handler may still be called due to irq
8953 * sharing or irqpoll.
8954 */
8955 tg3_flag_set(tp, CHIP_RESETTING);
8956 for (i = 0; i < tp->irq_cnt; i++) {
8957 struct tg3_napi *tnapi = &tp->napi[i];
8958 if (tnapi->hw_status) {
8959 tnapi->hw_status->status = 0;
8960 tnapi->hw_status->status_tag = 0;
8961 }
8962 tnapi->last_tag = 0;
8963 tnapi->last_irq_tag = 0;
8964 }
8965 smp_mb();
8966
8967 for (i = 0; i < tp->irq_cnt; i++)
8968 synchronize_irq(tp->napi[i].irq_vec);
8969
8970 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8971 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8972 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8973 }
8974
8975 /* do the reset */
8976 val = GRC_MISC_CFG_CORECLK_RESET;
8977
8978 if (tg3_flag(tp, PCI_EXPRESS)) {
8979 /* Force PCIe 1.0a mode */
8980 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8981 !tg3_flag(tp, 57765_PLUS) &&
8982 tr32(TG3_PCIE_PHY_TSTCTL) ==
8983 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8984 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8985
8986 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8987 tw32(GRC_MISC_CFG, (1 << 29));
8988 val |= (1 << 29);
8989 }
8990 }
8991
8992 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8993 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8994 tw32(GRC_VCPU_EXT_CTRL,
8995 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8996 }
8997
8998 /* Manage gphy power for all CPMU absent PCIe devices. */
8999 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9000 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9001
9002 tw32(GRC_MISC_CFG, val);
9003
9004 /* restore 5701 hardware bug workaround write method */
9005 tp->write32 = write_op;
9006
9007 /* Unfortunately, we have to delay before the PCI read back.
9008 * Some 575X chips will not even respond to a PCI cfg access
9009 * when the reset command is given to the chip.
9010 *
9011 * How do these hardware designers expect things to work
9012 * properly if the PCI write is posted for a long period
9013 * of time? It is always necessary to have some method by
9014 * which a register read back can occur to push out the
9015 * write that does the reset.
9016 *
9017 * For most tg3 variants the trick below was working.
9018 * Ho hum...
9019 */
9020 udelay(120);
9021
9022 /* Flush PCI posted writes. The normal MMIO registers
9023 * are inaccessible at this time so this is the only
9024 * way to do this reliably (actually, this is no longer
9025 * the case, see above). I tried to use indirect
9026 * register read/write but this upset some 5701 variants.
9027 */
9028 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9029
9030 udelay(120);
9031
9032 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9033 u16 val16;
9034
9035 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9036 int j;
9037 u32 cfg_val;
9038
9039 /* Wait for link training to complete. */
9040 for (j = 0; j < 5000; j++)
9041 udelay(100);
9042
9043 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9044 pci_write_config_dword(tp->pdev, 0xc4,
9045 cfg_val | (1 << 15));
9046 }
9047
9048 /* Clear the "no snoop" and "relaxed ordering" bits. */
9049 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9050 /*
9051 * Older PCIe devices only support the 128 byte
9052 * MPS setting. Enforce the restriction.
9053 */
9054 if (!tg3_flag(tp, CPMU_PRESENT))
9055 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9056 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9057
9058 /* Clear error status */
9059 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9060 PCI_EXP_DEVSTA_CED |
9061 PCI_EXP_DEVSTA_NFED |
9062 PCI_EXP_DEVSTA_FED |
9063 PCI_EXP_DEVSTA_URD);
9064 }
9065
9066 tg3_restore_pci_state(tp);
9067
9068 tg3_flag_clear(tp, CHIP_RESETTING);
9069 tg3_flag_clear(tp, ERROR_PROCESSED);
9070
9071 val = 0;
9072 if (tg3_flag(tp, 5780_CLASS))
9073 val = tr32(MEMARB_MODE);
9074 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9075
9076 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9077 tg3_stop_fw(tp);
9078 tw32(0x5000, 0x400);
9079 }
9080
9081 if (tg3_flag(tp, IS_SSB_CORE)) {
9082 /*
9083 * BCM4785: In order to avoid repercussions from using
9084 * potentially defective internal ROM, stop the Rx RISC CPU,
9085 * which is not needed for normal operation.
9086 */
9087 tg3_stop_fw(tp);
9088 tg3_halt_cpu(tp, RX_CPU_BASE);
9089 }
9090
9091 err = tg3_poll_fw(tp);
9092 if (err)
9093 return err;
9094
9095 tw32(GRC_MODE, tp->grc_mode);
9096
9097 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9098 val = tr32(0xc4);
9099
9100 tw32(0xc4, val | (1 << 15));
9101 }
9102
9103 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9104 tg3_asic_rev(tp) == ASIC_REV_5705) {
9105 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9106 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9107 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9108 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9109 }
9110
9111 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9112 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9113 val = tp->mac_mode;
9114 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9115 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9116 val = tp->mac_mode;
9117 } else
9118 val = 0;
9119
9120 tw32_f(MAC_MODE, val);
9121 udelay(40);
9122
9123 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9124
9125 tg3_mdio_start(tp);
9126
9127 if (tg3_flag(tp, PCI_EXPRESS) &&
9128 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9129 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9130 !tg3_flag(tp, 57765_PLUS)) {
9131 val = tr32(0x7c00);
9132
9133 tw32(0x7c00, val | (1 << 25));
9134 }
9135
9136 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9137 val = tr32(TG3_CPMU_CLCK_ORIDE);
9138 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9139 }
9140
9141 /* Reprobe ASF enable state. */
9142 tg3_flag_clear(tp, ENABLE_ASF);
9143 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9144 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9145
9146 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9147 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9148 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9149 u32 nic_cfg;
9150
9151 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9152 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9153 tg3_flag_set(tp, ENABLE_ASF);
9154 tp->last_event_jiffies = jiffies;
9155 if (tg3_flag(tp, 5750_PLUS))
9156 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9157
9158 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9159 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9160 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9161 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9162 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9163 }
9164 }
9165
9166 return 0;
9167 }
9168
9169 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9170 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9171
9172 /* tp->lock is held. */
9173 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9174 {
9175 int err;
9176
9177 tg3_stop_fw(tp);
9178
9179 tg3_write_sig_pre_reset(tp, kind);
9180
9181 tg3_abort_hw(tp, silent);
9182 err = tg3_chip_reset(tp);
9183
9184 __tg3_set_mac_addr(tp, false);
9185
9186 tg3_write_sig_legacy(tp, kind);
9187 tg3_write_sig_post_reset(tp, kind);
9188
9189 if (tp->hw_stats) {
9190 /* Save the stats across chip resets... */
9191 tg3_get_nstats(tp, &tp->net_stats_prev);
9192 tg3_get_estats(tp, &tp->estats_prev);
9193
9194 /* And make sure the next sample is new data */
9195 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9196 }
9197
9198 if (err)
9199 return err;
9200
9201 return 0;
9202 }
9203
9204 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9205 {
9206 struct tg3 *tp = netdev_priv(dev);
9207 struct sockaddr *addr = p;
9208 int err = 0;
9209 bool skip_mac_1 = false;
9210
9211 if (!is_valid_ether_addr(addr->sa_data))
9212 return -EADDRNOTAVAIL;
9213
9214 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9215
9216 if (!netif_running(dev))
9217 return 0;
9218
9219 if (tg3_flag(tp, ENABLE_ASF)) {
9220 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9221
9222 addr0_high = tr32(MAC_ADDR_0_HIGH);
9223 addr0_low = tr32(MAC_ADDR_0_LOW);
9224 addr1_high = tr32(MAC_ADDR_1_HIGH);
9225 addr1_low = tr32(MAC_ADDR_1_LOW);
9226
9227 /* Skip MAC addr 1 if ASF is using it. */
9228 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9229 !(addr1_high == 0 && addr1_low == 0))
9230 skip_mac_1 = true;
9231 }
9232 spin_lock_bh(&tp->lock);
9233 __tg3_set_mac_addr(tp, skip_mac_1);
9234 spin_unlock_bh(&tp->lock);
9235
9236 return err;
9237 }
9238
9239 /* tp->lock is held. */
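/* Write one ring control block into NIC SRAM at bdinfo_addr: the
 * 64-bit host DMA address split into high/low halves, the
 * length/flags word, and (on chips that use it) the NIC-side
 * descriptor address.
 */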
9240 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9241 dma_addr_t mapping, u32 maxlen_flags,
9242 u32 nic_addr)
9243 {
9244 tg3_write_mem(tp,
9245 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9246 ((u64) mapping >> 32));
9247 tg3_write_mem(tp,
9248 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9249 ((u64) mapping & 0xffffffff));
9250 tg3_write_mem(tp,
9251 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9252 maxlen_flags);
9253
9254 if (!tg3_flag(tp, 5705_PLUS))
9255 tg3_write_mem(tp,
9256 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9257 nic_addr);
9258 }
9259
9260
9261 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9262 {
9263 int i = 0;
9264
9265 if (!tg3_flag(tp, ENABLE_TSS)) {
9266 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9267 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9268 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9269 } else {
9270 tw32(HOSTCC_TXCOL_TICKS, 0);
9271 tw32(HOSTCC_TXMAX_FRAMES, 0);
9272 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9273
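/* Per-vector coalescing registers sit 0x18 bytes apart, starting
 * at the *_VEC1 offsets.
 */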
9274 for (; i < tp->txq_cnt; i++) {
9275 u32 reg;
9276
9277 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9278 tw32(reg, ec->tx_coalesce_usecs);
9279 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9280 tw32(reg, ec->tx_max_coalesced_frames);
9281 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9282 tw32(reg, ec->tx_max_coalesced_frames_irq);
9283 }
9284 }
9285
9286 for (; i < tp->irq_max - 1; i++) {
9287 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9288 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9289 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9290 }
9291 }
9292
9293 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9294 {
9295 int i = 0;
9296 u32 limit = tp->rxq_cnt;
9297
9298 if (!tg3_flag(tp, ENABLE_RSS)) {
9299 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9300 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9301 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9302 limit--;
9303 } else {
9304 tw32(HOSTCC_RXCOL_TICKS, 0);
9305 tw32(HOSTCC_RXMAX_FRAMES, 0);
9306 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9307 }
9308
9309 for (; i < limit; i++) {
9310 u32 reg;
9311
9312 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9313 tw32(reg, ec->rx_coalesce_usecs);
9314 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9315 tw32(reg, ec->rx_max_coalesced_frames);
9316 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9317 tw32(reg, ec->rx_max_coalesced_frames_irq);
9318 }
9319
9320 for (; i < tp->irq_max - 1; i++) {
9321 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9322 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9323 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9324 }
9325 }
9326
9327 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9328 {
9329 tg3_coal_tx_init(tp, ec);
9330 tg3_coal_rx_init(tp, ec);
9331
9332 if (!tg3_flag(tp, 5705_PLUS)) {
9333 u32 val = ec->stats_block_coalesce_usecs;
9334
9335 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9336 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9337
9338 if (!tp->link_up)
9339 val = 0;
9340
9341 tw32(HOSTCC_STAT_COAL_TICKS, val);
9342 }
9343 }
9344
9345 /* tp->lock is held. */
9346 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9347 {
9348 u32 txrcb, limit;
9349
9350 /* Disable all transmit rings but the first. */
9351 if (!tg3_flag(tp, 5705_PLUS))
9352 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9353 else if (tg3_flag(tp, 5717_PLUS))
9354 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9355 else if (tg3_flag(tp, 57765_CLASS) ||
9356 tg3_asic_rev(tp) == ASIC_REV_5762)
9357 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9358 else
9359 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9360
9361 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9362 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9363 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9364 BDINFO_FLAGS_DISABLED);
9365 }
9366
9367 /* tp->lock is held. */
9368 static void tg3_tx_rcbs_init(struct tg3 *tp)
9369 {
9370 int i = 0;
9371 u32 txrcb = NIC_SRAM_SEND_RCB;
9372
9373 if (tg3_flag(tp, ENABLE_TSS))
9374 i++;
9375
9376 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9377 struct tg3_napi *tnapi = &tp->napi[i];
9378
9379 if (!tnapi->tx_ring)
9380 continue;
9381
9382 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9383 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9384 NIC_SRAM_TX_BUFFER_DESC);
9385 }
9386 }
9387
9388 /* tp->lock is held. */
9389 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9390 {
9391 u32 rxrcb, limit;
9392
9393 /* Disable all receive return rings but the first. */
9394 if (tg3_flag(tp, 5717_PLUS))
9395 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9396 else if (!tg3_flag(tp, 5705_PLUS))
9397 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9398 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9399 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9400 tg3_flag(tp, 57765_CLASS))
9401 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9402 else
9403 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9404
9405 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9406 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9407 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9408 BDINFO_FLAGS_DISABLED);
9409 }
9410
9411 /* tp->lock is held. */
9412 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9413 {
9414 int i = 0;
9415 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9416
9417 if (tg3_flag(tp, ENABLE_RSS))
9418 i++;
9419
9420 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9421 struct tg3_napi *tnapi = &tp->napi[i];
9422
9423 if (!tnapi->rx_rcb)
9424 continue;
9425
9426 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9427 (tp->rx_ret_ring_mask + 1) <<
9428 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9429 }
9430 }
9431
9432 /* tp->lock is held. */
9433 static void tg3_rings_reset(struct tg3 *tp)
9434 {
9435 int i;
9436 u32 stblk;
9437 struct tg3_napi *tnapi = &tp->napi[0];
9438
9439 tg3_tx_rcbs_disable(tp);
9440
9441 tg3_rx_ret_rcbs_disable(tp);
9442
9443 /* Disable interrupts */
9444 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9445 tp->napi[0].chk_msi_cnt = 0;
9446 tp->napi[0].last_rx_cons = 0;
9447 tp->napi[0].last_tx_cons = 0;
9448
9449 /* Zero mailbox registers. */
9450 if (tg3_flag(tp, SUPPORT_MSIX)) {
9451 for (i = 1; i < tp->irq_max; i++) {
9452 tp->napi[i].tx_prod = 0;
9453 tp->napi[i].tx_cons = 0;
9454 if (tg3_flag(tp, ENABLE_TSS))
9455 tw32_mailbox(tp->napi[i].prodmbox, 0);
9456 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9457 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9458 tp->napi[i].chk_msi_cnt = 0;
9459 tp->napi[i].last_rx_cons = 0;
9460 tp->napi[i].last_tx_cons = 0;
9461 }
9462 if (!tg3_flag(tp, ENABLE_TSS))
9463 tw32_mailbox(tp->napi[0].prodmbox, 0);
9464 } else {
9465 tp->napi[0].tx_prod = 0;
9466 tp->napi[0].tx_cons = 0;
9467 tw32_mailbox(tp->napi[0].prodmbox, 0);
9468 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9469 }
9470
9471 /* Make sure the NIC-based send BD rings are disabled. */
9472 if (!tg3_flag(tp, 5705_PLUS)) {
9473 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9474 for (i = 0; i < 16; i++)
9475 tw32_tx_mbox(mbox + i * 8, 0);
9476 }
9477
9478 /* Clear status block in ram. */
9479 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9480
9481 /* Set status block DMA address */
9482 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9483 ((u64) tnapi->status_mapping >> 32));
9484 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9485 ((u64) tnapi->status_mapping & 0xffffffff));
9486
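/* Vectors 1 and up get consecutive 8-byte status block address
 * slots starting at HOSTCC_STATBLCK_RING1.
 */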
9487 stblk = HOSTCC_STATBLCK_RING1;
9488
9489 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9490 u64 mapping = (u64)tnapi->status_mapping;
9491 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9492 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9493 stblk += 8;
9494
9495 /* Clear status block in ram. */
9496 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9497 }
9498
9499 tg3_tx_rcbs_init(tp);
9500 tg3_rx_ret_rcbs_init(tp);
9501 }
9502
9503 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9504 {
9505 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9506
9507 if (!tg3_flag(tp, 5750_PLUS) ||
9508 tg3_flag(tp, 5780_CLASS) ||
9509 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9510 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9511 tg3_flag(tp, 57765_PLUS))
9512 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9513 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9514 tg3_asic_rev(tp) == ASIC_REV_5787)
9515 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9516 else
9517 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9518
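/* The standard ring replenish threshold is the smaller of the
 * NIC BD-cache based limit and one eighth of the host ring size,
 * but always at least one buffer.
 */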
9519 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9520 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9521
9522 val = min(nic_rep_thresh, host_rep_thresh);
9523 tw32(RCVBDI_STD_THRESH, val);
9524
9525 if (tg3_flag(tp, 57765_PLUS))
9526 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9527
9528 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9529 return;
9530
9531 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9532
9533 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9534
9535 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9536 tw32(RCVBDI_JUMBO_THRESH, val);
9537
9538 if (tg3_flag(tp, 57765_PLUS))
9539 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9540 }
9541
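/* Standard bit-reflected CRC-32 (polynomial 0xedb88320), the same
 * CRC Ethernet uses; the result indexes the MAC multicast hash
 * filter below.
 */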
9542 static inline u32 calc_crc(unsigned char *buf, int len)
9543 {
9544 u32 reg;
9545 u32 tmp;
9546 int j, k;
9547
9548 reg = 0xffffffff;
9549
9550 for (j = 0; j < len; j++) {
9551 reg ^= buf[j];
9552
9553 for (k = 0; k < 8; k++) {
9554 tmp = reg & 0x01;
9555
9556 reg >>= 1;
9557
9558 if (tmp)
9559 reg ^= 0xedb88320;
9560 }
9561 }
9562
9563 return ~reg;
9564 }
9565
9566 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9567 {
9568 /* accept or reject all multicast frames */
9569 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9570 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9571 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9572 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9573 }
9574
9575 static void __tg3_set_rx_mode(struct net_device *dev)
9576 {
9577 struct tg3 *tp = netdev_priv(dev);
9578 u32 rx_mode;
9579
9580 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9581 RX_MODE_KEEP_VLAN_TAG);
9582
9583 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9584 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9585 * flag clear.
9586 */
9587 if (!tg3_flag(tp, ENABLE_ASF))
9588 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9589 #endif
9590
9591 if (dev->flags & IFF_PROMISC) {
9592 /* Promiscuous mode. */
9593 rx_mode |= RX_MODE_PROMISC;
9594 } else if (dev->flags & IFF_ALLMULTI) {
9595 /* Accept all multicast. */
9596 tg3_set_multi(tp, 1);
9597 } else if (netdev_mc_empty(dev)) {
9598 /* Reject all multicast. */
9599 tg3_set_multi(tp, 0);
9600 } else {
9601 /* Accept one or more multicast(s). */
9602 struct netdev_hw_addr *ha;
9603 u32 mc_filter[4] = { 0, };
9604 u32 regidx;
9605 u32 bit;
9606 u32 crc;
9607
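/* The low seven bits of the inverted CRC select one of 128 filter
 * bits: bits 5-6 pick one of the four 32-bit hash registers,
 * bits 0-4 the bit within it.
 */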
9608 netdev_for_each_mc_addr(ha, dev) {
9609 crc = calc_crc(ha->addr, ETH_ALEN);
9610 bit = ~crc & 0x7f;
9611 regidx = (bit & 0x60) >> 5;
9612 bit &= 0x1f;
9613 mc_filter[regidx] |= (1 << bit);
9614 }
9615
9616 tw32(MAC_HASH_REG_0, mc_filter[0]);
9617 tw32(MAC_HASH_REG_1, mc_filter[1]);
9618 tw32(MAC_HASH_REG_2, mc_filter[2]);
9619 tw32(MAC_HASH_REG_3, mc_filter[3]);
9620 }
9621
9622 if (rx_mode != tp->rx_mode) {
9623 tp->rx_mode = rx_mode;
9624 tw32_f(MAC_RX_MODE, rx_mode);
9625 udelay(10);
9626 }
9627 }
9628
9629 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9630 {
9631 int i;
9632
9633 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9634 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9635 }
9636
9637 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9638 {
9639 int i;
9640
9641 if (!tg3_flag(tp, SUPPORT_MSIX))
9642 return;
9643
9644 if (tp->rxq_cnt == 1) {
9645 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9646 return;
9647 }
9648
9649 /* Validate table against current IRQ count */
9650 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9651 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9652 break;
9653 }
9654
9655 if (i != TG3_RSS_INDIR_TBL_SIZE)
9656 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9657 }
9658
9659 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9660 {
9661 int i = 0;
9662 u32 reg = MAC_RSS_INDIR_TBL_0;
9663
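/* Pack eight 4-bit queue indices into each 32-bit register, with
 * the first entry landing in the most significant nibble.
 */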
9664 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9665 u32 val = tp->rss_ind_tbl[i];
9666 i++;
9667 for (; i % 8; i++) {
9668 val <<= 4;
9669 val |= tp->rss_ind_tbl[i];
9670 }
9671 tw32(reg, val);
9672 reg += 4;
9673 }
9674 }
9675
9676 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9677 {
9678 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9679 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9680 else
9681 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9682 }
9683
9684 /* tp->lock is held. */
9685 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9686 {
9687 u32 val, rdmac_mode;
9688 int i, err, limit;
9689 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9690
9691 tg3_disable_ints(tp);
9692
9693 tg3_stop_fw(tp);
9694
9695 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9696
9697 if (tg3_flag(tp, INIT_COMPLETE))
9698 tg3_abort_hw(tp, 1);
9699
9700 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9701 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9702 tg3_phy_pull_config(tp);
9703 tg3_eee_pull_config(tp, NULL);
9704 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9705 }
9706
9707 /* Enable MAC control of LPI */
9708 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9709 tg3_setup_eee(tp);
9710
9711 if (reset_phy)
9712 tg3_phy_reset(tp);
9713
9714 err = tg3_chip_reset(tp);
9715 if (err)
9716 return err;
9717
9718 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9719
9720 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9721 val = tr32(TG3_CPMU_CTRL);
9722 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9723 tw32(TG3_CPMU_CTRL, val);
9724
9725 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9726 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9727 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9728 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9729
9730 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9731 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9732 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9733 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9734
9735 val = tr32(TG3_CPMU_HST_ACC);
9736 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9737 val |= CPMU_HST_ACC_MACCLK_6_25;
9738 tw32(TG3_CPMU_HST_ACC, val);
9739 }
9740
9741 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9742 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9743 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9744 PCIE_PWR_MGMT_L1_THRESH_4MS;
9745 tw32(PCIE_PWR_MGMT_THRESH, val);
9746
9747 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9748 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9749
9750 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9751
9752 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9753 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9754 }
9755
9756 if (tg3_flag(tp, L1PLLPD_EN)) {
9757 u32 grc_mode = tr32(GRC_MODE);
9758
9759 /* Access the lower 1K of PL PCIE block registers. */
9760 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9761 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9762
9763 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9764 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9765 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9766
9767 tw32(GRC_MODE, grc_mode);
9768 }
9769
9770 if (tg3_flag(tp, 57765_CLASS)) {
9771 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9772 u32 grc_mode = tr32(GRC_MODE);
9773
9774 /* Access the lower 1K of PL PCIE block registers. */
9775 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9776 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9777
9778 val = tr32(TG3_PCIE_TLDLPL_PORT +
9779 TG3_PCIE_PL_LO_PHYCTL5);
9780 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9781 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9782
9783 tw32(GRC_MODE, grc_mode);
9784 }
9785
9786 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9787 u32 grc_mode;
9788
9789 /* Fix transmit hangs */
9790 val = tr32(TG3_CPMU_PADRNG_CTL);
9791 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9792 tw32(TG3_CPMU_PADRNG_CTL, val);
9793
9794 grc_mode = tr32(GRC_MODE);
9795
9796 /* Access the lower 1K of DL PCIE block registers. */
9797 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9798 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9799
9800 val = tr32(TG3_PCIE_TLDLPL_PORT +
9801 TG3_PCIE_DL_LO_FTSMAX);
9802 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9803 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9804 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9805
9806 tw32(GRC_MODE, grc_mode);
9807 }
9808
9809 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9810 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9811 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9812 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9813 }
9814
9815 /* This works around an issue with Athlon chipsets on
9816 * B3 tigon3 silicon. This bit has no effect on any
9817 * other revision. But do not set this on PCI Express
9818 * chips and don't even touch the clocks if the CPMU is present.
9819 */
9820 if (!tg3_flag(tp, CPMU_PRESENT)) {
9821 if (!tg3_flag(tp, PCI_EXPRESS))
9822 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9823 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9824 }
9825
9826 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9827 tg3_flag(tp, PCIX_MODE)) {
9828 val = tr32(TG3PCI_PCISTATE);
9829 val |= PCISTATE_RETRY_SAME_DMA;
9830 tw32(TG3PCI_PCISTATE, val);
9831 }
9832
9833 if (tg3_flag(tp, ENABLE_APE)) {
9834 /* Allow reads and writes to the
9835 * APE register and memory space.
9836 */
9837 val = tr32(TG3PCI_PCISTATE);
9838 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9839 PCISTATE_ALLOW_APE_SHMEM_WR |
9840 PCISTATE_ALLOW_APE_PSPACE_WR;
9841 tw32(TG3PCI_PCISTATE, val);
9842 }
9843
9844 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9845 /* Enable some hw fixes. */
9846 val = tr32(TG3PCI_MSI_DATA);
9847 val |= (1 << 26) | (1 << 28) | (1 << 29);
9848 tw32(TG3PCI_MSI_DATA, val);
9849 }
9850
9851 /* Descriptor ring init may access the
9852 * NIC SRAM area to set up the TX descriptors, so we
9853 * can only do this after the hardware has been
9854 * successfully reset.
9855 */
9856 err = tg3_init_rings(tp);
9857 if (err)
9858 return err;
9859
9860 if (tg3_flag(tp, 57765_PLUS)) {
9861 val = tr32(TG3PCI_DMA_RW_CTRL) &
9862 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9863 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9864 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9865 if (!tg3_flag(tp, 57765_CLASS) &&
9866 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9867 tg3_asic_rev(tp) != ASIC_REV_5762)
9868 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9869 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9870 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9871 tg3_asic_rev(tp) != ASIC_REV_5761) {
9872 /* This value is determined during the probe time DMA
9873 * engine test, tg3_test_dma.
9874 */
9875 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9876 }
9877
9878 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9879 GRC_MODE_4X_NIC_SEND_RINGS |
9880 GRC_MODE_NO_TX_PHDR_CSUM |
9881 GRC_MODE_NO_RX_PHDR_CSUM);
9882 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9883
9884 /* Pseudo-header checksum is done by hardware logic and not
9885 * the offload processors, so make the chip do the pseudo-
9886 * header checksums on receive. For transmit it is more
9887 * convenient to do the pseudo-header checksum in software
9888 * as Linux does that on transmit for us in all cases.
9889 */
9890 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9891
9892 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9893 if (tp->rxptpctl)
9894 tw32(TG3_RX_PTP_CTL,
9895 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9896
9897 if (tg3_flag(tp, PTP_CAPABLE))
9898 val |= GRC_MODE_TIME_SYNC_ENABLE;
9899
9900 tw32(GRC_MODE, tp->grc_mode | val);
9901
9902 /* Set up the timer prescaler register. The clock is always 66 MHz. */
9903 val = tr32(GRC_MISC_CFG);
9904 val &= ~0xff;
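/* With a 66 MHz core clock, a prescaler value of 65 (divide by
 * 65 + 1) yields the 1 MHz timer tick the chip expects.
 */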
9905 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9906 tw32(GRC_MISC_CFG, val);
9907
9908 /* Initialize MBUF/DESC pool. */
9909 if (tg3_flag(tp, 5750_PLUS)) {
9910 /* Do nothing. */
9911 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9912 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9913 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9914 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9915 else
9916 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9917 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9918 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9919 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9920 int fw_len;
9921
9922 fw_len = tp->fw_len;
9923 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9924 tw32(BUFMGR_MB_POOL_ADDR,
9925 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9926 tw32(BUFMGR_MB_POOL_SIZE,
9927 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9928 }
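/* Illustration of the rounding above, with a hypothetical length: for
 * fw_len = 0x1234, (0x1234 + 0x7f) & ~0x7f = 0x1280, i.e. the next
 * 128-byte boundary, so the MBUF pool starts on an aligned address
 * just past the TSO firmware image.
 */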
9929
9930 if (tp->dev->mtu <= ETH_DATA_LEN) {
9931 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9932 tp->bufmgr_config.mbuf_read_dma_low_water);
9933 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9934 tp->bufmgr_config.mbuf_mac_rx_low_water);
9935 tw32(BUFMGR_MB_HIGH_WATER,
9936 tp->bufmgr_config.mbuf_high_water);
9937 } else {
9938 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9939 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9940 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9941 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9942 tw32(BUFMGR_MB_HIGH_WATER,
9943 tp->bufmgr_config.mbuf_high_water_jumbo);
9944 }
9945 tw32(BUFMGR_DMA_LOW_WATER,
9946 tp->bufmgr_config.dma_low_water);
9947 tw32(BUFMGR_DMA_HIGH_WATER,
9948 tp->bufmgr_config.dma_high_water);
9949
9950 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9951 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9952 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9953 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9954 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9955 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9956 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9957 tw32(BUFMGR_MODE, val);
9958 for (i = 0; i < 2000; i++) {
9959 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9960 break;
9961 udelay(10);
9962 }
9963 if (i >= 2000) {
9964 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9965 return -ENODEV;
9966 }
9967
9968 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9969 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9970
9971 tg3_setup_rxbd_thresholds(tp);
9972
9973 /* Initialize TG3_BDINFO's at:
9974 * RCVDBDI_STD_BD: standard eth size rx ring
9975 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9976 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9977 *
9978 * like so:
9979 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9980 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9981 * ring attribute flags
9982 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9983 *
9984 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9985 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9986 *
9987 * The size of each ring is fixed in the firmware, but the location is
9988 * configurable.
9989 */
9990 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9991 ((u64) tpr->rx_std_mapping >> 32));
9992 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9993 ((u64) tpr->rx_std_mapping & 0xffffffff));
9994 if (!tg3_flag(tp, 5717_PLUS))
9995 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9996 NIC_SRAM_RX_BUFFER_DESC);
9997
9998 /* Disable the mini ring */
9999 if (!tg3_flag(tp, 5705_PLUS))
10000 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10001 BDINFO_FLAGS_DISABLED);
10002
10003 /* Program the jumbo buffer descriptor ring control
10004 * blocks on those devices that have them.
10005 */
10006 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10007 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10008
10009 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10010 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10011 ((u64) tpr->rx_jmb_mapping >> 32));
10012 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10013 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10014 val = TG3_RX_JMB_RING_SIZE(tp) <<
10015 BDINFO_FLAGS_MAXLEN_SHIFT;
10016 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10017 val | BDINFO_FLAGS_USE_EXT_RECV);
10018 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10019 tg3_flag(tp, 57765_CLASS) ||
10020 tg3_asic_rev(tp) == ASIC_REV_5762)
10021 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10022 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10023 } else {
10024 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10025 BDINFO_FLAGS_DISABLED);
10026 }
10027
10028 if (tg3_flag(tp, 57765_PLUS)) {
10029 val = TG3_RX_STD_RING_SIZE(tp);
10030 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10031 val |= (TG3_RX_STD_DMA_SZ << 2);
10032 } else
10033 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10034 } else
10035 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10036
10037 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10038
10039 tpr->rx_std_prod_idx = tp->rx_pending;
10040 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10041
10042 tpr->rx_jmb_prod_idx =
10043 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10044 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
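/* Publishing the producer indices here hands the freshly initialized
 * RX buffers to the hardware; a zero jumbo index simply leaves that
 * ring empty when it is not enabled.
 */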
10045
10046 tg3_rings_reset(tp);
10047
10048 /* Initialize MAC address and backoff seed. */
10049 __tg3_set_mac_addr(tp, false);
10050
10051 /* MTU + ethernet header + FCS + optional VLAN tag */
10052 tw32(MAC_RX_MTU_SIZE,
10053 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10054
10055 /* The slot time is changed by tg3_setup_phy if we
10056 * run at gigabit with half duplex.
10057 */
10058 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10059 (6 << TX_LENGTHS_IPG_SHIFT) |
10060 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
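/* Interpretation of these encodings (an assumption, not taken from
 * the driver): slot time 32 and IPG 6 appear to correspond to the
 * 802.3 defaults of 512 and 96 bit times at the MAC's internal
 * granularity. tg3_setup_phy() rewrites the slot time for
 * half-duplex gigabit, per the comment above.
 */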
10061
10062 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10063 tg3_asic_rev(tp) == ASIC_REV_5762)
10064 val |= tr32(MAC_TX_LENGTHS) &
10065 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10066 TX_LENGTHS_CNT_DWN_VAL_MSK);
10067
10068 tw32(MAC_TX_LENGTHS, val);
10069
10070 /* Receive rules. */
10071 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10072 tw32(RCVLPC_CONFIG, 0x0181);
10073
10074 /* Calculate the RDMAC_MODE setting early; we need it to determine
10075 * the RCVLPC_STATS_ENABLE mask.
10076 */
10077 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10078 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10079 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10080 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10081 RDMAC_MODE_LNGREAD_ENAB);
10082
10083 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10084 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10085
10086 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10087 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10088 tg3_asic_rev(tp) == ASIC_REV_57780)
10089 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10090 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10091 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10092
10093 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10094 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10095 if (tg3_flag(tp, TSO_CAPABLE) &&
10096 tg3_asic_rev(tp) == ASIC_REV_5705) {
10097 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10098 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10099 !tg3_flag(tp, IS_5788)) {
10100 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10101 }
10102 }
10103
10104 if (tg3_flag(tp, PCI_EXPRESS))
10105 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10106
10107 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10108 tp->dma_limit = 0;
10109 if (tp->dev->mtu <= ETH_DATA_LEN) {
10110 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10111 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10112 }
10113 }
10114
10115 if (tg3_flag(tp, HW_TSO_1) ||
10116 tg3_flag(tp, HW_TSO_2) ||
10117 tg3_flag(tp, HW_TSO_3))
10118 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10119
10120 if (tg3_flag(tp, 57765_PLUS) ||
10121 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10122 tg3_asic_rev(tp) == ASIC_REV_57780)
10123 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10124
10125 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10126 tg3_asic_rev(tp) == ASIC_REV_5762)
10127 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10128
10129 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10130 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10131 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10132 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10133 tg3_flag(tp, 57765_PLUS)) {
10134 u32 tgtreg;
10135
10136 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10137 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10138 else
10139 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10140
10141 val = tr32(tgtreg);
10142 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10143 tg3_asic_rev(tp) == ASIC_REV_5762) {
10144 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10145 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10146 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10147 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10148 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10149 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10150 }
10151 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10152 }
10153
10154 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10155 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10156 tg3_asic_rev(tp) == ASIC_REV_5762) {
10157 u32 tgtreg;
10158
10159 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10160 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10161 else
10162 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10163
10164 val = tr32(tgtreg);
10165 tw32(tgtreg, val |
10166 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10167 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10168 }
10169
10170 /* Receive/send statistics. */
10171 if (tg3_flag(tp, 5750_PLUS)) {
10172 val = tr32(RCVLPC_STATS_ENABLE);
10173 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10174 tw32(RCVLPC_STATS_ENABLE, val);
10175 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10176 tg3_flag(tp, TSO_CAPABLE)) {
10177 val = tr32(RCVLPC_STATS_ENABLE);
10178 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10179 tw32(RCVLPC_STATS_ENABLE, val);
10180 } else {
10181 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10182 }
10183 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10184 tw32(SNDDATAI_STATSENAB, 0xffffff);
10185 tw32(SNDDATAI_STATSCTRL,
10186 (SNDDATAI_SCTRL_ENABLE |
10187 SNDDATAI_SCTRL_FASTUPD));
10188
10189 /* Set up the host coalescing engine. */
10190 tw32(HOSTCC_MODE, 0);
10191 for (i = 0; i < 2000; i++) {
10192 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10193 break;
10194 udelay(10);
10195 }
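/* The loop above waits up to 2000 * 10us = 20ms for the coalescing
 * engine to report itself disabled before new parameters are loaded.
 */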
10196
10197 __tg3_set_coalesce(tp, &tp->coal);
10198
10199 if (!tg3_flag(tp, 5705_PLUS)) {
10200 /* Status/statistics block address. See tg3_timer,
10201 * the tg3_periodic_fetch_stats call there, and
10202 * tg3_get_nstats to see how this works for 5705/5750 chips.
10203 */
10204 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10205 ((u64) tp->stats_mapping >> 32));
10206 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10207 ((u64) tp->stats_mapping & 0xffffffff));
10208 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10209
10210 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10211
10212 /* Clear statistics and status block memory areas */
10213 for (i = NIC_SRAM_STATS_BLK;
10214 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10215 i += sizeof(u32)) {
10216 tg3_write_mem(tp, i, 0);
10217 udelay(40);
10218 }
10219 }
10220
10221 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10222
10223 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10224 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10225 if (!tg3_flag(tp, 5705_PLUS))
10226 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10227
10228 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10229 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10230 /* reset to prevent losing 1st rx packet intermittently */
10231 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10232 udelay(10);
10233 }
10234
10235 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10236 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10237 MAC_MODE_FHDE_ENABLE;
10238 if (tg3_flag(tp, ENABLE_APE))
10239 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10240 if (!tg3_flag(tp, 5705_PLUS) &&
10241 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10242 tg3_asic_rev(tp) != ASIC_REV_5700)
10243 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10244 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10245 udelay(40);
10246
10247 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10248 * If TG3_FLAG_IS_NIC is zero, we should read the
10249 * register to preserve the GPIO settings for LOMs. The GPIOs,
10250 * whether used as inputs or outputs, are set by boot code after
10251 * reset.
10252 */
10253 if (!tg3_flag(tp, IS_NIC)) {
10254 u32 gpio_mask;
10255
10256 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10257 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10258 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10259
10260 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10261 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10262 GRC_LCLCTRL_GPIO_OUTPUT3;
10263
10264 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10265 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10266
10267 tp->grc_local_ctrl &= ~gpio_mask;
10268 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10269
10270 /* GPIO1 must be driven high for eeprom write protect */
10271 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10272 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10273 GRC_LCLCTRL_GPIO_OUTPUT1);
10274 }
10275 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10276 udelay(100);
10277
10278 if (tg3_flag(tp, USING_MSIX)) {
10279 val = tr32(MSGINT_MODE);
10280 val |= MSGINT_MODE_ENABLE;
10281 if (tp->irq_cnt > 1)
10282 val |= MSGINT_MODE_MULTIVEC_EN;
10283 if (!tg3_flag(tp, 1SHOT_MSI))
10284 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10285 tw32(MSGINT_MODE, val);
10286 }
10287
10288 if (!tg3_flag(tp, 5705_PLUS)) {
10289 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10290 udelay(40);
10291 }
10292
10293 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10294 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10295 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10296 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10297 WDMAC_MODE_LNGREAD_ENAB);
10298
10299 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10300 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10301 if (tg3_flag(tp, TSO_CAPABLE) &&
10302 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10303 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10304 /* nothing */
10305 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10306 !tg3_flag(tp, IS_5788)) {
10307 val |= WDMAC_MODE_RX_ACCEL;
10308 }
10309 }
10310
10311 /* Enable host coalescing bug fix */
10312 if (tg3_flag(tp, 5755_PLUS))
10313 val |= WDMAC_MODE_STATUS_TAG_FIX;
10314
10315 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10316 val |= WDMAC_MODE_BURST_ALL_DATA;
10317
10318 tw32_f(WDMAC_MODE, val);
10319 udelay(40);
10320
10321 if (tg3_flag(tp, PCIX_MODE)) {
10322 u16 pcix_cmd;
10323
10324 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10325 &pcix_cmd);
10326 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10327 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10328 pcix_cmd |= PCI_X_CMD_READ_2K;
10329 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10330 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10331 pcix_cmd |= PCI_X_CMD_READ_2K;
10332 }
10333 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10334 pcix_cmd);
10335 }
10336
10337 tw32_f(RDMAC_MODE, rdmac_mode);
10338 udelay(40);
10339
10340 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10341 tg3_asic_rev(tp) == ASIC_REV_5720) {
10342 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10343 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10344 break;
10345 }
10346 if (i < TG3_NUM_RDMA_CHANNELS) {
10347 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10348 val |= tg3_lso_rd_dma_workaround_bit(tp);
10349 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10350 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10351 }
10352 }
10353
10354 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10355 if (!tg3_flag(tp, 5705_PLUS))
10356 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10357
10358 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10359 tw32(SNDDATAC_MODE,
10360 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10361 else
10362 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10363
10364 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10365 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10366 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10367 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10368 val |= RCVDBDI_MODE_LRG_RING_SZ;
10369 tw32(RCVDBDI_MODE, val);
10370 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10371 if (tg3_flag(tp, HW_TSO_1) ||
10372 tg3_flag(tp, HW_TSO_2) ||
10373 tg3_flag(tp, HW_TSO_3))
10374 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10375 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10376 if (tg3_flag(tp, ENABLE_TSS))
10377 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10378 tw32(SNDBDI_MODE, val);
10379 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10380
10381 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10382 err = tg3_load_5701_a0_firmware_fix(tp);
10383 if (err)
10384 return err;
10385 }
10386
10387 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10388 /* Ignore any errors for the firmware download. If download
10389 * fails, the device will operate with EEE disabled.
10390 */
10391 tg3_load_57766_firmware(tp);
10392 }
10393
10394 if (tg3_flag(tp, TSO_CAPABLE)) {
10395 err = tg3_load_tso_firmware(tp);
10396 if (err)
10397 return err;
10398 }
10399
10400 tp->tx_mode = TX_MODE_ENABLE;
10401
10402 if (tg3_flag(tp, 5755_PLUS) ||
10403 tg3_asic_rev(tp) == ASIC_REV_5906)
10404 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10405
10406 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10407 tg3_asic_rev(tp) == ASIC_REV_5762) {
10408 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10409 tp->tx_mode &= ~val;
10410 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10411 }
10412
10413 tw32_f(MAC_TX_MODE, tp->tx_mode);
10414 udelay(100);
10415
10416 if (tg3_flag(tp, ENABLE_RSS)) {
10417 tg3_rss_write_indir_tbl(tp);
10418
10419 /* Set up the "secret" hash key. */
10420 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10421 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10422 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10423 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10424 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10425 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10426 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10427 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10428 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10429 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10430 }
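/* The ten hash-key writes above load a fixed 40-byte RSS key
 * (presumably fed to a Toeplitz-style hash); being constant rather
 * than random, it makes flow-to-ring placement reproducible across
 * boots.
 */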
10431
10432 tp->rx_mode = RX_MODE_ENABLE;
10433 if (tg3_flag(tp, 5755_PLUS))
10434 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10435
10436 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10437 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10438
10439 if (tg3_flag(tp, ENABLE_RSS))
10440 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10441 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10442 RX_MODE_RSS_IPV6_HASH_EN |
10443 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10444 RX_MODE_RSS_IPV4_HASH_EN |
10445 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10446
10447 tw32_f(MAC_RX_MODE, tp->rx_mode);
10448 udelay(10);
10449
10450 tw32(MAC_LED_CTRL, tp->led_ctrl);
10451
10452 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10453 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10454 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10455 udelay(10);
10456 }
10457 tw32_f(MAC_RX_MODE, tp->rx_mode);
10458 udelay(10);
10459
10460 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10461 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10462 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10463 /* Set drive transmission level to 1.2V */
10464 /* only if the signal pre-emphasis bit is not set */
10465 val = tr32(MAC_SERDES_CFG);
10466 val &= 0xfffff000;
10467 val |= 0x880;
10468 tw32(MAC_SERDES_CFG, val);
10469 }
10470 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10471 tw32(MAC_SERDES_CFG, 0x616000);
10472 }
10473
10474 /* Prevent chip from dropping frames when flow control
10475 * is enabled.
10476 */
10477 if (tg3_flag(tp, 57765_CLASS))
10478 val = 1;
10479 else
10480 val = 2;
10481 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10482
10483 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10484 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10485 /* Use hardware link auto-negotiation */
10486 tg3_flag_set(tp, HW_AUTONEG);
10487 }
10488
10489 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10490 tg3_asic_rev(tp) == ASIC_REV_5714) {
10491 u32 tmp;
10492
10493 tmp = tr32(SERDES_RX_CTRL);
10494 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10495 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10496 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10497 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10498 }
10499
10500 if (!tg3_flag(tp, USE_PHYLIB)) {
10501 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10502 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10503
10504 err = tg3_setup_phy(tp, false);
10505 if (err)
10506 return err;
10507
10508 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10509 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10510 u32 tmp;
10511
10512 /* Clear CRC stats. */
10513 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10514 tg3_writephy(tp, MII_TG3_TEST1,
10515 tmp | MII_TG3_TEST1_CRC_EN);
10516 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10517 }
10518 }
10519 }
10520
10521 __tg3_set_rx_mode(tp->dev);
10522
10523 /* Initialize receive rules. */
10524 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10525 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10526 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10527 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10528
10529 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10530 limit = 8;
10531 else
10532 limit = 16;
10533 if (tg3_flag(tp, ENABLE_ASF))
10534 limit -= 4;
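/* Each case below intentionally falls through, clearing every unused
 * rule/value pair from the highest supported slot down to slot 4;
 * rules 0 and 1 were programmed above, and 2 and 3 are left as-is.
 */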
10535 switch (limit) {
10536 case 16:
10537 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10538 case 15:
10539 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10540 case 14:
10541 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10542 case 13:
10543 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10544 case 12:
10545 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10546 case 11:
10547 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10548 case 10:
10549 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10550 case 9:
10551 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10552 case 8:
10553 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10554 case 7:
10555 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10556 case 6:
10557 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10558 case 5:
10559 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10560 case 4:
10561 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10562 case 3:
10563 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10564 case 2:
10565 case 1:
10566
10567 default:
10568 break;
10569 }
10570
10571 if (tg3_flag(tp, ENABLE_APE))
10572 /* Write our heartbeat update interval to APE. */
10573 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10574 APE_HOST_HEARTBEAT_INT_DISABLE);
10575
10576 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10577
10578 return 0;
10579 }
10580
10581 /* Called at device open time to get the chip ready for
10582 * packet processing. Invoked with tp->lock held.
10583 */
10584 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10585 {
10586 /* Chip may have been just powered on. If so, the boot code may still
10587 * be running initialization. Wait for it to finish to avoid races in
10588 * accessing the hardware.
10589 */
10590 tg3_enable_register_access(tp);
10591 tg3_poll_fw(tp);
10592
10593 tg3_switch_clocks(tp);
10594
10595 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10596
10597 return tg3_reset_hw(tp, reset_phy);
10598 }
10599
10600 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10601 {
10602 int i;
10603
10604 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10605 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10606
10607 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10608 off += len;
10609
10610 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10611 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10612 memset(ocir, 0, TG3_OCIR_LEN);
10613 }
10614 }
10615
10616 /* sysfs attributes for hwmon */
10617 static ssize_t tg3_show_temp(struct device *dev,
10618 struct device_attribute *devattr, char *buf)
10619 {
10620 struct pci_dev *pdev = to_pci_dev(dev);
10621 struct net_device *netdev = pci_get_drvdata(pdev);
10622 struct tg3 *tp = netdev_priv(netdev);
10623 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10624 u32 temperature;
10625
10626 spin_lock_bh(&tp->lock);
10627 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10628 sizeof(temperature));
10629 spin_unlock_bh(&tp->lock);
/* The APE firmware reports whole degrees Celsius, while the hwmon
 * sysfs ABI expects millidegrees, so scale the reading here.
 */
10630 return sprintf(buf, "%u\n", temperature * 1000);
10631 }
10632
10634 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10635 TG3_TEMP_SENSOR_OFFSET);
10636 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10637 TG3_TEMP_CAUTION_OFFSET);
10638 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10639 TG3_TEMP_MAX_OFFSET);
10640
10641 static struct attribute *tg3_attributes[] = {
10642 &sensor_dev_attr_temp1_input.dev_attr.attr,
10643 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10644 &sensor_dev_attr_temp1_max.dev_attr.attr,
10645 NULL
10646 };
10647
10648 static const struct attribute_group tg3_group = {
10649 .attrs = tg3_attributes,
10650 };
10651
10652 static void tg3_hwmon_close(struct tg3 *tp)
10653 {
10654 if (tp->hwmon_dev) {
10655 hwmon_device_unregister(tp->hwmon_dev);
10656 tp->hwmon_dev = NULL;
10657 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10658 }
10659 }
10660
10661 static void tg3_hwmon_open(struct tg3 *tp)
10662 {
10663 int i, err;
10664 u32 size = 0;
10665 struct pci_dev *pdev = tp->pdev;
10666 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10667
10668 tg3_sd_scan_scratchpad(tp, ocirs);
10669
10670 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10671 if (!ocirs[i].src_data_length)
10672 continue;
10673
10674 size += ocirs[i].src_hdr_length;
10675 size += ocirs[i].src_data_length;
10676 }
10677
10678 if (!size)
10679 return;
10680
10681 /* Register hwmon sysfs hooks */
10682 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10683 if (err) {
10684 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10685 return;
10686 }
10687
10688 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10689 if (IS_ERR(tp->hwmon_dev)) {
10690 tp->hwmon_dev = NULL;
10691 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10692 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10693 }
10694 }
10695
10697 #define TG3_STAT_ADD32(PSTAT, REG) \
10698 do { u32 __val = tr32(REG); \
10699 (PSTAT)->low += __val; \
10700 if ((PSTAT)->low < __val) \
10701 (PSTAT)->high += 1; \
10702 } while (0)
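/* Worked example of the carry test in TG3_STAT_ADD32: if low is
 * 0xffffff00 and the register reads 0x200, low wraps to 0x100, which
 * is less than the value just added, so high is incremented -- the
 * standard unsigned-overflow idiom for widening 32-bit hardware
 * counters into 64-bit software totals.
 */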
10703
10704 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10705 {
10706 struct tg3_hw_stats *sp = tp->hw_stats;
10707
10708 if (!tp->link_up)
10709 return;
10710
10711 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10712 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10713 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10714 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10715 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10716 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10717 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10718 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10719 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10720 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10721 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10722 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10723 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10724 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10725 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10726 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10727 u32 val;
10728
10729 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10730 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10731 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10732 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10733 }
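/* This clears the LSO read-DMA workaround bit that tg3_reset_hw() set
 * for the 5719/5720 erratum once enough TX packets have passed
 * through the RDMA channels.
 */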
10734
10735 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10736 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10737 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10738 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10739 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10740 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10741 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10742 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10743 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10744 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10745 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10746 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10747 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10748 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10749
10750 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10751 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10752 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10753 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10754 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10755 } else {
10756 u32 val = tr32(HOSTCC_FLOW_ATTN);
10757 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10758 if (val) {
10759 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10760 sp->rx_discards.low += val;
10761 if (sp->rx_discards.low < val)
10762 sp->rx_discards.high += 1;
10763 }
10764 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10765 }
10766 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10767 }
10768
10769 static void tg3_chk_missed_msi(struct tg3 *tp)
10770 {
10771 u32 i;
10772
10773 for (i = 0; i < tp->irq_cnt; i++) {
10774 struct tg3_napi *tnapi = &tp->napi[i];
10775
10776 if (tg3_has_work(tnapi)) {
10777 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10778 tnapi->last_tx_cons == tnapi->tx_cons) {
10779 if (tnapi->chk_msi_cnt < 1) {
10780 tnapi->chk_msi_cnt++;
10781 return;
10782 }
10783 tg3_msi(0, tnapi);
10784 }
10785 }
10786 tnapi->chk_msi_cnt = 0;
10787 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10788 tnapi->last_tx_cons = tnapi->tx_cons;
10789 }
10790 }
10791
10792 static void tg3_timer(unsigned long __opaque)
10793 {
10794 struct tg3 *tp = (struct tg3 *) __opaque;
10795
10796 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10797 goto restart_timer;
10798
10799 spin_lock(&tp->lock);
10800
10801 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10802 tg3_flag(tp, 57765_CLASS))
10803 tg3_chk_missed_msi(tp);
10804
10805 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10806 /* BCM4785: Flush posted writes from GbE to host memory. */
10807 tr32(HOSTCC_MODE);
10808 }
10809
10810 if (!tg3_flag(tp, TAGGED_STATUS)) {
10811 /* All of this is necessary because, when using non-tagged
10812 * IRQ status, the mailbox/status_block protocol the chip
10813 * uses with the CPU is race prone.
10814 */
10815 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10816 tw32(GRC_LOCAL_CTRL,
10817 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10818 } else {
10819 tw32(HOSTCC_MODE, tp->coalesce_mode |
10820 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10821 }
10822
10823 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10824 spin_unlock(&tp->lock);
10825 tg3_reset_task_schedule(tp);
10826 goto restart_timer;
10827 }
10828 }
10829
10830 /* This part only runs once per second. */
10831 if (!--tp->timer_counter) {
10832 if (tg3_flag(tp, 5705_PLUS))
10833 tg3_periodic_fetch_stats(tp);
10834
10835 if (tp->setlpicnt && !--tp->setlpicnt)
10836 tg3_phy_eee_enable(tp);
10837
10838 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10839 u32 mac_stat;
10840 int phy_event;
10841
10842 mac_stat = tr32(MAC_STATUS);
10843
10844 phy_event = 0;
10845 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10846 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10847 phy_event = 1;
10848 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10849 phy_event = 1;
10850
10851 if (phy_event)
10852 tg3_setup_phy(tp, false);
10853 } else if (tg3_flag(tp, POLL_SERDES)) {
10854 u32 mac_stat = tr32(MAC_STATUS);
10855 int need_setup = 0;
10856
10857 if (tp->link_up &&
10858 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10859 need_setup = 1;
10860 }
10861 if (!tp->link_up &&
10862 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10863 MAC_STATUS_SIGNAL_DET))) {
10864 need_setup = 1;
10865 }
10866 if (need_setup) {
10867 if (!tp->serdes_counter) {
10868 tw32_f(MAC_MODE,
10869 (tp->mac_mode &
10870 ~MAC_MODE_PORT_MODE_MASK));
10871 udelay(40);
10872 tw32_f(MAC_MODE, tp->mac_mode);
10873 udelay(40);
10874 }
10875 tg3_setup_phy(tp, false);
10876 }
10877 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10878 tg3_flag(tp, 5780_CLASS)) {
10879 tg3_serdes_parallel_detect(tp);
10880 }
10881
10882 tp->timer_counter = tp->timer_multiplier;
10883 }
10884
10885 /* Heartbeat is only sent once every 2 seconds.
10886 *
10887 * The heartbeat is to tell the ASF firmware that the host
10888 * driver is still alive. In the event that the OS crashes,
10889 * ASF needs to reset the hardware to free up the FIFO space
10890 * that may be filled with rx packets destined for the host.
10891 * If the FIFO is full, ASF will no longer function properly.
10892 *
10893 * Unintended resets have been reported on real time kernels
10894 * where the timer doesn't run on time. Netpoll will also have
10895 * the same problem.
10896 *
10897 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10898 * to check the ring condition when the heartbeat is expiring
10899 * before doing the reset. This will prevent most unintended
10900 * resets.
10901 */
10902 if (!--tp->asf_counter) {
10903 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10904 tg3_wait_for_event_ack(tp);
10905
10906 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10907 FWCMD_NICDRV_ALIVE3);
10908 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10909 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10910 TG3_FW_UPDATE_TIMEOUT_SEC);
10911
10912 tg3_generate_fw_event(tp);
10913 }
10914 tp->asf_counter = tp->asf_multiplier;
10915 }
10916
10917 spin_unlock(&tp->lock);
10918
10919 restart_timer:
10920 tp->timer.expires = jiffies + tp->timer_offset;
10921 add_timer(&tp->timer);
10922 }
10923
10924 static void tg3_timer_init(struct tg3 *tp)
10925 {
10926 if (tg3_flag(tp, TAGGED_STATUS) &&
10927 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10928 !tg3_flag(tp, 57765_CLASS))
10929 tp->timer_offset = HZ;
10930 else
10931 tp->timer_offset = HZ / 10;
10932
10933 BUG_ON(tp->timer_offset > HZ);
10934
10935 tp->timer_multiplier = (HZ / tp->timer_offset);
10936 tp->asf_multiplier = (HZ / tp->timer_offset) *
10937 TG3_FW_UPDATE_FREQ_SEC;
10938
10939 init_timer(&tp->timer);
10940 tp->timer.data = (unsigned long) tp;
10941 tp->timer.function = tg3_timer;
10942 }
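/* Example (assuming HZ = 1000): with tagged status the timer fires
 * every HZ jiffies and timer_multiplier is 1; otherwise it fires
 * every HZ / 10 jiffies and timer_multiplier is 10, so the
 * once-per-second block in tg3_timer() still runs at 1Hz either way.
 */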
10943
10944 static void tg3_timer_start(struct tg3 *tp)
10945 {
10946 tp->asf_counter = tp->asf_multiplier;
10947 tp->timer_counter = tp->timer_multiplier;
10948
10949 tp->timer.expires = jiffies + tp->timer_offset;
10950 add_timer(&tp->timer);
10951 }
10952
10953 static void tg3_timer_stop(struct tg3 *tp)
10954 {
10955 del_timer_sync(&tp->timer);
10956 }
10957
10958 /* Restart hardware after configuration changes, self-test, etc.
10959 * Invoked with tp->lock held.
10960 */
10961 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10962 __releases(tp->lock)
10963 __acquires(tp->lock)
10964 {
10965 int err;
10966
10967 err = tg3_init_hw(tp, reset_phy);
10968 if (err) {
10969 netdev_err(tp->dev,
10970 "Failed to re-initialize device, aborting\n");
10971 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10972 tg3_full_unlock(tp);
10973 tg3_timer_stop(tp);
10974 tp->irq_sync = 0;
10975 tg3_napi_enable(tp);
10976 dev_close(tp->dev);
10977 tg3_full_lock(tp, 0);
10978 }
10979 return err;
10980 }
10981
10982 static void tg3_reset_task(struct work_struct *work)
10983 {
10984 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10985 int err;
10986
10987 tg3_full_lock(tp, 0);
10988
10989 if (!netif_running(tp->dev)) {
10990 tg3_flag_clear(tp, RESET_TASK_PENDING);
10991 tg3_full_unlock(tp);
10992 return;
10993 }
10994
10995 tg3_full_unlock(tp);
10996
10997 tg3_phy_stop(tp);
10998
10999 tg3_netif_stop(tp);
11000
11001 tg3_full_lock(tp, 1);
11002
11003 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11004 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11005 tp->write32_rx_mbox = tg3_write_flush_reg32;
11006 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11007 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11008 }
11009
11010 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11011 err = tg3_init_hw(tp, true);
11012 if (err)
11013 goto out;
11014
11015 tg3_netif_start(tp);
11016
11017 out:
11018 tg3_full_unlock(tp);
11019
11020 if (!err)
11021 tg3_phy_start(tp);
11022
11023 tg3_flag_clear(tp, RESET_TASK_PENDING);
11024 }
11025
11026 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11027 {
11028 irq_handler_t fn;
11029 unsigned long flags;
11030 char *name;
11031 struct tg3_napi *tnapi = &tp->napi[irq_num];
11032
11033 if (tp->irq_cnt == 1)
11034 name = tp->dev->name;
11035 else {
11036 name = &tnapi->irq_lbl[0];
11037 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
11038 name[IFNAMSIZ-1] = 0;
11039 }
11040
11041 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11042 fn = tg3_msi;
11043 if (tg3_flag(tp, 1SHOT_MSI))
11044 fn = tg3_msi_1shot;
11045 flags = 0;
11046 } else {
11047 fn = tg3_interrupt;
11048 if (tg3_flag(tp, TAGGED_STATUS))
11049 fn = tg3_interrupt_tagged;
11050 flags = IRQF_SHARED;
11051 }
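/* MSI and MSI-X vectors are exclusive to this device, so they need no
 * IRQF_SHARED; a legacy INTx line may be shared, hence the flag in
 * the branch above.
 */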
11052
11053 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11054 }
11055
11056 static int tg3_test_interrupt(struct tg3 *tp)
11057 {
11058 struct tg3_napi *tnapi = &tp->napi[0];
11059 struct net_device *dev = tp->dev;
11060 int err, i, intr_ok = 0;
11061 u32 val;
11062
11063 if (!netif_running(dev))
11064 return -ENODEV;
11065
11066 tg3_disable_ints(tp);
11067
11068 free_irq(tnapi->irq_vec, tnapi);
11069
11070 /*
11071 * Turn off MSI one shot mode. Otherwise this test has no
11072 * observable way to know whether the interrupt was delivered.
11073 */
11074 if (tg3_flag(tp, 57765_PLUS)) {
11075 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11076 tw32(MSGINT_MODE, val);
11077 }
11078
11079 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11080 IRQF_SHARED, dev->name, tnapi);
11081 if (err)
11082 return err;
11083
11084 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11085 tg3_enable_ints(tp);
11086
11087 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11088 tnapi->coal_now);
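/* HOSTCC_MODE_NOW forces the coalescing engine to raise an interrupt
 * immediately; the poll below looks for evidence that it arrived.
 */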
11089
11090 for (i = 0; i < 5; i++) {
11091 u32 int_mbox, misc_host_ctrl;
11092
11093 int_mbox = tr32_mailbox(tnapi->int_mbox);
11094 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11095
11096 if ((int_mbox != 0) ||
11097 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11098 intr_ok = 1;
11099 break;
11100 }
11101
11102 if (tg3_flag(tp, 57765_PLUS) &&
11103 tnapi->hw_status->status_tag != tnapi->last_tag)
11104 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11105
11106 msleep(10);
11107 }
11108
11109 tg3_disable_ints(tp);
11110
11111 free_irq(tnapi->irq_vec, tnapi);
11112
11113 err = tg3_request_irq(tp, 0);
11114
11115 if (err)
11116 return err;
11117
11118 if (intr_ok) {
11119 /* Reenable MSI one shot mode. */
11120 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11121 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11122 tw32(MSGINT_MODE, val);
11123 }
11124 return 0;
11125 }
11126
11127 return -EIO;
11128 }
11129
11130 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
11131 * is successfully restored.
11132 */
11133 static int tg3_test_msi(struct tg3 *tp)
11134 {
11135 int err;
11136 u16 pci_cmd;
11137
11138 if (!tg3_flag(tp, USING_MSI))
11139 return 0;
11140
11141 /* Turn off SERR reporting in case MSI terminates with Master
11142 * Abort.
11143 */
11144 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11145 pci_write_config_word(tp->pdev, PCI_COMMAND,
11146 pci_cmd & ~PCI_COMMAND_SERR);
11147
11148 err = tg3_test_interrupt(tp);
11149
11150 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11151
11152 if (!err)
11153 return 0;
11154
11155 /* other failures */
11156 if (err != -EIO)
11157 return err;
11158
11159 /* MSI test failed, go back to INTx mode */
11160 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11161 "to INTx mode. Please report this failure to the PCI "
11162 "maintainer and include system chipset information\n");
11163
11164 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11165
11166 pci_disable_msi(tp->pdev);
11167
11168 tg3_flag_clear(tp, USING_MSI);
11169 tp->napi[0].irq_vec = tp->pdev->irq;
11170
11171 err = tg3_request_irq(tp, 0);
11172 if (err)
11173 return err;
11174
11175 /* Need to reset the chip because the MSI cycle may have terminated
11176 * with Master Abort.
11177 */
11178 tg3_full_lock(tp, 1);
11179
11180 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11181 err = tg3_init_hw(tp, true);
11182
11183 tg3_full_unlock(tp);
11184
11185 if (err)
11186 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11187
11188 return err;
11189 }
11190
11191 static int tg3_request_firmware(struct tg3 *tp)
11192 {
11193 const struct tg3_firmware_hdr *fw_hdr;
11194
11195 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11196 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11197 tp->fw_needed);
11198 return -ENOENT;
11199 }
11200
11201 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11202
11203 /* Firmware blob starts with version numbers, followed by
11204 * start address and _full_ length including BSS sections
11205 * (which must be longer than the actual data, of course).
11206 */
11207
11208 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11209 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11210 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11211 tp->fw_len, tp->fw_needed);
11212 release_firmware(tp->fw);
11213 tp->fw = NULL;
11214 return -EINVAL;
11215 }
11216
11217 /* We no longer need firmware; we have it. */
11218 tp->fw_needed = NULL;
11219 return 0;
11220 }
11221
11222 static u32 tg3_irq_count(struct tg3 *tp)
11223 {
11224 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11225
11226 if (irq_cnt > 1) {
11227 /* We want as many rx rings enabled as there are cpus.
11228 * In multiqueue MSI-X mode, the first MSI-X vector
11229 * only deals with link interrupts, etc, so we add
11230 * one to the number of vectors we are requesting.
11231 */
11232 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11233 }
11234
11235 return irq_cnt;
11236 }
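/* Example: for rxq_cnt = 4 and txq_cnt = 1 this yields
 * min(4 + 1, irq_max) -- four queue vectors plus vector 0, which in
 * multiqueue MSI-X mode is reserved for link and other non-queue
 * interrupts.
 */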
11237
11238 static bool tg3_enable_msix(struct tg3 *tp)
11239 {
11240 int i, rc;
11241 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11242
11243 tp->txq_cnt = tp->txq_req;
11244 tp->rxq_cnt = tp->rxq_req;
11245 if (!tp->rxq_cnt)
11246 tp->rxq_cnt = netif_get_num_default_rss_queues();
11247 if (tp->rxq_cnt > tp->rxq_max)
11248 tp->rxq_cnt = tp->rxq_max;
11249
11250 /* Disable multiple TX rings by default. Simple round-robin hardware
11251 * scheduling of the TX rings can cause starvation of rings with
11252 * small packets when other rings have TSO or jumbo packets.
11253 */
11254 if (!tp->txq_req)
11255 tp->txq_cnt = 1;
11256
11257 tp->irq_cnt = tg3_irq_count(tp);
11258
11259 for (i = 0; i < tp->irq_max; i++) {
11260 msix_ent[i].entry = i;
11261 msix_ent[i].vector = 0;
11262 }
11263
11264 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11265 if (rc < 0) {
11266 return false;
11267 } else if (rc != 0) {
11268 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11269 return false;
11270 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11271 tp->irq_cnt, rc);
11272 tp->irq_cnt = rc;
11273 tp->rxq_cnt = max(rc - 1, 1);
11274 if (tp->txq_cnt)
11275 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11276 }
11277
11278 for (i = 0; i < tp->irq_max; i++)
11279 tp->napi[i].irq_vec = msix_ent[i].vector;
11280
11281 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11282 pci_disable_msix(tp->pdev);
11283 return false;
11284 }
11285
11286 if (tp->irq_cnt == 1)
11287 return true;
11288
11289 tg3_flag_set(tp, ENABLE_RSS);
11290
11291 if (tp->txq_cnt > 1)
11292 tg3_flag_set(tp, ENABLE_TSS);
11293
11294 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11295
11296 return true;
11297 }
11298
11299 static void tg3_ints_init(struct tg3 *tp)
11300 {
11301 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11302 !tg3_flag(tp, TAGGED_STATUS)) {
11303 /* All MSI supporting chips should support tagged
11304 * status. Assert that this is the case.
11305 */
11306 netdev_warn(tp->dev,
11307 "MSI without TAGGED_STATUS? Not using MSI\n");
11308 goto defcfg;
11309 }
11310
11311 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11312 tg3_flag_set(tp, USING_MSIX);
11313 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11314 tg3_flag_set(tp, USING_MSI);
11315
11316 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11317 u32 msi_mode = tr32(MSGINT_MODE);
11318 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11319 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11320 if (!tg3_flag(tp, 1SHOT_MSI))
11321 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11322 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11323 }
11324 defcfg:
11325 if (!tg3_flag(tp, USING_MSIX)) {
11326 tp->irq_cnt = 1;
11327 tp->napi[0].irq_vec = tp->pdev->irq;
11328 }
11329
11330 if (tp->irq_cnt == 1) {
11331 tp->txq_cnt = 1;
11332 tp->rxq_cnt = 1;
11333 netif_set_real_num_tx_queues(tp->dev, 1);
11334 netif_set_real_num_rx_queues(tp->dev, 1);
11335 }
11336 }
11337
11338 static void tg3_ints_fini(struct tg3 *tp)
11339 {
11340 if (tg3_flag(tp, USING_MSIX))
11341 pci_disable_msix(tp->pdev);
11342 else if (tg3_flag(tp, USING_MSI))
11343 pci_disable_msi(tp->pdev);
11344 tg3_flag_clear(tp, USING_MSI);
11345 tg3_flag_clear(tp, USING_MSIX);
11346 tg3_flag_clear(tp, ENABLE_RSS);
11347 tg3_flag_clear(tp, ENABLE_TSS);
11348 }
11349
11350 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11351 bool init)
11352 {
11353 struct net_device *dev = tp->dev;
11354 int i, err;
11355
11356 /*
11357 * Set up interrupts first so we know how
11358 * many NAPI resources to allocate
11359 */
11360 tg3_ints_init(tp);
11361
11362 tg3_rss_check_indir_tbl(tp);
11363
11364 /* The placement of this call is tied
11365 * to the setup and use of Host TX descriptors.
11366 */
11367 err = tg3_alloc_consistent(tp);
11368 if (err)
11369 goto out_ints_fini;
11370
11371 tg3_napi_init(tp);
11372
11373 tg3_napi_enable(tp);
11374
11375 for (i = 0; i < tp->irq_cnt; i++) {
11376 struct tg3_napi *tnapi = &tp->napi[i];
11377 err = tg3_request_irq(tp, i);
11378 if (err) {
11379 for (i--; i >= 0; i--) {
11380 tnapi = &tp->napi[i];
11381 free_irq(tnapi->irq_vec, tnapi);
11382 }
11383 goto out_napi_fini;
11384 }
11385 }
11386
11387 tg3_full_lock(tp, 0);
11388
11389 if (init)
11390 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11391
11392 err = tg3_init_hw(tp, reset_phy);
11393 if (err) {
11394 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11395 tg3_free_rings(tp);
11396 }
11397
11398 tg3_full_unlock(tp);
11399
11400 if (err)
11401 goto out_free_irq;
11402
11403 if (test_irq && tg3_flag(tp, USING_MSI)) {
11404 err = tg3_test_msi(tp);
11405
11406 if (err) {
11407 tg3_full_lock(tp, 0);
11408 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11409 tg3_free_rings(tp);
11410 tg3_full_unlock(tp);
11411
11412 goto out_napi_fini;
11413 }
11414
11415 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11416 u32 val = tr32(PCIE_TRANSACTION_CFG);
11417
11418 tw32(PCIE_TRANSACTION_CFG,
11419 val | PCIE_TRANS_CFG_1SHOT_MSI);
11420 }
11421 }
11422
11423 tg3_phy_start(tp);
11424
11425 tg3_hwmon_open(tp);
11426
11427 tg3_full_lock(tp, 0);
11428
11429 tg3_timer_start(tp);
11430 tg3_flag_set(tp, INIT_COMPLETE);
11431 tg3_enable_ints(tp);
11432
11433 if (init)
11434 tg3_ptp_init(tp);
11435 else
11436 tg3_ptp_resume(tp);
11437
11439 tg3_full_unlock(tp);
11440
11441 netif_tx_start_all_queues(dev);
11442
11443 /*
11444 * Reset the loopback feature if it was turned on while the device
11445 * was down, and make sure it is installed properly now.
11446 */
11447 if (dev->features & NETIF_F_LOOPBACK)
11448 tg3_set_loopback(dev, dev->features);
11449
11450 return 0;
11451
11452 out_free_irq:
11453 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11454 struct tg3_napi *tnapi = &tp->napi[i];
11455 free_irq(tnapi->irq_vec, tnapi);
11456 }
11457
11458 out_napi_fini:
11459 tg3_napi_disable(tp);
11460 tg3_napi_fini(tp);
11461 tg3_free_consistent(tp);
11462
11463 out_ints_fini:
11464 tg3_ints_fini(tp);
11465
11466 return err;
11467 }
11468
11469 static void tg3_stop(struct tg3 *tp)
11470 {
11471 int i;
11472
11473 tg3_reset_task_cancel(tp);
11474 tg3_netif_stop(tp);
11475
11476 tg3_timer_stop(tp);
11477
11478 tg3_hwmon_close(tp);
11479
11480 tg3_phy_stop(tp);
11481
11482 tg3_full_lock(tp, 1);
11483
11484 tg3_disable_ints(tp);
11485
11486 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11487 tg3_free_rings(tp);
11488 tg3_flag_clear(tp, INIT_COMPLETE);
11489
11490 tg3_full_unlock(tp);
11491
11492 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11493 struct tg3_napi *tnapi = &tp->napi[i];
11494 free_irq(tnapi->irq_vec, tnapi);
11495 }
11496
11497 tg3_ints_fini(tp);
11498
11499 tg3_napi_fini(tp);
11500
11501 tg3_free_consistent(tp);
11502 }
11503
11504 static int tg3_open(struct net_device *dev)
11505 {
11506 struct tg3 *tp = netdev_priv(dev);
11507 int err;
11508
11509 if (tp->fw_needed) {
11510 err = tg3_request_firmware(tp);
11511 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11512 if (err) {
11513 netdev_warn(tp->dev, "EEE capability disabled\n");
11514 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11515 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11516 netdev_warn(tp->dev, "EEE capability restored\n");
11517 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11518 }
11519 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11520 if (err)
11521 return err;
11522 } else if (err) {
11523 netdev_warn(tp->dev, "TSO capability disabled\n");
11524 tg3_flag_clear(tp, TSO_CAPABLE);
11525 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11526 netdev_notice(tp->dev, "TSO capability restored\n");
11527 tg3_flag_set(tp, TSO_CAPABLE);
11528 }
11529 }
11530
11531 tg3_carrier_off(tp);
11532
11533 err = tg3_power_up(tp);
11534 if (err)
11535 return err;
11536
11537 tg3_full_lock(tp, 0);
11538
11539 tg3_disable_ints(tp);
11540 tg3_flag_clear(tp, INIT_COMPLETE);
11541
11542 tg3_full_unlock(tp);
11543
11544 err = tg3_start(tp,
11545 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11546 true, true);
11547 if (err) {
11548 tg3_frob_aux_power(tp, false);
11549 pci_set_power_state(tp->pdev, PCI_D3hot);
11550 }
11551
11552 if (tg3_flag(tp, PTP_CAPABLE)) {
11553 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11554 &tp->pdev->dev);
11555 if (IS_ERR(tp->ptp_clock))
11556 tp->ptp_clock = NULL;
11557 }
11558
11559 return err;
11560 }
11561
11562 static int tg3_close(struct net_device *dev)
11563 {
11564 struct tg3 *tp = netdev_priv(dev);
11565
11566 tg3_ptp_fini(tp);
11567
11568 tg3_stop(tp);
11569
11570 /* Clear stats across close / open calls */
11571 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11572 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11573
11574 tg3_power_down_prepare(tp);
11575
11576 tg3_carrier_off(tp);
11577
11578 return 0;
11579 }
11580
11581 static inline u64 get_stat64(tg3_stat64_t *val)
11582 {
11583 return ((u64)val->high << 32) | ((u64)val->low);
11584 }
11585
11586 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11587 {
11588 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11589
11590 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11591 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11592 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11593 u32 val;
11594
11595 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11596 tg3_writephy(tp, MII_TG3_TEST1,
11597 val | MII_TG3_TEST1_CRC_EN);
11598 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11599 } else
11600 val = 0;
11601
11602 tp->phy_crc_errors += val;
11603
11604 return tp->phy_crc_errors;
11605 }
11606
11607 return get_stat64(&hw_stats->rx_fcs_errors);
11608 }
11609
11610 #define ESTAT_ADD(member) \
11611 estats->member = old_estats->member + \
11612 get_stat64(&hw_stats->member)
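/* old_estats snapshots the running totals when the chip is halted
 * (the hardware counters reset with the chip), so each ESTAT_ADD()
 * below keeps the ethtool counters monotonic across resets.
 */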
11613
11614 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11615 {
11616 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11617 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11618
11619 ESTAT_ADD(rx_octets);
11620 ESTAT_ADD(rx_fragments);
11621 ESTAT_ADD(rx_ucast_packets);
11622 ESTAT_ADD(rx_mcast_packets);
11623 ESTAT_ADD(rx_bcast_packets);
11624 ESTAT_ADD(rx_fcs_errors);
11625 ESTAT_ADD(rx_align_errors);
11626 ESTAT_ADD(rx_xon_pause_rcvd);
11627 ESTAT_ADD(rx_xoff_pause_rcvd);
11628 ESTAT_ADD(rx_mac_ctrl_rcvd);
11629 ESTAT_ADD(rx_xoff_entered);
11630 ESTAT_ADD(rx_frame_too_long_errors);
11631 ESTAT_ADD(rx_jabbers);
11632 ESTAT_ADD(rx_undersize_packets);
11633 ESTAT_ADD(rx_in_length_errors);
11634 ESTAT_ADD(rx_out_length_errors);
11635 ESTAT_ADD(rx_64_or_less_octet_packets);
11636 ESTAT_ADD(rx_65_to_127_octet_packets);
11637 ESTAT_ADD(rx_128_to_255_octet_packets);
11638 ESTAT_ADD(rx_256_to_511_octet_packets);
11639 ESTAT_ADD(rx_512_to_1023_octet_packets);
11640 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11641 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11642 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11643 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11644 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11645
11646 ESTAT_ADD(tx_octets);
11647 ESTAT_ADD(tx_collisions);
11648 ESTAT_ADD(tx_xon_sent);
11649 ESTAT_ADD(tx_xoff_sent);
11650 ESTAT_ADD(tx_flow_control);
11651 ESTAT_ADD(tx_mac_errors);
11652 ESTAT_ADD(tx_single_collisions);
11653 ESTAT_ADD(tx_mult_collisions);
11654 ESTAT_ADD(tx_deferred);
11655 ESTAT_ADD(tx_excessive_collisions);
11656 ESTAT_ADD(tx_late_collisions);
11657 ESTAT_ADD(tx_collide_2times);
11658 ESTAT_ADD(tx_collide_3times);
11659 ESTAT_ADD(tx_collide_4times);
11660 ESTAT_ADD(tx_collide_5times);
11661 ESTAT_ADD(tx_collide_6times);
11662 ESTAT_ADD(tx_collide_7times);
11663 ESTAT_ADD(tx_collide_8times);
11664 ESTAT_ADD(tx_collide_9times);
11665 ESTAT_ADD(tx_collide_10times);
11666 ESTAT_ADD(tx_collide_11times);
11667 ESTAT_ADD(tx_collide_12times);
11668 ESTAT_ADD(tx_collide_13times);
11669 ESTAT_ADD(tx_collide_14times);
11670 ESTAT_ADD(tx_collide_15times);
11671 ESTAT_ADD(tx_ucast_packets);
11672 ESTAT_ADD(tx_mcast_packets);
11673 ESTAT_ADD(tx_bcast_packets);
11674 ESTAT_ADD(tx_carrier_sense_errors);
11675 ESTAT_ADD(tx_discards);
11676 ESTAT_ADD(tx_errors);
11677
11678 ESTAT_ADD(dma_writeq_full);
11679 ESTAT_ADD(dma_write_prioq_full);
11680 ESTAT_ADD(rxbds_empty);
11681 ESTAT_ADD(rx_discards);
11682 ESTAT_ADD(rx_errors);
11683 ESTAT_ADD(rx_threshold_hit);
11684
11685 ESTAT_ADD(dma_readq_full);
11686 ESTAT_ADD(dma_read_prioq_full);
11687 ESTAT_ADD(tx_comp_queue_full);
11688
11689 ESTAT_ADD(ring_set_send_prod_index);
11690 ESTAT_ADD(ring_status_update);
11691 ESTAT_ADD(nic_irqs);
11692 ESTAT_ADD(nic_avoided_irqs);
11693 ESTAT_ADD(nic_tx_threshold_hit);
11694
11695 ESTAT_ADD(mbuf_lwm_thresh_hit);
11696 }
11697
11698 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11699 {
11700 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11701 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11702
11703 stats->rx_packets = old_stats->rx_packets +
11704 get_stat64(&hw_stats->rx_ucast_packets) +
11705 get_stat64(&hw_stats->rx_mcast_packets) +
11706 get_stat64(&hw_stats->rx_bcast_packets);
11707
11708 stats->tx_packets = old_stats->tx_packets +
11709 get_stat64(&hw_stats->tx_ucast_packets) +
11710 get_stat64(&hw_stats->tx_mcast_packets) +
11711 get_stat64(&hw_stats->tx_bcast_packets);
11712
11713 stats->rx_bytes = old_stats->rx_bytes +
11714 get_stat64(&hw_stats->rx_octets);
11715 stats->tx_bytes = old_stats->tx_bytes +
11716 get_stat64(&hw_stats->tx_octets);
11717
11718 stats->rx_errors = old_stats->rx_errors +
11719 get_stat64(&hw_stats->rx_errors);
11720 stats->tx_errors = old_stats->tx_errors +
11721 get_stat64(&hw_stats->tx_errors) +
11722 get_stat64(&hw_stats->tx_mac_errors) +
11723 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11724 get_stat64(&hw_stats->tx_discards);
11725
11726 stats->multicast = old_stats->multicast +
11727 get_stat64(&hw_stats->rx_mcast_packets);
11728 stats->collisions = old_stats->collisions +
11729 get_stat64(&hw_stats->tx_collisions);
11730
11731 stats->rx_length_errors = old_stats->rx_length_errors +
11732 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11733 get_stat64(&hw_stats->rx_undersize_packets);
11734
11735 stats->rx_over_errors = old_stats->rx_over_errors +
11736 get_stat64(&hw_stats->rxbds_empty);
11737 stats->rx_frame_errors = old_stats->rx_frame_errors +
11738 get_stat64(&hw_stats->rx_align_errors);
11739 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11740 get_stat64(&hw_stats->tx_discards);
11741 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11742 get_stat64(&hw_stats->tx_carrier_sense_errors);
11743
11744 stats->rx_crc_errors = old_stats->rx_crc_errors +
11745 tg3_calc_crc_errors(tp);
11746
11747 stats->rx_missed_errors = old_stats->rx_missed_errors +
11748 get_stat64(&hw_stats->rx_discards);
11749
11750 stats->rx_dropped = tp->rx_dropped;
11751 stats->tx_dropped = tp->tx_dropped;
11752 }
11753
11754 static int tg3_get_regs_len(struct net_device *dev)
11755 {
11756 return TG3_REG_BLK_SIZE;
11757 }
11758
11759 static void tg3_get_regs(struct net_device *dev,
11760 struct ethtool_regs *regs, void *_p)
11761 {
11762 struct tg3 *tp = netdev_priv(dev);
11763
11764 regs->version = 0;
11765
11766 memset(_p, 0, TG3_REG_BLK_SIZE);
11767
11768 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11769 return;
11770
11771 tg3_full_lock(tp, 0);
11772
11773 tg3_dump_legacy_regs(tp, (u32 *)_p);
11774
11775 tg3_full_unlock(tp);
11776 }
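
/* The buffer is zero-filled up front, so any range the dump skips
 * (including the whole block when the PHY is in the low-power state)
 * reads back as zero. Illustrative userspace trigger:
 *   ethtool -d eth0
 */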
11777
11778 static int tg3_get_eeprom_len(struct net_device *dev)
11779 {
11780 struct tg3 *tp = netdev_priv(dev);
11781
11782 return tp->nvram_size;
11783 }
11784
11785 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11786 {
11787 struct tg3 *tp = netdev_priv(dev);
11788 int ret;
11789 u8 *pd;
11790 u32 i, offset, len, b_offset, b_count;
11791 __be32 val;
11792
11793 if (tg3_flag(tp, NO_NVRAM))
11794 return -EINVAL;
11795
11796 offset = eeprom->offset;
11797 len = eeprom->len;
11798 eeprom->len = 0;
11799
11800 eeprom->magic = TG3_EEPROM_MAGIC;
11801
11802 if (offset & 3) {
11803 /* adjustments to start on required 4 byte boundary */
11804 b_offset = offset & 3;
11805 b_count = 4 - b_offset;
11806 if (b_count > len) {
11807 /* i.e. offset=1 len=2 */
11808 b_count = len;
11809 }
11810 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11811 if (ret)
11812 return ret;
11813 memcpy(data, ((char *)&val) + b_offset, b_count);
11814 len -= b_count;
11815 offset += b_count;
11816 eeprom->len += b_count;
11817 }
11818
11819 /* read bytes up to the last 4 byte boundary */
11820 pd = &data[eeprom->len];
11821 for (i = 0; i < (len - (len & 3)); i += 4) {
11822 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11823 if (ret) {
11824 eeprom->len += i;
11825 return ret;
11826 }
11827 memcpy(pd + i, &val, 4);
11828 }
11829 eeprom->len += i;
11830
11831 if (len & 3) {
11832 /* read last bytes not ending on 4 byte boundary */
11833 pd = &data[eeprom->len];
11834 b_count = len & 3;
11835 b_offset = offset + len - b_count;
11836 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11837 if (ret)
11838 return ret;
11839 memcpy(pd, &val, b_count);
11840 eeprom->len += b_count;
11841 }
11842 return 0;
11843 }
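
/* Worked example of the alignment handling above: a request with
 * offset=1 len=6 is served as
 *   head:   read the word at 0, copy bytes 1..3 (b_offset=1, b_count=3)
 *   middle: no whole words remain (len is now 3)
 *   tail:   read the word at 4, copy its first 3 bytes
 * Illustrative userspace trigger:
 *   ethtool -e eth0 offset 1 length 6
 */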
11844
11845 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11846 {
11847 struct tg3 *tp = netdev_priv(dev);
11848 int ret;
11849 u32 offset, len, b_offset, odd_len;
11850 u8 *buf;
11851 __be32 start, end;
11852
11853 if (tg3_flag(tp, NO_NVRAM) ||
11854 eeprom->magic != TG3_EEPROM_MAGIC)
11855 return -EINVAL;
11856
11857 offset = eeprom->offset;
11858 len = eeprom->len;
11859
11860 if ((b_offset = (offset & 3))) {
11861 /* adjustments to start on required 4 byte boundary */
11862 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11863 if (ret)
11864 return ret;
11865 len += b_offset;
11866 offset &= ~3;
11867 if (len < 4)
11868 len = 4;
11869 }
11870
11871 odd_len = 0;
11872 if (len & 3) {
11873 /* adjustments to end on required 4 byte boundary */
11874 odd_len = 1;
11875 len = (len + 3) & ~3;
11876 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11877 if (ret)
11878 return ret;
11879 }
11880
11881 buf = data;
11882 if (b_offset || odd_len) {
11883 buf = kmalloc(len, GFP_KERNEL);
11884 if (!buf)
11885 return -ENOMEM;
11886 if (b_offset)
11887 memcpy(buf, &start, 4);
11888 if (odd_len)
11889 memcpy(buf+len-4, &end, 4);
11890 memcpy(buf + b_offset, data, eeprom->len);
11891 }
11892
11893 ret = tg3_nvram_write_block(tp, offset, len, buf);
11894
11895 if (buf != data)
11896 kfree(buf);
11897
11898 return ret;
11899 }
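
/* Unaligned writes are handled read-modify-write: the partial words at
 * either end are fetched with tg3_nvram_read_be32(), merged with the
 * caller's bytes in a bounce buffer, and the aligned span is written in
 * one tg3_nvram_write_block() call. Illustrative trigger (the magic
 * argument must equal TG3_EEPROM_MAGIC, 0x669955aa):
 *   ethtool -E eth0 magic 0x669955aa offset 1 value 0xab
 */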
11900
11901 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11902 {
11903 struct tg3 *tp = netdev_priv(dev);
11904
11905 if (tg3_flag(tp, USE_PHYLIB)) {
11906 struct phy_device *phydev;
11907 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11908 return -EAGAIN;
11909 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11910 return phy_ethtool_gset(phydev, cmd);
11911 }
11912
11913 cmd->supported = (SUPPORTED_Autoneg);
11914
11915 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11916 cmd->supported |= (SUPPORTED_1000baseT_Half |
11917 SUPPORTED_1000baseT_Full);
11918
11919 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11920 cmd->supported |= (SUPPORTED_100baseT_Half |
11921 SUPPORTED_100baseT_Full |
11922 SUPPORTED_10baseT_Half |
11923 SUPPORTED_10baseT_Full |
11924 SUPPORTED_TP);
11925 cmd->port = PORT_TP;
11926 } else {
11927 cmd->supported |= SUPPORTED_FIBRE;
11928 cmd->port = PORT_FIBRE;
11929 }
11930
11931 cmd->advertising = tp->link_config.advertising;
11932 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11933 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11934 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11935 cmd->advertising |= ADVERTISED_Pause;
11936 } else {
11937 cmd->advertising |= ADVERTISED_Pause |
11938 ADVERTISED_Asym_Pause;
11939 }
11940 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11941 cmd->advertising |= ADVERTISED_Asym_Pause;
11942 }
11943 }
11944 if (netif_running(dev) && tp->link_up) {
11945 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11946 cmd->duplex = tp->link_config.active_duplex;
11947 cmd->lp_advertising = tp->link_config.rmt_adv;
11948 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11949 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11950 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11951 else
11952 cmd->eth_tp_mdix = ETH_TP_MDI;
11953 }
11954 } else {
11955 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11956 cmd->duplex = DUPLEX_UNKNOWN;
11957 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11958 }
11959 cmd->phy_address = tp->phy_addr;
11960 cmd->transceiver = XCVR_INTERNAL;
11961 cmd->autoneg = tp->link_config.autoneg;
11962 cmd->maxtxpkt = 0;
11963 cmd->maxrxpkt = 0;
11964 return 0;
11965 }
11966
11967 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11968 {
11969 struct tg3 *tp = netdev_priv(dev);
11970 u32 speed = ethtool_cmd_speed(cmd);
11971
11972 if (tg3_flag(tp, USE_PHYLIB)) {
11973 struct phy_device *phydev;
11974 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11975 return -EAGAIN;
11976 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11977 return phy_ethtool_sset(phydev, cmd);
11978 }
11979
11980 if (cmd->autoneg != AUTONEG_ENABLE &&
11981 cmd->autoneg != AUTONEG_DISABLE)
11982 return -EINVAL;
11983
11984 if (cmd->autoneg == AUTONEG_DISABLE &&
11985 cmd->duplex != DUPLEX_FULL &&
11986 cmd->duplex != DUPLEX_HALF)
11987 return -EINVAL;
11988
11989 if (cmd->autoneg == AUTONEG_ENABLE) {
11990 u32 mask = ADVERTISED_Autoneg |
11991 ADVERTISED_Pause |
11992 ADVERTISED_Asym_Pause;
11993
11994 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11995 mask |= ADVERTISED_1000baseT_Half |
11996 ADVERTISED_1000baseT_Full;
11997
11998 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11999 mask |= ADVERTISED_100baseT_Half |
12000 ADVERTISED_100baseT_Full |
12001 ADVERTISED_10baseT_Half |
12002 ADVERTISED_10baseT_Full |
12003 ADVERTISED_TP;
12004 else
12005 mask |= ADVERTISED_FIBRE;
12006
12007 if (cmd->advertising & ~mask)
12008 return -EINVAL;
12009
12010 mask &= (ADVERTISED_1000baseT_Half |
12011 ADVERTISED_1000baseT_Full |
12012 ADVERTISED_100baseT_Half |
12013 ADVERTISED_100baseT_Full |
12014 ADVERTISED_10baseT_Half |
12015 ADVERTISED_10baseT_Full);
12016
12017 cmd->advertising &= mask;
12018 } else {
12019 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12020 if (speed != SPEED_1000)
12021 return -EINVAL;
12022
12023 if (cmd->duplex != DUPLEX_FULL)
12024 return -EINVAL;
12025 } else {
12026 if (speed != SPEED_100 &&
12027 speed != SPEED_10)
12028 return -EINVAL;
12029 }
12030 }
12031
12032 tg3_full_lock(tp, 0);
12033
12034 tp->link_config.autoneg = cmd->autoneg;
12035 if (cmd->autoneg == AUTONEG_ENABLE) {
12036 tp->link_config.advertising = (cmd->advertising |
12037 ADVERTISED_Autoneg);
12038 tp->link_config.speed = SPEED_UNKNOWN;
12039 tp->link_config.duplex = DUPLEX_UNKNOWN;
12040 } else {
12041 tp->link_config.advertising = 0;
12042 tp->link_config.speed = speed;
12043 tp->link_config.duplex = cmd->duplex;
12044 }
12045
12046 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12047
12048 tg3_warn_mgmt_link_flap(tp);
12049
12050 if (netif_running(dev))
12051 tg3_setup_phy(tp, true);
12052
12053 tg3_full_unlock(tp);
12054
12055 return 0;
12056 }
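
/* Illustrative ethtool(8) invocations served by the two functions above:
 *   ethtool eth0                                   # tg3_get_settings()
 *   ethtool -s eth0 autoneg on
 *   ethtool -s eth0 speed 100 duplex full autoneg off
 * Per the checks above, forced mode on SerDes parts must be 1000/full.
 */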
12057
12058 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12059 {
12060 struct tg3 *tp = netdev_priv(dev);
12061
12062 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12063 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12064 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12065 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12066 }
12067
12068 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12069 {
12070 struct tg3 *tp = netdev_priv(dev);
12071
12072 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12073 wol->supported = WAKE_MAGIC;
12074 else
12075 wol->supported = 0;
12076 wol->wolopts = 0;
12077 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12078 wol->wolopts = WAKE_MAGIC;
12079 memset(&wol->sopass, 0, sizeof(wol->sopass));
12080 }
12081
12082 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12083 {
12084 struct tg3 *tp = netdev_priv(dev);
12085 struct device *dp = &tp->pdev->dev;
12086
12087 if (wol->wolopts & ~WAKE_MAGIC)
12088 return -EINVAL;
12089 if ((wol->wolopts & WAKE_MAGIC) &&
12090 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12091 return -EINVAL;
12092
12093 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12094
12095 spin_lock_bh(&tp->lock);
12096 if (device_may_wakeup(dp))
12097 tg3_flag_set(tp, WOL_ENABLE);
12098 else
12099 tg3_flag_clear(tp, WOL_ENABLE);
12100 spin_unlock_bh(&tp->lock);
12101
12102 return 0;
12103 }
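
/* Only magic-packet wake is supported. Illustrative usage:
 *   ethtool -s eth0 wol g    # enable wake-on-magic-packet
 *   ethtool -s eth0 wol d    # disable wake-on-LAN
 */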
12104
12105 static u32 tg3_get_msglevel(struct net_device *dev)
12106 {
12107 struct tg3 *tp = netdev_priv(dev);
12108 return tp->msg_enable;
12109 }
12110
12111 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12112 {
12113 struct tg3 *tp = netdev_priv(dev);
12114 tp->msg_enable = value;
12115 }
12116
12117 static int tg3_nway_reset(struct net_device *dev)
12118 {
12119 struct tg3 *tp = netdev_priv(dev);
12120 int r;
12121
12122 if (!netif_running(dev))
12123 return -EAGAIN;
12124
12125 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12126 return -EINVAL;
12127
12128 tg3_warn_mgmt_link_flap(tp);
12129
12130 if (tg3_flag(tp, USE_PHYLIB)) {
12131 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12132 return -EAGAIN;
12133 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
12134 } else {
12135 u32 bmcr;
12136
12137 spin_lock_bh(&tp->lock);
12138 r = -EINVAL;
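		/* The back-to-back BMCR reads appear deliberate: the first
		 * acts as a dummy read and only the checked read below is
		 * used, so this is not a stray duplicate.
		 */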
12139 tg3_readphy(tp, MII_BMCR, &bmcr);
12140 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12141 ((bmcr & BMCR_ANENABLE) ||
12142 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12143 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12144 BMCR_ANENABLE);
12145 r = 0;
12146 }
12147 spin_unlock_bh(&tp->lock);
12148 }
12149
12150 return r;
12151 }
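
/* Restarts autonegotiation, as triggered by `ethtool -r eth0`. */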
12152
12153 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12154 {
12155 struct tg3 *tp = netdev_priv(dev);
12156
12157 ering->rx_max_pending = tp->rx_std_ring_mask;
12158 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12159 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12160 else
12161 ering->rx_jumbo_max_pending = 0;
12162
12163 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12164
12165 ering->rx_pending = tp->rx_pending;
12166 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12167 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12168 else
12169 ering->rx_jumbo_pending = 0;
12170
12171 ering->tx_pending = tp->napi[0].tx_pending;
12172 }
12173
12174 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12175 {
12176 struct tg3 *tp = netdev_priv(dev);
12177 int i, irq_sync = 0, err = 0;
12178
12179 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12180 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12181 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12182 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12183 (tg3_flag(tp, TSO_BUG) &&
12184 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12185 return -EINVAL;
12186
12187 if (netif_running(dev)) {
12188 tg3_phy_stop(tp);
12189 tg3_netif_stop(tp);
12190 irq_sync = 1;
12191 }
12192
12193 tg3_full_lock(tp, irq_sync);
12194
12195 tp->rx_pending = ering->rx_pending;
12196
12197 if (tg3_flag(tp, MAX_RXPEND_64) &&
12198 tp->rx_pending > 63)
12199 tp->rx_pending = 63;
12200 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12201
12202 for (i = 0; i < tp->irq_max; i++)
12203 tp->napi[i].tx_pending = ering->tx_pending;
12204
12205 if (netif_running(dev)) {
12206 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12207 err = tg3_restart_hw(tp, false);
12208 if (!err)
12209 tg3_netif_start(tp);
12210 }
12211
12212 tg3_full_unlock(tp);
12213
12214 if (irq_sync && !err)
12215 tg3_phy_start(tp);
12216
12217 return err;
12218 }
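
/* Illustrative resize: `ethtool -G eth0 rx 511 tx 511`. Note the guard
 * above: tx_pending must exceed MAX_SKB_FRAGS (three times that on
 * TSO_BUG chips) so a maximally fragmented skb can always be queued.
 */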
12219
12220 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12221 {
12222 struct tg3 *tp = netdev_priv(dev);
12223
12224 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12225
12226 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12227 epause->rx_pause = 1;
12228 else
12229 epause->rx_pause = 0;
12230
12231 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12232 epause->tx_pause = 1;
12233 else
12234 epause->tx_pause = 0;
12235 }
12236
12237 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12238 {
12239 struct tg3 *tp = netdev_priv(dev);
12240 int err = 0;
12241
12242 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12243 tg3_warn_mgmt_link_flap(tp);
12244
12245 if (tg3_flag(tp, USE_PHYLIB)) {
12246 u32 newadv;
12247 struct phy_device *phydev;
12248
12249 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12250
12251 if (!(phydev->supported & SUPPORTED_Pause) ||
12252 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12253 (epause->rx_pause != epause->tx_pause)))
12254 return -EINVAL;
12255
12256 tp->link_config.flowctrl = 0;
12257 if (epause->rx_pause) {
12258 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12259
12260 if (epause->tx_pause) {
12261 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12262 newadv = ADVERTISED_Pause;
12263 } else
12264 newadv = ADVERTISED_Pause |
12265 ADVERTISED_Asym_Pause;
12266 } else if (epause->tx_pause) {
12267 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12268 newadv = ADVERTISED_Asym_Pause;
12269 } else
12270 newadv = 0;
12271
12272 if (epause->autoneg)
12273 tg3_flag_set(tp, PAUSE_AUTONEG);
12274 else
12275 tg3_flag_clear(tp, PAUSE_AUTONEG);
12276
12277 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12278 u32 oldadv = phydev->advertising &
12279 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12280 if (oldadv != newadv) {
12281 phydev->advertising &=
12282 ~(ADVERTISED_Pause |
12283 ADVERTISED_Asym_Pause);
12284 phydev->advertising |= newadv;
12285 if (phydev->autoneg) {
12286 /*
12287 * Always renegotiate the link to
12288 * inform our link partner of our
12289 * flow control settings, even if the
12290 * flow control is forced. Let
12291 * tg3_adjust_link() do the final
12292 * flow control setup.
12293 */
12294 return phy_start_aneg(phydev);
12295 }
12296 }
12297
12298 if (!epause->autoneg)
12299 tg3_setup_flow_control(tp, 0, 0);
12300 } else {
12301 tp->link_config.advertising &=
12302 ~(ADVERTISED_Pause |
12303 ADVERTISED_Asym_Pause);
12304 tp->link_config.advertising |= newadv;
12305 }
12306 } else {
12307 int irq_sync = 0;
12308
12309 if (netif_running(dev)) {
12310 tg3_netif_stop(tp);
12311 irq_sync = 1;
12312 }
12313
12314 tg3_full_lock(tp, irq_sync);
12315
12316 if (epause->autoneg)
12317 tg3_flag_set(tp, PAUSE_AUTONEG);
12318 else
12319 tg3_flag_clear(tp, PAUSE_AUTONEG);
12320 if (epause->rx_pause)
12321 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12322 else
12323 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12324 if (epause->tx_pause)
12325 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12326 else
12327 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12328
12329 if (netif_running(dev)) {
12330 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12331 err = tg3_restart_hw(tp, false);
12332 if (!err)
12333 tg3_netif_start(tp);
12334 }
12335
12336 tg3_full_unlock(tp);
12337 }
12338
12339 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12340
12341 return err;
12342 }
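
/* Illustrative flow-control setup: `ethtool -A eth0 autoneg on rx on
 * tx on`. With pause autoneg on, the advertised pause bits are
 * renegotiated; with it off, flow control is forced directly.
 */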
12343
12344 static int tg3_get_sset_count(struct net_device *dev, int sset)
12345 {
12346 switch (sset) {
12347 case ETH_SS_TEST:
12348 return TG3_NUM_TEST;
12349 case ETH_SS_STATS:
12350 return TG3_NUM_STATS;
12351 default:
12352 return -EOPNOTSUPP;
12353 }
12354 }
12355
12356 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12357 u32 *rules __always_unused)
12358 {
12359 struct tg3 *tp = netdev_priv(dev);
12360
12361 if (!tg3_flag(tp, SUPPORT_MSIX))
12362 return -EOPNOTSUPP;
12363
12364 switch (info->cmd) {
12365 case ETHTOOL_GRXRINGS:
12366 if (netif_running(tp->dev))
12367 info->data = tp->rxq_cnt;
12368 else {
12369 info->data = num_online_cpus();
12370 if (info->data > TG3_RSS_MAX_NUM_QS)
12371 info->data = TG3_RSS_MAX_NUM_QS;
12372 }
12373
12374 /* The first interrupt vector only
12375 * handles link interrupts.
12376 */
12377 info->data -= 1;
12378 return 0;
12379
12380 default:
12381 return -EOPNOTSUPP;
12382 }
12383 }
12384
12385 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12386 {
12387 u32 size = 0;
12388 struct tg3 *tp = netdev_priv(dev);
12389
12390 if (tg3_flag(tp, SUPPORT_MSIX))
12391 size = TG3_RSS_INDIR_TBL_SIZE;
12392
12393 return size;
12394 }
12395
12396 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12397 {
12398 struct tg3 *tp = netdev_priv(dev);
12399 int i;
12400
12401 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12402 indir[i] = tp->rss_ind_tbl[i];
12403
12404 return 0;
12405 }
12406
12407 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12408 {
12409 struct tg3 *tp = netdev_priv(dev);
12410 size_t i;
12411
12412 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12413 tp->rss_ind_tbl[i] = indir[i];
12414
12415 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12416 return 0;
12417
12418 /* It is legal to write the indirection
12419 * table while the device is running.
12420 */
12421 tg3_full_lock(tp, 0);
12422 tg3_rss_write_indir_tbl(tp);
12423 tg3_full_unlock(tp);
12424
12425 return 0;
12426 }
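
/* The RSS indirection table may be read and rewritten while the device
 * is up, e.g.:
 *   ethtool -x eth0            # show the current table
 *   ethtool -X eth0 equal 4    # spread flows evenly over 4 rx queues
 */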
12427
12428 static void tg3_get_channels(struct net_device *dev,
12429 struct ethtool_channels *channel)
12430 {
12431 struct tg3 *tp = netdev_priv(dev);
12432 u32 deflt_qs = netif_get_num_default_rss_queues();
12433
12434 channel->max_rx = tp->rxq_max;
12435 channel->max_tx = tp->txq_max;
12436
12437 if (netif_running(dev)) {
12438 channel->rx_count = tp->rxq_cnt;
12439 channel->tx_count = tp->txq_cnt;
12440 } else {
12441 if (tp->rxq_req)
12442 channel->rx_count = tp->rxq_req;
12443 else
12444 channel->rx_count = min(deflt_qs, tp->rxq_max);
12445
12446 if (tp->txq_req)
12447 channel->tx_count = tp->txq_req;
12448 else
12449 channel->tx_count = min(deflt_qs, tp->txq_max);
12450 }
12451 }
12452
12453 static int tg3_set_channels(struct net_device *dev,
12454 struct ethtool_channels *channel)
12455 {
12456 struct tg3 *tp = netdev_priv(dev);
12457
12458 if (!tg3_flag(tp, SUPPORT_MSIX))
12459 return -EOPNOTSUPP;
12460
12461 if (channel->rx_count > tp->rxq_max ||
12462 channel->tx_count > tp->txq_max)
12463 return -EINVAL;
12464
12465 tp->rxq_req = channel->rx_count;
12466 tp->txq_req = channel->tx_count;
12467
12468 if (!netif_running(dev))
12469 return 0;
12470
12471 tg3_stop(tp);
12472
12473 tg3_carrier_off(tp);
12474
12475 tg3_start(tp, true, false, false);
12476
12477 return 0;
12478 }
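
/* Changing queue counts (e.g. `ethtool -L eth0 rx 4 tx 4`) restarts a
 * running device: tg3_stop()/tg3_start() above tear down and rebuild
 * the rings, so a brief link flap is expected.
 */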
12479
12480 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12481 {
12482 switch (stringset) {
12483 case ETH_SS_STATS:
12484 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12485 break;
12486 case ETH_SS_TEST:
12487 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12488 break;
12489 default:
12490 		WARN_ON(1);	/* unhandled stringset */
12491 break;
12492 }
12493 }
12494
12495 static int tg3_set_phys_id(struct net_device *dev,
12496 enum ethtool_phys_id_state state)
12497 {
12498 struct tg3 *tp = netdev_priv(dev);
12499
12500 if (!netif_running(tp->dev))
12501 return -EAGAIN;
12502
12503 switch (state) {
12504 case ETHTOOL_ID_ACTIVE:
12505 return 1; /* cycle on/off once per second */
12506
12507 case ETHTOOL_ID_ON:
12508 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12509 LED_CTRL_1000MBPS_ON |
12510 LED_CTRL_100MBPS_ON |
12511 LED_CTRL_10MBPS_ON |
12512 LED_CTRL_TRAFFIC_OVERRIDE |
12513 LED_CTRL_TRAFFIC_BLINK |
12514 LED_CTRL_TRAFFIC_LED);
12515 break;
12516
12517 case ETHTOOL_ID_OFF:
12518 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12519 LED_CTRL_TRAFFIC_OVERRIDE);
12520 break;
12521
12522 case ETHTOOL_ID_INACTIVE:
12523 tw32(MAC_LED_CTRL, tp->led_ctrl);
12524 break;
12525 }
12526
12527 return 0;
12528 }
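
/* Port identification, e.g. `ethtool -p eth0 5` blinks the LEDs for
 * five seconds; returning 1 for ETHTOOL_ID_ACTIVE above requests one
 * on/off cycle per second from the ethtool core.
 */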
12529
12530 static void tg3_get_ethtool_stats(struct net_device *dev,
12531 struct ethtool_stats *estats, u64 *tmp_stats)
12532 {
12533 struct tg3 *tp = netdev_priv(dev);
12534
12535 if (tp->hw_stats)
12536 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12537 else
12538 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12539 }
12540
12541 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12542 {
12543 int i;
12544 __be32 *buf;
12545 u32 offset = 0, len = 0;
12546 u32 magic, val;
12547
12548 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12549 return NULL;
12550
12551 if (magic == TG3_EEPROM_MAGIC) {
12552 for (offset = TG3_NVM_DIR_START;
12553 offset < TG3_NVM_DIR_END;
12554 offset += TG3_NVM_DIRENT_SIZE) {
12555 if (tg3_nvram_read(tp, offset, &val))
12556 return NULL;
12557
12558 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12559 TG3_NVM_DIRTYPE_EXTVPD)
12560 break;
12561 }
12562
12563 if (offset != TG3_NVM_DIR_END) {
12564 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12565 if (tg3_nvram_read(tp, offset + 4, &offset))
12566 return NULL;
12567
12568 offset = tg3_nvram_logical_addr(tp, offset);
12569 }
12570 }
12571
12572 if (!offset || !len) {
12573 offset = TG3_NVM_VPD_OFF;
12574 len = TG3_NVM_VPD_LEN;
12575 }
12576
12577 buf = kmalloc(len, GFP_KERNEL);
12578 if (buf == NULL)
12579 return NULL;
12580
12581 if (magic == TG3_EEPROM_MAGIC) {
12582 for (i = 0; i < len; i += 4) {
12583 /* The data is in little-endian format in NVRAM.
12584 * Use the big-endian read routines to preserve
12585 * the byte order as it exists in NVRAM.
12586 */
12587 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12588 goto error;
12589 }
12590 } else {
12591 u8 *ptr;
12592 ssize_t cnt;
12593 unsigned int pos = 0;
12594
12595 ptr = (u8 *)&buf[0];
12596 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12597 cnt = pci_read_vpd(tp->pdev, pos,
12598 len - pos, ptr);
12599 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12600 cnt = 0;
12601 else if (cnt < 0)
12602 goto error;
12603 }
12604 if (pos != len)
12605 goto error;
12606 }
12607
12608 *vpdlen = len;
12609
12610 return buf;
12611
12612 error:
12613 kfree(buf);
12614 return NULL;
12615 }
12616
12617 #define NVRAM_TEST_SIZE 0x100
12618 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12619 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12620 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12621 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12622 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12623 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12624 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12625 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12626
12627 static int tg3_test_nvram(struct tg3 *tp)
12628 {
12629 u32 csum, magic, len;
12630 __be32 *buf;
12631 int i, j, k, err = 0, size;
12632
12633 if (tg3_flag(tp, NO_NVRAM))
12634 return 0;
12635
12636 if (tg3_nvram_read(tp, 0, &magic) != 0)
12637 return -EIO;
12638
12639 if (magic == TG3_EEPROM_MAGIC)
12640 size = NVRAM_TEST_SIZE;
12641 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12642 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12643 TG3_EEPROM_SB_FORMAT_1) {
12644 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12645 case TG3_EEPROM_SB_REVISION_0:
12646 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12647 break;
12648 case TG3_EEPROM_SB_REVISION_2:
12649 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12650 break;
12651 case TG3_EEPROM_SB_REVISION_3:
12652 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12653 break;
12654 case TG3_EEPROM_SB_REVISION_4:
12655 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12656 break;
12657 case TG3_EEPROM_SB_REVISION_5:
12658 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12659 break;
12660 case TG3_EEPROM_SB_REVISION_6:
12661 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12662 break;
12663 default:
12664 return -EIO;
12665 }
12666 } else
12667 return 0;
12668 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12669 size = NVRAM_SELFBOOT_HW_SIZE;
12670 else
12671 return -EIO;
12672
12673 buf = kmalloc(size, GFP_KERNEL);
12674 if (buf == NULL)
12675 return -ENOMEM;
12676
12677 err = -EIO;
12678 for (i = 0, j = 0; i < size; i += 4, j++) {
12679 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12680 if (err)
12681 break;
12682 }
12683 if (i < size)
12684 goto out;
12685
12686 /* Selfboot format */
12687 magic = be32_to_cpu(buf[0]);
12688 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12689 TG3_EEPROM_MAGIC_FW) {
12690 u8 *buf8 = (u8 *) buf, csum8 = 0;
12691
12692 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12693 TG3_EEPROM_SB_REVISION_2) {
12694 /* For rev 2, the csum doesn't include the MBA. */
12695 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12696 csum8 += buf8[i];
12697 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12698 csum8 += buf8[i];
12699 } else {
12700 for (i = 0; i < size; i++)
12701 csum8 += buf8[i];
12702 }
12703
12704 if (csum8 == 0) {
12705 err = 0;
12706 goto out;
12707 }
12708
12709 err = -EIO;
12710 goto out;
12711 }
12712
12713 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12714 TG3_EEPROM_MAGIC_HW) {
12715 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12716 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12717 u8 *buf8 = (u8 *) buf;
12718
12719 /* Separate the parity bits and the data bytes. */
12720 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12721 if ((i == 0) || (i == 8)) {
12722 int l;
12723 u8 msk;
12724
12725 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12726 parity[k++] = buf8[i] & msk;
12727 i++;
12728 } else if (i == 16) {
12729 int l;
12730 u8 msk;
12731
12732 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12733 parity[k++] = buf8[i] & msk;
12734 i++;
12735
12736 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12737 parity[k++] = buf8[i] & msk;
12738 i++;
12739 }
12740 data[j++] = buf8[i];
12741 }
12742
12743 err = -EIO;
12744 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12745 u8 hw8 = hweight8(data[i]);
12746
12747 if ((hw8 & 0x1) && parity[i])
12748 goto out;
12749 else if (!(hw8 & 0x1) && !parity[i])
12750 goto out;
12751 }
12752 err = 0;
12753 goto out;
12754 }
12755
12756 err = -EIO;
12757
12758 /* Bootstrap checksum at offset 0x10 */
12759 csum = calc_crc((unsigned char *) buf, 0x10);
12760 if (csum != le32_to_cpu(buf[0x10/4]))
12761 goto out;
12762
12763 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12764 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12765 if (csum != le32_to_cpu(buf[0xfc/4]))
12766 goto out;
12767
12768 kfree(buf);
12769
12770 buf = tg3_vpd_readblock(tp, &len);
12771 if (!buf)
12772 return -ENOMEM;
12773
12774 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12775 if (i > 0) {
12776 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12777 if (j < 0)
12778 goto out;
12779
12780 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12781 goto out;
12782
12783 i += PCI_VPD_LRDT_TAG_SIZE;
12784 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12785 PCI_VPD_RO_KEYWORD_CHKSUM);
12786 if (j > 0) {
12787 u8 csum8 = 0;
12788
12789 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12790
12791 for (i = 0; i <= j; i++)
12792 csum8 += ((u8 *)buf)[i];
12793
12794 if (csum8)
12795 goto out;
12796 }
12797 }
12798
12799 err = 0;
12800
12801 out:
12802 kfree(buf);
12803 return err;
12804 }
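
/* Summary of the checks above: firmware selfboot images must have a
 * zero byte-sum (rev 2 skips the MBA word); hardware selfboot images
 * carry per-byte odd parity, with the parity bits packed at offsets 0,
 * 8 and 16; all other images are validated against CRCs at 0x10 and
 * 0xfc plus the optional VPD read-only checksum keyword.
 */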
12805
12806 #define TG3_SERDES_TIMEOUT_SEC 2
12807 #define TG3_COPPER_TIMEOUT_SEC 6
12808
12809 static int tg3_test_link(struct tg3 *tp)
12810 {
12811 int i, max;
12812
12813 if (!netif_running(tp->dev))
12814 return -ENODEV;
12815
12816 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12817 max = TG3_SERDES_TIMEOUT_SEC;
12818 else
12819 max = TG3_COPPER_TIMEOUT_SEC;
12820
12821 for (i = 0; i < max; i++) {
12822 if (tp->link_up)
12823 return 0;
12824
12825 if (msleep_interruptible(1000))
12826 break;
12827 }
12828
12829 return -EIO;
12830 }
12831
12832 /* Only test the commonly used registers */
12833 static int tg3_test_registers(struct tg3 *tp)
12834 {
12835 int i, is_5705, is_5750;
12836 u32 offset, read_mask, write_mask, val, save_val, read_val;
12837 static struct {
12838 u16 offset;
12839 u16 flags;
12840 #define TG3_FL_5705 0x1
12841 #define TG3_FL_NOT_5705 0x2
12842 #define TG3_FL_NOT_5788 0x4
12843 #define TG3_FL_NOT_5750 0x8
12844 u32 read_mask;
12845 u32 write_mask;
12846 } reg_tbl[] = {
12847 /* MAC Control Registers */
12848 { MAC_MODE, TG3_FL_NOT_5705,
12849 0x00000000, 0x00ef6f8c },
12850 { MAC_MODE, TG3_FL_5705,
12851 0x00000000, 0x01ef6b8c },
12852 { MAC_STATUS, TG3_FL_NOT_5705,
12853 0x03800107, 0x00000000 },
12854 { MAC_STATUS, TG3_FL_5705,
12855 0x03800100, 0x00000000 },
12856 { MAC_ADDR_0_HIGH, 0x0000,
12857 0x00000000, 0x0000ffff },
12858 { MAC_ADDR_0_LOW, 0x0000,
12859 0x00000000, 0xffffffff },
12860 { MAC_RX_MTU_SIZE, 0x0000,
12861 0x00000000, 0x0000ffff },
12862 { MAC_TX_MODE, 0x0000,
12863 0x00000000, 0x00000070 },
12864 { MAC_TX_LENGTHS, 0x0000,
12865 0x00000000, 0x00003fff },
12866 { MAC_RX_MODE, TG3_FL_NOT_5705,
12867 0x00000000, 0x000007fc },
12868 { MAC_RX_MODE, TG3_FL_5705,
12869 0x00000000, 0x000007dc },
12870 { MAC_HASH_REG_0, 0x0000,
12871 0x00000000, 0xffffffff },
12872 { MAC_HASH_REG_1, 0x0000,
12873 0x00000000, 0xffffffff },
12874 { MAC_HASH_REG_2, 0x0000,
12875 0x00000000, 0xffffffff },
12876 { MAC_HASH_REG_3, 0x0000,
12877 0x00000000, 0xffffffff },
12878
12879 /* Receive Data and Receive BD Initiator Control Registers. */
12880 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12881 0x00000000, 0xffffffff },
12882 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12883 0x00000000, 0xffffffff },
12884 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12885 0x00000000, 0x00000003 },
12886 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12887 0x00000000, 0xffffffff },
12888 { RCVDBDI_STD_BD+0, 0x0000,
12889 0x00000000, 0xffffffff },
12890 { RCVDBDI_STD_BD+4, 0x0000,
12891 0x00000000, 0xffffffff },
12892 { RCVDBDI_STD_BD+8, 0x0000,
12893 0x00000000, 0xffff0002 },
12894 { RCVDBDI_STD_BD+0xc, 0x0000,
12895 0x00000000, 0xffffffff },
12896
12897 /* Receive BD Initiator Control Registers. */
12898 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12899 0x00000000, 0xffffffff },
12900 { RCVBDI_STD_THRESH, TG3_FL_5705,
12901 0x00000000, 0x000003ff },
12902 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12903 0x00000000, 0xffffffff },
12904
12905 /* Host Coalescing Control Registers. */
12906 { HOSTCC_MODE, TG3_FL_NOT_5705,
12907 0x00000000, 0x00000004 },
12908 { HOSTCC_MODE, TG3_FL_5705,
12909 0x00000000, 0x000000f6 },
12910 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12911 0x00000000, 0xffffffff },
12912 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12913 0x00000000, 0x000003ff },
12914 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12915 0x00000000, 0xffffffff },
12916 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12917 0x00000000, 0x000003ff },
12918 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12919 0x00000000, 0xffffffff },
12920 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12921 0x00000000, 0x000000ff },
12922 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12923 0x00000000, 0xffffffff },
12924 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12925 0x00000000, 0x000000ff },
12926 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12927 0x00000000, 0xffffffff },
12928 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12929 0x00000000, 0xffffffff },
12930 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12931 0x00000000, 0xffffffff },
12932 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12933 0x00000000, 0x000000ff },
12934 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12935 0x00000000, 0xffffffff },
12936 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12937 0x00000000, 0x000000ff },
12938 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12939 0x00000000, 0xffffffff },
12940 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12941 0x00000000, 0xffffffff },
12942 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12943 0x00000000, 0xffffffff },
12944 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12945 0x00000000, 0xffffffff },
12946 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12947 0x00000000, 0xffffffff },
12948 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12949 0xffffffff, 0x00000000 },
12950 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12951 0xffffffff, 0x00000000 },
12952
12953 /* Buffer Manager Control Registers. */
12954 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12955 0x00000000, 0x007fff80 },
12956 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12957 0x00000000, 0x007fffff },
12958 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12959 0x00000000, 0x0000003f },
12960 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12961 0x00000000, 0x000001ff },
12962 { BUFMGR_MB_HIGH_WATER, 0x0000,
12963 0x00000000, 0x000001ff },
12964 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12965 0xffffffff, 0x00000000 },
12966 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12967 0xffffffff, 0x00000000 },
12968
12969 /* Mailbox Registers */
12970 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12971 0x00000000, 0x000001ff },
12972 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12973 0x00000000, 0x000001ff },
12974 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12975 0x00000000, 0x000007ff },
12976 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12977 0x00000000, 0x000001ff },
12978
12979 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12980 };
12981
12982 is_5705 = is_5750 = 0;
12983 if (tg3_flag(tp, 5705_PLUS)) {
12984 is_5705 = 1;
12985 if (tg3_flag(tp, 5750_PLUS))
12986 is_5750 = 1;
12987 }
12988
12989 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12990 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12991 continue;
12992
12993 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12994 continue;
12995
12996 if (tg3_flag(tp, IS_5788) &&
12997 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12998 continue;
12999
13000 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13001 continue;
13002
13003 offset = (u32) reg_tbl[i].offset;
13004 read_mask = reg_tbl[i].read_mask;
13005 write_mask = reg_tbl[i].write_mask;
13006
13007 /* Save the original register content */
13008 save_val = tr32(offset);
13009
13010 /* Determine the read-only value. */
13011 read_val = save_val & read_mask;
13012
13013 /* Write zero to the register, then make sure the read-only bits
13014 * are not changed and the read/write bits are all zeros.
13015 */
13016 tw32(offset, 0);
13017
13018 val = tr32(offset);
13019
13020 /* Test the read-only and read/write bits. */
13021 if (((val & read_mask) != read_val) || (val & write_mask))
13022 goto out;
13023
13024 /* Write ones to all the bits defined by RdMask and WrMask, then
13025 * make sure the read-only bits are not changed and the
13026 * read/write bits are all ones.
13027 */
13028 tw32(offset, read_mask | write_mask);
13029
13030 val = tr32(offset);
13031
13032 /* Test the read-only bits. */
13033 if ((val & read_mask) != read_val)
13034 goto out;
13035
13036 /* Test the read/write bits. */
13037 if ((val & write_mask) != write_mask)
13038 goto out;
13039
13040 tw32(offset, save_val);
13041 }
13042
13043 return 0;
13044
13045 out:
13046 if (netif_msg_hw(tp))
13047 netdev_err(tp->dev,
13048 "Register test failed at offset %x\n", offset);
13049 tw32(offset, save_val);
13050 return -EIO;
13051 }
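
/* The table-driven test above probes each register twice: write 0 and
 * verify the read-only bits (read_mask) are unchanged while all
 * write_mask bits read back 0, then write read_mask|write_mask and
 * verify the inverse. The saved value is restored on both the success
 * and failure paths.
 */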
13052
13053 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13054 {
13055 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13056 int i;
13057 u32 j;
13058
13059 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13060 for (j = 0; j < len; j += 4) {
13061 u32 val;
13062
13063 tg3_write_mem(tp, offset + j, test_pattern[i]);
13064 tg3_read_mem(tp, offset + j, &val);
13065 if (val != test_pattern[i])
13066 return -EIO;
13067 }
13068 }
13069 return 0;
13070 }
13071
13072 static int tg3_test_memory(struct tg3 *tp)
13073 {
13074 static struct mem_entry {
13075 u32 offset;
13076 u32 len;
13077 } mem_tbl_570x[] = {
13078 { 0x00000000, 0x00b50},
13079 { 0x00002000, 0x1c000},
13080 { 0xffffffff, 0x00000}
13081 }, mem_tbl_5705[] = {
13082 { 0x00000100, 0x0000c},
13083 { 0x00000200, 0x00008},
13084 { 0x00004000, 0x00800},
13085 { 0x00006000, 0x01000},
13086 { 0x00008000, 0x02000},
13087 { 0x00010000, 0x0e000},
13088 { 0xffffffff, 0x00000}
13089 }, mem_tbl_5755[] = {
13090 { 0x00000200, 0x00008},
13091 { 0x00004000, 0x00800},
13092 { 0x00006000, 0x00800},
13093 { 0x00008000, 0x02000},
13094 { 0x00010000, 0x0c000},
13095 { 0xffffffff, 0x00000}
13096 }, mem_tbl_5906[] = {
13097 { 0x00000200, 0x00008},
13098 { 0x00004000, 0x00400},
13099 { 0x00006000, 0x00400},
13100 { 0x00008000, 0x01000},
13101 { 0x00010000, 0x01000},
13102 { 0xffffffff, 0x00000}
13103 }, mem_tbl_5717[] = {
13104 { 0x00000200, 0x00008},
13105 { 0x00010000, 0x0a000},
13106 { 0x00020000, 0x13c00},
13107 { 0xffffffff, 0x00000}
13108 }, mem_tbl_57765[] = {
13109 { 0x00000200, 0x00008},
13110 { 0x00004000, 0x00800},
13111 { 0x00006000, 0x09800},
13112 { 0x00010000, 0x0a000},
13113 { 0xffffffff, 0x00000}
13114 };
13115 struct mem_entry *mem_tbl;
13116 int err = 0;
13117 int i;
13118
13119 if (tg3_flag(tp, 5717_PLUS))
13120 mem_tbl = mem_tbl_5717;
13121 else if (tg3_flag(tp, 57765_CLASS) ||
13122 tg3_asic_rev(tp) == ASIC_REV_5762)
13123 mem_tbl = mem_tbl_57765;
13124 else if (tg3_flag(tp, 5755_PLUS))
13125 mem_tbl = mem_tbl_5755;
13126 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13127 mem_tbl = mem_tbl_5906;
13128 else if (tg3_flag(tp, 5705_PLUS))
13129 mem_tbl = mem_tbl_5705;
13130 else
13131 mem_tbl = mem_tbl_570x;
13132
13133 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13134 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13135 if (err)
13136 break;
13137 }
13138
13139 return err;
13140 }
13141
13142 #define TG3_TSO_MSS 500
13143
13144 #define TG3_TSO_IP_HDR_LEN 20
13145 #define TG3_TSO_TCP_HDR_LEN 20
13146 #define TG3_TSO_TCP_OPT_LEN 12
13147
13148 static const u8 tg3_tso_header[] = {
13149 0x08, 0x00,
13150 0x45, 0x00, 0x00, 0x00,
13151 0x00, 0x00, 0x40, 0x00,
13152 0x40, 0x06, 0x00, 0x00,
13153 0x0a, 0x00, 0x00, 0x01,
13154 0x0a, 0x00, 0x00, 0x02,
13155 0x0d, 0x00, 0xe0, 0x00,
13156 0x00, 0x00, 0x01, 0x00,
13157 0x00, 0x00, 0x02, 0x00,
13158 0x80, 0x10, 0x10, 0x00,
13159 0x14, 0x09, 0x00, 0x00,
13160 0x01, 0x01, 0x08, 0x0a,
13161 0x11, 0x11, 0x11, 0x11,
13162 0x11, 0x11, 0x11, 0x11,
13163 };
13164
13165 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13166 {
13167 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13168 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13169 u32 budget;
13170 struct sk_buff *skb;
13171 u8 *tx_data, *rx_data;
13172 dma_addr_t map;
13173 int num_pkts, tx_len, rx_len, i, err;
13174 struct tg3_rx_buffer_desc *desc;
13175 struct tg3_napi *tnapi, *rnapi;
13176 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13177
13178 tnapi = &tp->napi[0];
13179 rnapi = &tp->napi[0];
13180 if (tp->irq_cnt > 1) {
13181 if (tg3_flag(tp, ENABLE_RSS))
13182 rnapi = &tp->napi[1];
13183 if (tg3_flag(tp, ENABLE_TSS))
13184 tnapi = &tp->napi[1];
13185 }
13186 coal_now = tnapi->coal_now | rnapi->coal_now;
13187
13188 err = -EIO;
13189
13190 tx_len = pktsz;
13191 skb = netdev_alloc_skb(tp->dev, tx_len);
13192 if (!skb)
13193 return -ENOMEM;
13194
13195 tx_data = skb_put(skb, tx_len);
13196 memcpy(tx_data, tp->dev->dev_addr, 6);
13197 memset(tx_data + 6, 0x0, 8);
13198
13199 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13200
13201 if (tso_loopback) {
13202 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13203
13204 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13205 TG3_TSO_TCP_OPT_LEN;
13206
13207 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13208 sizeof(tg3_tso_header));
13209 mss = TG3_TSO_MSS;
13210
13211 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13212 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13213
13214 /* Set the total length field in the IP header */
13215 iph->tot_len = htons((u16)(mss + hdr_len));
13216
13217 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13218 TXD_FLAG_CPU_POST_DMA);
13219
13220 if (tg3_flag(tp, HW_TSO_1) ||
13221 tg3_flag(tp, HW_TSO_2) ||
13222 tg3_flag(tp, HW_TSO_3)) {
13223 struct tcphdr *th;
13224 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13225 th = (struct tcphdr *)&tx_data[val];
13226 th->check = 0;
13227 } else
13228 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13229
13230 if (tg3_flag(tp, HW_TSO_3)) {
13231 mss |= (hdr_len & 0xc) << 12;
13232 if (hdr_len & 0x10)
13233 base_flags |= 0x00000010;
13234 base_flags |= (hdr_len & 0x3e0) << 5;
13235 } else if (tg3_flag(tp, HW_TSO_2))
13236 mss |= hdr_len << 9;
13237 else if (tg3_flag(tp, HW_TSO_1) ||
13238 tg3_asic_rev(tp) == ASIC_REV_5705) {
13239 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13240 } else {
13241 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13242 }
13243
13244 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13245 } else {
13246 num_pkts = 1;
13247 data_off = ETH_HLEN;
13248
13249 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13250 tx_len > VLAN_ETH_FRAME_LEN)
13251 base_flags |= TXD_FLAG_JMB_PKT;
13252 }
13253
13254 for (i = data_off; i < tx_len; i++)
13255 tx_data[i] = (u8) (i & 0xff);
13256
13257 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13258 if (pci_dma_mapping_error(tp->pdev, map)) {
13259 dev_kfree_skb(skb);
13260 return -EIO;
13261 }
13262
13263 val = tnapi->tx_prod;
13264 tnapi->tx_buffers[val].skb = skb;
13265 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13266
13267 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13268 rnapi->coal_now);
13269
13270 udelay(10);
13271
13272 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13273
13274 budget = tg3_tx_avail(tnapi);
13275 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13276 base_flags | TXD_FLAG_END, mss, 0)) {
13277 tnapi->tx_buffers[val].skb = NULL;
13278 dev_kfree_skb(skb);
13279 return -EIO;
13280 }
13281
13282 tnapi->tx_prod++;
13283
13284 /* Sync BD data before updating mailbox */
13285 wmb();
13286
13287 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13288 tr32_mailbox(tnapi->prodmbox);
13289
13290 udelay(10);
13291
13292 	/* Poll for up to 350 usec (35 x 10 usec) to allow enough time on some 10/100 Mbps devices. */
13293 for (i = 0; i < 35; i++) {
13294 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13295 coal_now);
13296
13297 udelay(10);
13298
13299 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13300 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13301 if ((tx_idx == tnapi->tx_prod) &&
13302 (rx_idx == (rx_start_idx + num_pkts)))
13303 break;
13304 }
13305
13306 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13307 dev_kfree_skb(skb);
13308
13309 if (tx_idx != tnapi->tx_prod)
13310 goto out;
13311
13312 if (rx_idx != rx_start_idx + num_pkts)
13313 goto out;
13314
13315 val = data_off;
13316 while (rx_idx != rx_start_idx) {
13317 desc = &rnapi->rx_rcb[rx_start_idx++];
13318 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13319 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13320
13321 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13322 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13323 goto out;
13324
13325 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13326 - ETH_FCS_LEN;
13327
13328 if (!tso_loopback) {
13329 if (rx_len != tx_len)
13330 goto out;
13331
13332 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13333 if (opaque_key != RXD_OPAQUE_RING_STD)
13334 goto out;
13335 } else {
13336 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13337 goto out;
13338 }
13339 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13340 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13341 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13342 goto out;
13343 }
13344
13345 if (opaque_key == RXD_OPAQUE_RING_STD) {
13346 rx_data = tpr->rx_std_buffers[desc_idx].data;
13347 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13348 mapping);
13349 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13350 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13351 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13352 mapping);
13353 } else
13354 goto out;
13355
13356 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13357 PCI_DMA_FROMDEVICE);
13358
13359 rx_data += TG3_RX_OFFSET(tp);
13360 for (i = data_off; i < rx_len; i++, val++) {
13361 if (*(rx_data + i) != (u8) (val & 0xff))
13362 goto out;
13363 }
13364 }
13365
13366 err = 0;
13367
13368 /* tg3_free_rings will unmap and free the rx_data */
13369 out:
13370 return err;
13371 }
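
/* Loopback flow in brief: build a frame whose payload bytes encode
 * their own offsets, map it, post a single TX BD through the producer
 * mailbox, poll the status block until the tx consumer and rx producer
 * indices advance, then verify ring placement, error bits, length and
 * every payload byte.
 */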
13372
13373 #define TG3_STD_LOOPBACK_FAILED 1
13374 #define TG3_JMB_LOOPBACK_FAILED 2
13375 #define TG3_TSO_LOOPBACK_FAILED 4
13376 #define TG3_LOOPBACK_FAILED \
13377 (TG3_STD_LOOPBACK_FAILED | \
13378 TG3_JMB_LOOPBACK_FAILED | \
13379 TG3_TSO_LOOPBACK_FAILED)
13380
13381 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13382 {
13383 int err = -EIO;
13384 u32 eee_cap;
13385 u32 jmb_pkt_sz = 9000;
13386
13387 if (tp->dma_limit)
13388 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13389
13390 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13391 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13392
13393 if (!netif_running(tp->dev)) {
13394 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13395 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13396 if (do_extlpbk)
13397 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13398 goto done;
13399 }
13400
13401 err = tg3_reset_hw(tp, true);
13402 if (err) {
13403 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13404 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13405 if (do_extlpbk)
13406 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13407 goto done;
13408 }
13409
13410 if (tg3_flag(tp, ENABLE_RSS)) {
13411 int i;
13412
13413 /* Reroute all rx packets to the 1st queue */
13414 for (i = MAC_RSS_INDIR_TBL_0;
13415 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13416 tw32(i, 0x0);
13417 }
13418
13419 /* HW errata - mac loopback fails in some cases on 5780.
13420 * Normal traffic and PHY loopback are not affected by
13421 * errata. Also, the MAC loopback test is deprecated for
13422 * all newer ASIC revisions.
13423 */
13424 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13425 !tg3_flag(tp, CPMU_PRESENT)) {
13426 tg3_mac_loopback(tp, true);
13427
13428 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13429 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13430
13431 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13432 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13433 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13434
13435 tg3_mac_loopback(tp, false);
13436 }
13437
13438 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13439 !tg3_flag(tp, USE_PHYLIB)) {
13440 int i;
13441
13442 tg3_phy_lpbk_set(tp, 0, false);
13443
13444 /* Wait for link */
13445 for (i = 0; i < 100; i++) {
13446 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13447 break;
13448 mdelay(1);
13449 }
13450
13451 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13452 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13453 if (tg3_flag(tp, TSO_CAPABLE) &&
13454 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13455 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13456 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13457 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13458 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13459
13460 if (do_extlpbk) {
13461 tg3_phy_lpbk_set(tp, 0, true);
13462
13463 /* All link indications report up, but the hardware
13464 * isn't really ready for about 20 msec. Double it
13465 * to be sure.
13466 */
13467 mdelay(40);
13468
13469 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13470 data[TG3_EXT_LOOPB_TEST] |=
13471 TG3_STD_LOOPBACK_FAILED;
13472 if (tg3_flag(tp, TSO_CAPABLE) &&
13473 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13474 data[TG3_EXT_LOOPB_TEST] |=
13475 TG3_TSO_LOOPBACK_FAILED;
13476 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13477 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13478 data[TG3_EXT_LOOPB_TEST] |=
13479 TG3_JMB_LOOPBACK_FAILED;
13480 }
13481
13482 /* Re-enable gphy autopowerdown. */
13483 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13484 tg3_phy_toggle_apd(tp, true);
13485 }
13486
13487 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13488 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13489
13490 done:
13491 tp->phy_flags |= eee_cap;
13492
13493 return err;
13494 }
13495
13496 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13497 u64 *data)
13498 {
13499 struct tg3 *tp = netdev_priv(dev);
13500 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13501
13502 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13503 if (tg3_power_up(tp)) {
13504 etest->flags |= ETH_TEST_FL_FAILED;
13505 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13506 return;
13507 }
13508 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13509 }
13510
13511 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13512
13513 if (tg3_test_nvram(tp) != 0) {
13514 etest->flags |= ETH_TEST_FL_FAILED;
13515 data[TG3_NVRAM_TEST] = 1;
13516 }
13517 if (!doextlpbk && tg3_test_link(tp)) {
13518 etest->flags |= ETH_TEST_FL_FAILED;
13519 data[TG3_LINK_TEST] = 1;
13520 }
13521 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13522 int err, err2 = 0, irq_sync = 0;
13523
13524 if (netif_running(dev)) {
13525 tg3_phy_stop(tp);
13526 tg3_netif_stop(tp);
13527 irq_sync = 1;
13528 }
13529
13530 tg3_full_lock(tp, irq_sync);
13531 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13532 err = tg3_nvram_lock(tp);
13533 tg3_halt_cpu(tp, RX_CPU_BASE);
13534 if (!tg3_flag(tp, 5705_PLUS))
13535 tg3_halt_cpu(tp, TX_CPU_BASE);
13536 if (!err)
13537 tg3_nvram_unlock(tp);
13538
13539 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13540 tg3_phy_reset(tp);
13541
13542 if (tg3_test_registers(tp) != 0) {
13543 etest->flags |= ETH_TEST_FL_FAILED;
13544 data[TG3_REGISTER_TEST] = 1;
13545 }
13546
13547 if (tg3_test_memory(tp) != 0) {
13548 etest->flags |= ETH_TEST_FL_FAILED;
13549 data[TG3_MEMORY_TEST] = 1;
13550 }
13551
13552 if (doextlpbk)
13553 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13554
13555 if (tg3_test_loopback(tp, data, doextlpbk))
13556 etest->flags |= ETH_TEST_FL_FAILED;
13557
13558 tg3_full_unlock(tp);
13559
13560 if (tg3_test_interrupt(tp) != 0) {
13561 etest->flags |= ETH_TEST_FL_FAILED;
13562 data[TG3_INTERRUPT_TEST] = 1;
13563 }
13564
13565 tg3_full_lock(tp, 0);
13566
13567 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13568 if (netif_running(dev)) {
13569 tg3_flag_set(tp, INIT_COMPLETE);
13570 err2 = tg3_restart_hw(tp, true);
13571 if (!err2)
13572 tg3_netif_start(tp);
13573 }
13574
13575 tg3_full_unlock(tp);
13576
13577 if (irq_sync && !err2)
13578 tg3_phy_start(tp);
13579 }
13580 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13581 tg3_power_down_prepare(tp);
13582
13583 }
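
/* The suite maps to `ethtool -t eth0 offline` (all tests, with the
 * device halted and restarted around them), `ethtool -t eth0 online`
 * (NVRAM and link checks only), and `ethtool -t eth0 external_lb` for
 * the external-loopback variant gated on ETH_TEST_FL_EXTERNAL_LB.
 */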
13584
13585 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13586 struct ifreq *ifr, int cmd)
13587 {
13588 struct tg3 *tp = netdev_priv(dev);
13589 struct hwtstamp_config stmpconf;
13590
13591 if (!tg3_flag(tp, PTP_CAPABLE))
13592 return -EINVAL;
13593
13594 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13595 return -EFAULT;
13596
13597 if (stmpconf.flags)
13598 return -EINVAL;
13599
13600 switch (stmpconf.tx_type) {
13601 case HWTSTAMP_TX_ON:
13602 tg3_flag_set(tp, TX_TSTAMP_EN);
13603 break;
13604 case HWTSTAMP_TX_OFF:
13605 tg3_flag_clear(tp, TX_TSTAMP_EN);
13606 break;
13607 default:
13608 return -ERANGE;
13609 }
13610
13611 switch (stmpconf.rx_filter) {
13612 case HWTSTAMP_FILTER_NONE:
13613 tp->rxptpctl = 0;
13614 break;
13615 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13616 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13617 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13618 break;
13619 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13620 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13621 TG3_RX_PTP_CTL_SYNC_EVNT;
13622 break;
13623 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13624 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13625 TG3_RX_PTP_CTL_DELAY_REQ;
13626 break;
13627 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13628 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13629 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13630 break;
13631 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13632 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13633 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13634 break;
13635 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13636 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13637 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13638 break;
13639 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13640 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13641 TG3_RX_PTP_CTL_SYNC_EVNT;
13642 break;
13643 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13644 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13645 TG3_RX_PTP_CTL_SYNC_EVNT;
13646 break;
13647 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13648 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13649 TG3_RX_PTP_CTL_SYNC_EVNT;
13650 break;
13651 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13652 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13653 TG3_RX_PTP_CTL_DELAY_REQ;
13654 break;
13655 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13656 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13657 TG3_RX_PTP_CTL_DELAY_REQ;
13658 break;
13659 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13660 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13661 TG3_RX_PTP_CTL_DELAY_REQ;
13662 break;
13663 default:
13664 return -ERANGE;
13665 }
13666
13667 if (netif_running(dev) && tp->rxptpctl)
13668 tw32(TG3_RX_PTP_CTL,
13669 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13670
13671 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13672 -EFAULT : 0;
13673 }
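
/* Minimal userspace sketch for the handler above (illustrative only;
 * error handling omitted):
 *
 *   struct hwtstamp_config cfg = {
 *           .tx_type   = HWTSTAMP_TX_ON,
 *           .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *   };
 *   struct ifreq ifr;
 *   int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *   memset(&ifr, 0, sizeof(ifr));
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */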
13674
13675 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13676 {
13677 struct mii_ioctl_data *data = if_mii(ifr);
13678 struct tg3 *tp = netdev_priv(dev);
13679 int err;
13680
13681 if (tg3_flag(tp, USE_PHYLIB)) {
13682 struct phy_device *phydev;
13683 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13684 return -EAGAIN;
13685 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13686 return phy_mii_ioctl(phydev, ifr, cmd);
13687 }
13688
13689 switch (cmd) {
13690 case SIOCGMIIPHY:
13691 data->phy_id = tp->phy_addr;
13692
13693 /* fallthru */
13694 case SIOCGMIIREG: {
13695 u32 mii_regval;
13696
13697 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13698 break; /* We have no PHY */
13699
13700 if (!netif_running(dev))
13701 return -EAGAIN;
13702
13703 spin_lock_bh(&tp->lock);
13704 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13705 data->reg_num & 0x1f, &mii_regval);
13706 spin_unlock_bh(&tp->lock);
13707
13708 data->val_out = mii_regval;
13709
13710 return err;
13711 }
13712
13713 case SIOCSMIIREG:
13714 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13715 break; /* We have no PHY */
13716
13717 if (!netif_running(dev))
13718 return -EAGAIN;
13719
13720 spin_lock_bh(&tp->lock);
13721 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13722 data->reg_num & 0x1f, data->val_in);
13723 spin_unlock_bh(&tp->lock);
13724
13725 return err;
13726
13727 case SIOCSHWTSTAMP:
13728 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13729
13730 default:
13731 /* do nothing */
13732 break;
13733 }
13734 return -EOPNOTSUPP;
13735 }
13736
13737 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13738 {
13739 struct tg3 *tp = netdev_priv(dev);
13740
13741 memcpy(ec, &tp->coal, sizeof(*ec));
13742 return 0;
13743 }
13744
13745 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13746 {
13747 struct tg3 *tp = netdev_priv(dev);
13748 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13749 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13750
13751 if (!tg3_flag(tp, 5705_PLUS)) {
13752 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13753 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13754 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13755 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13756 }
13757
13758 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13759 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13760 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13761 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13762 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13763 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13764 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13765 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13766 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13767 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13768 return -EINVAL;
13769
13770 /* No rx interrupts will be generated if both are zero */
13771 if ((ec->rx_coalesce_usecs == 0) &&
13772 (ec->rx_max_coalesced_frames == 0))
13773 return -EINVAL;
13774
13775 /* No tx interrupts will be generated if both are zero */
13776 if ((ec->tx_coalesce_usecs == 0) &&
13777 (ec->tx_max_coalesced_frames == 0))
13778 return -EINVAL;
13779
13780 /* Only copy relevant parameters, ignore all others. */
13781 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13782 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13783 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13784 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13785 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13786 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13787 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13788 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13789 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13790
13791 if (netif_running(dev)) {
13792 tg3_full_lock(tp, 0);
13793 __tg3_set_coalesce(tp, &tp->coal);
13794 tg3_full_unlock(tp);
13795 }
13796 return 0;
13797 }
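/* The bounds checks above are what a user hits when tuning interrupt
 * coalescing through the standard ethtool interface, e.g.
 * (illustrative values):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5 tx-usecs 72 tx-frames 53
 */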
13798
13799 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13800 {
13801 struct tg3 *tp = netdev_priv(dev);
13802
13803 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13804 netdev_warn(tp->dev, "Board does not support EEE!\n");
13805 return -EOPNOTSUPP;
13806 }
13807
13808 if (edata->advertised != tp->eee.advertised) {
13809 netdev_warn(tp->dev,
13810 "Direct manipulation of EEE advertisement is not supported\n");
13811 return -EINVAL;
13812 }
13813
13814 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13815 netdev_warn(tp->dev,
13816 "Maximal Tx Lpi timer supported is %#x(u)\n",
13817 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13818 return -EINVAL;
13819 }
13820
13821 tp->eee = *edata;
13822
13823 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13824 tg3_warn_mgmt_link_flap(tp);
13825
13826 if (netif_running(tp->dev)) {
13827 tg3_full_lock(tp, 0);
13828 tg3_setup_eee(tp);
13829 tg3_phy_reset(tp);
13830 tg3_full_unlock(tp);
13831 }
13832
13833 return 0;
13834 }
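/* The EEE restrictions above surface through ethtool's EEE interface;
 * advertisement changes are rejected, so only state and the LPI timer
 * are tunable, e.g. (illustrative):
 *
 *	ethtool --set-eee eth0 tx-lpi on tx-timer 2047
 */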
13835
13836 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13837 {
13838 struct tg3 *tp = netdev_priv(dev);
13839
13840 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13841 netdev_warn(tp->dev,
13842 "Board does not support EEE!\n");
13843 return -EOPNOTSUPP;
13844 }
13845
13846 *edata = tp->eee;
13847 return 0;
13848 }
13849
13850 static const struct ethtool_ops tg3_ethtool_ops = {
13851 .get_settings = tg3_get_settings,
13852 .set_settings = tg3_set_settings,
13853 .get_drvinfo = tg3_get_drvinfo,
13854 .get_regs_len = tg3_get_regs_len,
13855 .get_regs = tg3_get_regs,
13856 .get_wol = tg3_get_wol,
13857 .set_wol = tg3_set_wol,
13858 .get_msglevel = tg3_get_msglevel,
13859 .set_msglevel = tg3_set_msglevel,
13860 .nway_reset = tg3_nway_reset,
13861 .get_link = ethtool_op_get_link,
13862 .get_eeprom_len = tg3_get_eeprom_len,
13863 .get_eeprom = tg3_get_eeprom,
13864 .set_eeprom = tg3_set_eeprom,
13865 .get_ringparam = tg3_get_ringparam,
13866 .set_ringparam = tg3_set_ringparam,
13867 .get_pauseparam = tg3_get_pauseparam,
13868 .set_pauseparam = tg3_set_pauseparam,
13869 .self_test = tg3_self_test,
13870 .get_strings = tg3_get_strings,
13871 .set_phys_id = tg3_set_phys_id,
13872 .get_ethtool_stats = tg3_get_ethtool_stats,
13873 .get_coalesce = tg3_get_coalesce,
13874 .set_coalesce = tg3_set_coalesce,
13875 .get_sset_count = tg3_get_sset_count,
13876 .get_rxnfc = tg3_get_rxnfc,
13877 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13878 .get_rxfh_indir = tg3_get_rxfh_indir,
13879 .set_rxfh_indir = tg3_set_rxfh_indir,
13880 .get_channels = tg3_get_channels,
13881 .set_channels = tg3_set_channels,
13882 .get_ts_info = tg3_get_ts_info,
13883 .get_eee = tg3_get_eee,
13884 .set_eee = tg3_set_eee,
13885 };
13886
13887 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13888 struct rtnl_link_stats64 *stats)
13889 {
13890 struct tg3 *tp = netdev_priv(dev);
13891
13892 spin_lock_bh(&tp->lock);
13893 if (!tp->hw_stats) {
13894 spin_unlock_bh(&tp->lock);
13895 return &tp->net_stats_prev;
13896 }
13897
13898 tg3_get_nstats(tp, stats);
13899 spin_unlock_bh(&tp->lock);
13900
13901 return stats;
13902 }
13903
13904 static void tg3_set_rx_mode(struct net_device *dev)
13905 {
13906 struct tg3 *tp = netdev_priv(dev);
13907
13908 if (!netif_running(dev))
13909 return;
13910
13911 tg3_full_lock(tp, 0);
13912 __tg3_set_rx_mode(dev);
13913 tg3_full_unlock(tp);
13914 }
13915
13916 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13917 int new_mtu)
13918 {
13919 dev->mtu = new_mtu;
13920
13921 if (new_mtu > ETH_DATA_LEN) {
13922 if (tg3_flag(tp, 5780_CLASS)) {
13923 netdev_update_features(dev);
13924 tg3_flag_clear(tp, TSO_CAPABLE);
13925 } else {
13926 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13927 }
13928 } else {
13929 if (tg3_flag(tp, 5780_CLASS)) {
13930 tg3_flag_set(tp, TSO_CAPABLE);
13931 netdev_update_features(dev);
13932 }
13933 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13934 }
13935 }
13936
13937 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13938 {
13939 struct tg3 *tp = netdev_priv(dev);
13940 int err;
13941 bool reset_phy = false;
13942
13943 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13944 return -EINVAL;
13945
13946 if (!netif_running(dev)) {
13947 /* We'll just catch it later when the
13948 * device is brought up.
13949 */
13950 tg3_set_mtu(dev, tp, new_mtu);
13951 return 0;
13952 }
13953
13954 tg3_phy_stop(tp);
13955
13956 tg3_netif_stop(tp);
13957
13958 tg3_full_lock(tp, 1);
13959
13960 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13961
13962 tg3_set_mtu(dev, tp, new_mtu);
13963
13964 /* Reset the PHY, otherwise the read DMA engine will be left in a
13965 * mode that breaks all requests up into 256-byte chunks.
13966 */
13967 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13968 reset_phy = true;
13969
13970 err = tg3_restart_hw(tp, reset_phy);
13971
13972 if (!err)
13973 tg3_netif_start(tp);
13974
13975 tg3_full_unlock(tp);
13976
13977 if (!err)
13978 tg3_phy_start(tp);
13979
13980 return err;
13981 }
13982
13983 static const struct net_device_ops tg3_netdev_ops = {
13984 .ndo_open = tg3_open,
13985 .ndo_stop = tg3_close,
13986 .ndo_start_xmit = tg3_start_xmit,
13987 .ndo_get_stats64 = tg3_get_stats64,
13988 .ndo_validate_addr = eth_validate_addr,
13989 .ndo_set_rx_mode = tg3_set_rx_mode,
13990 .ndo_set_mac_address = tg3_set_mac_addr,
13991 .ndo_do_ioctl = tg3_ioctl,
13992 .ndo_tx_timeout = tg3_tx_timeout,
13993 .ndo_change_mtu = tg3_change_mtu,
13994 .ndo_fix_features = tg3_fix_features,
13995 .ndo_set_features = tg3_set_features,
13996 #ifdef CONFIG_NET_POLL_CONTROLLER
13997 .ndo_poll_controller = tg3_poll_controller,
13998 #endif
13999 };
14000
14001 static void tg3_get_eeprom_size(struct tg3 *tp)
14002 {
14003 u32 cursize, val, magic;
14004
14005 tp->nvram_size = EEPROM_CHIP_SIZE;
14006
14007 if (tg3_nvram_read(tp, 0, &magic) != 0)
14008 return;
14009
14010 if ((magic != TG3_EEPROM_MAGIC) &&
14011 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14012 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14013 return;
14014
14015 /*
14016 * Size the chip by reading offsets at increasing powers of two.
14017 * When we encounter our validation signature, we know the addressing
14018 * has wrapped around, and thus have our chip size.
14019 */
14020 cursize = 0x10;
14021
14022 while (cursize < tp->nvram_size) {
14023 if (tg3_nvram_read(tp, cursize, &val) != 0)
14024 return;
14025
14026 if (val == magic)
14027 break;
14028
14029 cursize <<= 1;
14030 }
14031
14032 tp->nvram_size = cursize;
14033 }
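/* Worked example of the sizing loop above, assuming a 512-byte part:
 * offsets 0x10, 0x20, 0x40, ... are probed until the read at 0x200
 * wraps around to offset 0 and returns the magic signature, so
 * tp->nvram_size ends up as 0x200 (512 bytes).
 */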
14034
14035 static void tg3_get_nvram_size(struct tg3 *tp)
14036 {
14037 u32 val;
14038
14039 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14040 return;
14041
14042 /* Selfboot format */
14043 if (val != TG3_EEPROM_MAGIC) {
14044 tg3_get_eeprom_size(tp);
14045 return;
14046 }
14047
14048 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14049 if (val != 0) {
14050 /* This is confusing. We want to operate on the
14051 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14052 * call will read from NVRAM and byteswap the data
14053 * according to the byteswapping settings for all
14054 * other register accesses. This ensures the data we
14055 * want will always reside in the lower 16-bits.
14056 * However, the data in NVRAM is in LE format, which
14057 * means the data from the NVRAM read will always be
14058 * opposite the endianness of the CPU. The 16-bit
14059 * byteswap then brings the data to CPU endianness.
14060 */
14061 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14062 return;
14063 }
14064 }
14065 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14066 }
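/* Example of the swab16() step above: a stored little-endian size of
 * 512 (0x0200) that lands in the lower 16 bits as 0x0002 is restored
 * by swab16() and yields 512 * 1024 bytes.
 */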
14067
14068 static void tg3_get_nvram_info(struct tg3 *tp)
14069 {
14070 u32 nvcfg1;
14071
14072 nvcfg1 = tr32(NVRAM_CFG1);
14073 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14074 tg3_flag_set(tp, FLASH);
14075 } else {
14076 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14077 tw32(NVRAM_CFG1, nvcfg1);
14078 }
14079
14080 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14081 tg3_flag(tp, 5780_CLASS)) {
14082 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14083 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14084 tp->nvram_jedecnum = JEDEC_ATMEL;
14085 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14086 tg3_flag_set(tp, NVRAM_BUFFERED);
14087 break;
14088 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14089 tp->nvram_jedecnum = JEDEC_ATMEL;
14090 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14091 break;
14092 case FLASH_VENDOR_ATMEL_EEPROM:
14093 tp->nvram_jedecnum = JEDEC_ATMEL;
14094 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14095 tg3_flag_set(tp, NVRAM_BUFFERED);
14096 break;
14097 case FLASH_VENDOR_ST:
14098 tp->nvram_jedecnum = JEDEC_ST;
14099 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14100 tg3_flag_set(tp, NVRAM_BUFFERED);
14101 break;
14102 case FLASH_VENDOR_SAIFUN:
14103 tp->nvram_jedecnum = JEDEC_SAIFUN;
14104 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14105 break;
14106 case FLASH_VENDOR_SST_SMALL:
14107 case FLASH_VENDOR_SST_LARGE:
14108 tp->nvram_jedecnum = JEDEC_SST;
14109 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14110 break;
14111 }
14112 } else {
14113 tp->nvram_jedecnum = JEDEC_ATMEL;
14114 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14115 tg3_flag_set(tp, NVRAM_BUFFERED);
14116 }
14117 }
14118
14119 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14120 {
14121 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14122 case FLASH_5752PAGE_SIZE_256:
14123 tp->nvram_pagesize = 256;
14124 break;
14125 case FLASH_5752PAGE_SIZE_512:
14126 tp->nvram_pagesize = 512;
14127 break;
14128 case FLASH_5752PAGE_SIZE_1K:
14129 tp->nvram_pagesize = 1024;
14130 break;
14131 case FLASH_5752PAGE_SIZE_2K:
14132 tp->nvram_pagesize = 2048;
14133 break;
14134 case FLASH_5752PAGE_SIZE_4K:
14135 tp->nvram_pagesize = 4096;
14136 break;
14137 case FLASH_5752PAGE_SIZE_264:
14138 tp->nvram_pagesize = 264;
14139 break;
14140 case FLASH_5752PAGE_SIZE_528:
14141 tp->nvram_pagesize = 528;
14142 break;
14143 }
14144 }
14145
14146 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14147 {
14148 u32 nvcfg1;
14149
14150 nvcfg1 = tr32(NVRAM_CFG1);
14151
14152 /* NVRAM protection for TPM */
14153 if (nvcfg1 & (1 << 27))
14154 tg3_flag_set(tp, PROTECTED_NVRAM);
14155
14156 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14157 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14158 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14159 tp->nvram_jedecnum = JEDEC_ATMEL;
14160 tg3_flag_set(tp, NVRAM_BUFFERED);
14161 break;
14162 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14163 tp->nvram_jedecnum = JEDEC_ATMEL;
14164 tg3_flag_set(tp, NVRAM_BUFFERED);
14165 tg3_flag_set(tp, FLASH);
14166 break;
14167 case FLASH_5752VENDOR_ST_M45PE10:
14168 case FLASH_5752VENDOR_ST_M45PE20:
14169 case FLASH_5752VENDOR_ST_M45PE40:
14170 tp->nvram_jedecnum = JEDEC_ST;
14171 tg3_flag_set(tp, NVRAM_BUFFERED);
14172 tg3_flag_set(tp, FLASH);
14173 break;
14174 }
14175
14176 if (tg3_flag(tp, FLASH)) {
14177 tg3_nvram_get_pagesize(tp, nvcfg1);
14178 } else {
14179 /* For eeprom, set pagesize to maximum eeprom size */
14180 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14181
14182 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14183 tw32(NVRAM_CFG1, nvcfg1);
14184 }
14185 }
14186
14187 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14188 {
14189 u32 nvcfg1, protect = 0;
14190
14191 nvcfg1 = tr32(NVRAM_CFG1);
14192
14193 /* NVRAM protection for TPM */
14194 if (nvcfg1 & (1 << 27)) {
14195 tg3_flag_set(tp, PROTECTED_NVRAM);
14196 protect = 1;
14197 }
14198
14199 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14200 switch (nvcfg1) {
14201 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14202 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14203 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14204 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14205 tp->nvram_jedecnum = JEDEC_ATMEL;
14206 tg3_flag_set(tp, NVRAM_BUFFERED);
14207 tg3_flag_set(tp, FLASH);
14208 tp->nvram_pagesize = 264;
14209 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14210 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14211 tp->nvram_size = (protect ? 0x3e200 :
14212 TG3_NVRAM_SIZE_512KB);
14213 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14214 tp->nvram_size = (protect ? 0x1f200 :
14215 TG3_NVRAM_SIZE_256KB);
14216 else
14217 tp->nvram_size = (protect ? 0x1f200 :
14218 TG3_NVRAM_SIZE_128KB);
14219 break;
14220 case FLASH_5752VENDOR_ST_M45PE10:
14221 case FLASH_5752VENDOR_ST_M45PE20:
14222 case FLASH_5752VENDOR_ST_M45PE40:
14223 tp->nvram_jedecnum = JEDEC_ST;
14224 tg3_flag_set(tp, NVRAM_BUFFERED);
14225 tg3_flag_set(tp, FLASH);
14226 tp->nvram_pagesize = 256;
14227 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14228 tp->nvram_size = (protect ?
14229 TG3_NVRAM_SIZE_64KB :
14230 TG3_NVRAM_SIZE_128KB);
14231 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14232 tp->nvram_size = (protect ?
14233 TG3_NVRAM_SIZE_64KB :
14234 TG3_NVRAM_SIZE_256KB);
14235 else
14236 tp->nvram_size = (protect ?
14237 TG3_NVRAM_SIZE_128KB :
14238 TG3_NVRAM_SIZE_512KB);
14239 break;
14240 }
14241 }
14242
14243 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14244 {
14245 u32 nvcfg1;
14246
14247 nvcfg1 = tr32(NVRAM_CFG1);
14248
14249 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14250 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14251 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14252 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14253 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14254 tp->nvram_jedecnum = JEDEC_ATMEL;
14255 tg3_flag_set(tp, NVRAM_BUFFERED);
14256 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14257
14258 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14259 tw32(NVRAM_CFG1, nvcfg1);
14260 break;
14261 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14262 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14263 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14264 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14265 tp->nvram_jedecnum = JEDEC_ATMEL;
14266 tg3_flag_set(tp, NVRAM_BUFFERED);
14267 tg3_flag_set(tp, FLASH);
14268 tp->nvram_pagesize = 264;
14269 break;
14270 case FLASH_5752VENDOR_ST_M45PE10:
14271 case FLASH_5752VENDOR_ST_M45PE20:
14272 case FLASH_5752VENDOR_ST_M45PE40:
14273 tp->nvram_jedecnum = JEDEC_ST;
14274 tg3_flag_set(tp, NVRAM_BUFFERED);
14275 tg3_flag_set(tp, FLASH);
14276 tp->nvram_pagesize = 256;
14277 break;
14278 }
14279 }
14280
14281 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14282 {
14283 u32 nvcfg1, protect = 0;
14284
14285 nvcfg1 = tr32(NVRAM_CFG1);
14286
14287 /* NVRAM protection for TPM */
14288 if (nvcfg1 & (1 << 27)) {
14289 tg3_flag_set(tp, PROTECTED_NVRAM);
14290 protect = 1;
14291 }
14292
14293 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14294 switch (nvcfg1) {
14295 case FLASH_5761VENDOR_ATMEL_ADB021D:
14296 case FLASH_5761VENDOR_ATMEL_ADB041D:
14297 case FLASH_5761VENDOR_ATMEL_ADB081D:
14298 case FLASH_5761VENDOR_ATMEL_ADB161D:
14299 case FLASH_5761VENDOR_ATMEL_MDB021D:
14300 case FLASH_5761VENDOR_ATMEL_MDB041D:
14301 case FLASH_5761VENDOR_ATMEL_MDB081D:
14302 case FLASH_5761VENDOR_ATMEL_MDB161D:
14303 tp->nvram_jedecnum = JEDEC_ATMEL;
14304 tg3_flag_set(tp, NVRAM_BUFFERED);
14305 tg3_flag_set(tp, FLASH);
14306 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14307 tp->nvram_pagesize = 256;
14308 break;
14309 case FLASH_5761VENDOR_ST_A_M45PE20:
14310 case FLASH_5761VENDOR_ST_A_M45PE40:
14311 case FLASH_5761VENDOR_ST_A_M45PE80:
14312 case FLASH_5761VENDOR_ST_A_M45PE16:
14313 case FLASH_5761VENDOR_ST_M_M45PE20:
14314 case FLASH_5761VENDOR_ST_M_M45PE40:
14315 case FLASH_5761VENDOR_ST_M_M45PE80:
14316 case FLASH_5761VENDOR_ST_M_M45PE16:
14317 tp->nvram_jedecnum = JEDEC_ST;
14318 tg3_flag_set(tp, NVRAM_BUFFERED);
14319 tg3_flag_set(tp, FLASH);
14320 tp->nvram_pagesize = 256;
14321 break;
14322 }
14323
14324 if (protect) {
14325 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14326 } else {
14327 switch (nvcfg1) {
14328 case FLASH_5761VENDOR_ATMEL_ADB161D:
14329 case FLASH_5761VENDOR_ATMEL_MDB161D:
14330 case FLASH_5761VENDOR_ST_A_M45PE16:
14331 case FLASH_5761VENDOR_ST_M_M45PE16:
14332 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14333 break;
14334 case FLASH_5761VENDOR_ATMEL_ADB081D:
14335 case FLASH_5761VENDOR_ATMEL_MDB081D:
14336 case FLASH_5761VENDOR_ST_A_M45PE80:
14337 case FLASH_5761VENDOR_ST_M_M45PE80:
14338 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14339 break;
14340 case FLASH_5761VENDOR_ATMEL_ADB041D:
14341 case FLASH_5761VENDOR_ATMEL_MDB041D:
14342 case FLASH_5761VENDOR_ST_A_M45PE40:
14343 case FLASH_5761VENDOR_ST_M_M45PE40:
14344 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14345 break;
14346 case FLASH_5761VENDOR_ATMEL_ADB021D:
14347 case FLASH_5761VENDOR_ATMEL_MDB021D:
14348 case FLASH_5761VENDOR_ST_A_M45PE20:
14349 case FLASH_5761VENDOR_ST_M_M45PE20:
14350 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14351 break;
14352 }
14353 }
14354 }
14355
14356 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14357 {
14358 tp->nvram_jedecnum = JEDEC_ATMEL;
14359 tg3_flag_set(tp, NVRAM_BUFFERED);
14360 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14361 }
14362
14363 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14364 {
14365 u32 nvcfg1;
14366
14367 nvcfg1 = tr32(NVRAM_CFG1);
14368
14369 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14370 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14371 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14372 tp->nvram_jedecnum = JEDEC_ATMEL;
14373 tg3_flag_set(tp, NVRAM_BUFFERED);
14374 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14375
14376 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14377 tw32(NVRAM_CFG1, nvcfg1);
14378 return;
14379 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14380 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14381 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14382 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14383 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14384 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14385 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14386 tp->nvram_jedecnum = JEDEC_ATMEL;
14387 tg3_flag_set(tp, NVRAM_BUFFERED);
14388 tg3_flag_set(tp, FLASH);
14389
14390 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14391 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14392 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14393 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14394 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14395 break;
14396 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14397 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14398 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14399 break;
14400 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14401 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14402 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14403 break;
14404 }
14405 break;
14406 case FLASH_5752VENDOR_ST_M45PE10:
14407 case FLASH_5752VENDOR_ST_M45PE20:
14408 case FLASH_5752VENDOR_ST_M45PE40:
14409 tp->nvram_jedecnum = JEDEC_ST;
14410 tg3_flag_set(tp, NVRAM_BUFFERED);
14411 tg3_flag_set(tp, FLASH);
14412
14413 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14414 case FLASH_5752VENDOR_ST_M45PE10:
14415 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14416 break;
14417 case FLASH_5752VENDOR_ST_M45PE20:
14418 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14419 break;
14420 case FLASH_5752VENDOR_ST_M45PE40:
14421 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14422 break;
14423 }
14424 break;
14425 default:
14426 tg3_flag_set(tp, NO_NVRAM);
14427 return;
14428 }
14429
14430 tg3_nvram_get_pagesize(tp, nvcfg1);
14431 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14432 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14433 }
14434
14435
14436 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14437 {
14438 u32 nvcfg1;
14439
14440 nvcfg1 = tr32(NVRAM_CFG1);
14441
14442 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14443 case FLASH_5717VENDOR_ATMEL_EEPROM:
14444 case FLASH_5717VENDOR_MICRO_EEPROM:
14445 tp->nvram_jedecnum = JEDEC_ATMEL;
14446 tg3_flag_set(tp, NVRAM_BUFFERED);
14447 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14448
14449 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14450 tw32(NVRAM_CFG1, nvcfg1);
14451 return;
14452 case FLASH_5717VENDOR_ATMEL_MDB011D:
14453 case FLASH_5717VENDOR_ATMEL_ADB011B:
14454 case FLASH_5717VENDOR_ATMEL_ADB011D:
14455 case FLASH_5717VENDOR_ATMEL_MDB021D:
14456 case FLASH_5717VENDOR_ATMEL_ADB021B:
14457 case FLASH_5717VENDOR_ATMEL_ADB021D:
14458 case FLASH_5717VENDOR_ATMEL_45USPT:
14459 tp->nvram_jedecnum = JEDEC_ATMEL;
14460 tg3_flag_set(tp, NVRAM_BUFFERED);
14461 tg3_flag_set(tp, FLASH);
14462
14463 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14464 case FLASH_5717VENDOR_ATMEL_MDB021D:
14465 /* Detect size with tg3_get_nvram_size() */
14466 break;
14467 case FLASH_5717VENDOR_ATMEL_ADB021B:
14468 case FLASH_5717VENDOR_ATMEL_ADB021D:
14469 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14470 break;
14471 default:
14472 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14473 break;
14474 }
14475 break;
14476 case FLASH_5717VENDOR_ST_M_M25PE10:
14477 case FLASH_5717VENDOR_ST_A_M25PE10:
14478 case FLASH_5717VENDOR_ST_M_M45PE10:
14479 case FLASH_5717VENDOR_ST_A_M45PE10:
14480 case FLASH_5717VENDOR_ST_M_M25PE20:
14481 case FLASH_5717VENDOR_ST_A_M25PE20:
14482 case FLASH_5717VENDOR_ST_M_M45PE20:
14483 case FLASH_5717VENDOR_ST_A_M45PE20:
14484 case FLASH_5717VENDOR_ST_25USPT:
14485 case FLASH_5717VENDOR_ST_45USPT:
14486 tp->nvram_jedecnum = JEDEC_ST;
14487 tg3_flag_set(tp, NVRAM_BUFFERED);
14488 tg3_flag_set(tp, FLASH);
14489
14490 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14491 case FLASH_5717VENDOR_ST_M_M25PE20:
14492 case FLASH_5717VENDOR_ST_M_M45PE20:
14493 /* Detect size with tg3_get_nvram_size() */
14494 break;
14495 case FLASH_5717VENDOR_ST_A_M25PE20:
14496 case FLASH_5717VENDOR_ST_A_M45PE20:
14497 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14498 break;
14499 default:
14500 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14501 break;
14502 }
14503 break;
14504 default:
14505 tg3_flag_set(tp, NO_NVRAM);
14506 return;
14507 }
14508
14509 tg3_nvram_get_pagesize(tp, nvcfg1);
14510 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14511 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14512 }
14513
14514 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14515 {
14516 u32 nvcfg1, nvmpinstrp;
14517
14518 nvcfg1 = tr32(NVRAM_CFG1);
14519 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14520
14521 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14522 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14523 tg3_flag_set(tp, NO_NVRAM);
14524 return;
14525 }
14526
14527 switch (nvmpinstrp) {
14528 case FLASH_5762_EEPROM_HD:
14529 nvmpinstrp = FLASH_5720_EEPROM_HD;
14530 break;
14531 case FLASH_5762_EEPROM_LD:
14532 nvmpinstrp = FLASH_5720_EEPROM_LD;
14533 break;
14534 case FLASH_5720VENDOR_M_ST_M45PE20:
14535 /* This pinstrap supports multiple sizes, so force it
14536 * to read the actual size from location 0xf0.
14537 */
14538 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14539 break;
14540 }
14541 }
14542
14543 switch (nvmpinstrp) {
14544 case FLASH_5720_EEPROM_HD:
14545 case FLASH_5720_EEPROM_LD:
14546 tp->nvram_jedecnum = JEDEC_ATMEL;
14547 tg3_flag_set(tp, NVRAM_BUFFERED);
14548
14549 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14550 tw32(NVRAM_CFG1, nvcfg1);
14551 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14552 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14553 else
14554 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14555 return;
14556 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14557 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14558 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14559 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14560 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14561 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14562 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14563 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14564 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14565 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14566 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14567 case FLASH_5720VENDOR_ATMEL_45USPT:
14568 tp->nvram_jedecnum = JEDEC_ATMEL;
14569 tg3_flag_set(tp, NVRAM_BUFFERED);
14570 tg3_flag_set(tp, FLASH);
14571
14572 switch (nvmpinstrp) {
14573 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14574 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14575 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14576 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14577 break;
14578 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14579 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14580 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14581 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14582 break;
14583 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14584 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14585 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14586 break;
14587 default:
14588 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14589 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14590 break;
14591 }
14592 break;
14593 case FLASH_5720VENDOR_M_ST_M25PE10:
14594 case FLASH_5720VENDOR_M_ST_M45PE10:
14595 case FLASH_5720VENDOR_A_ST_M25PE10:
14596 case FLASH_5720VENDOR_A_ST_M45PE10:
14597 case FLASH_5720VENDOR_M_ST_M25PE20:
14598 case FLASH_5720VENDOR_M_ST_M45PE20:
14599 case FLASH_5720VENDOR_A_ST_M25PE20:
14600 case FLASH_5720VENDOR_A_ST_M45PE20:
14601 case FLASH_5720VENDOR_M_ST_M25PE40:
14602 case FLASH_5720VENDOR_M_ST_M45PE40:
14603 case FLASH_5720VENDOR_A_ST_M25PE40:
14604 case FLASH_5720VENDOR_A_ST_M45PE40:
14605 case FLASH_5720VENDOR_M_ST_M25PE80:
14606 case FLASH_5720VENDOR_M_ST_M45PE80:
14607 case FLASH_5720VENDOR_A_ST_M25PE80:
14608 case FLASH_5720VENDOR_A_ST_M45PE80:
14609 case FLASH_5720VENDOR_ST_25USPT:
14610 case FLASH_5720VENDOR_ST_45USPT:
14611 tp->nvram_jedecnum = JEDEC_ST;
14612 tg3_flag_set(tp, NVRAM_BUFFERED);
14613 tg3_flag_set(tp, FLASH);
14614
14615 switch (nvmpinstrp) {
14616 case FLASH_5720VENDOR_M_ST_M25PE20:
14617 case FLASH_5720VENDOR_M_ST_M45PE20:
14618 case FLASH_5720VENDOR_A_ST_M25PE20:
14619 case FLASH_5720VENDOR_A_ST_M45PE20:
14620 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14621 break;
14622 case FLASH_5720VENDOR_M_ST_M25PE40:
14623 case FLASH_5720VENDOR_M_ST_M45PE40:
14624 case FLASH_5720VENDOR_A_ST_M25PE40:
14625 case FLASH_5720VENDOR_A_ST_M45PE40:
14626 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14627 break;
14628 case FLASH_5720VENDOR_M_ST_M25PE80:
14629 case FLASH_5720VENDOR_M_ST_M45PE80:
14630 case FLASH_5720VENDOR_A_ST_M25PE80:
14631 case FLASH_5720VENDOR_A_ST_M45PE80:
14632 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14633 break;
14634 default:
14635 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14636 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14637 break;
14638 }
14639 break;
14640 default:
14641 tg3_flag_set(tp, NO_NVRAM);
14642 return;
14643 }
14644
14645 tg3_nvram_get_pagesize(tp, nvcfg1);
14646 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14647 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14648
14649 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14650 u32 val;
14651
14652 if (tg3_nvram_read(tp, 0, &val))
14653 return;
14654
14655 if (val != TG3_EEPROM_MAGIC &&
14656 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14657 tg3_flag_set(tp, NO_NVRAM);
14658 }
14659 }
14660
14661 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14662 static void tg3_nvram_init(struct tg3 *tp)
14663 {
14664 if (tg3_flag(tp, IS_SSB_CORE)) {
14665 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14666 tg3_flag_clear(tp, NVRAM);
14667 tg3_flag_clear(tp, NVRAM_BUFFERED);
14668 tg3_flag_set(tp, NO_NVRAM);
14669 return;
14670 }
14671
14672 tw32_f(GRC_EEPROM_ADDR,
14673 (EEPROM_ADDR_FSM_RESET |
14674 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14675 EEPROM_ADDR_CLKPERD_SHIFT)));
14676
14677 msleep(1);
14678
14679 /* Enable seeprom accesses. */
14680 tw32_f(GRC_LOCAL_CTRL,
14681 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14682 udelay(100);
14683
14684 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14685 tg3_asic_rev(tp) != ASIC_REV_5701) {
14686 tg3_flag_set(tp, NVRAM);
14687
14688 if (tg3_nvram_lock(tp)) {
14689 netdev_warn(tp->dev,
14690 "Cannot get nvram lock, %s failed\n",
14691 __func__);
14692 return;
14693 }
14694 tg3_enable_nvram_access(tp);
14695
14696 tp->nvram_size = 0;
14697
14698 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14699 tg3_get_5752_nvram_info(tp);
14700 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14701 tg3_get_5755_nvram_info(tp);
14702 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14703 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14704 tg3_asic_rev(tp) == ASIC_REV_5785)
14705 tg3_get_5787_nvram_info(tp);
14706 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14707 tg3_get_5761_nvram_info(tp);
14708 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14709 tg3_get_5906_nvram_info(tp);
14710 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14711 tg3_flag(tp, 57765_CLASS))
14712 tg3_get_57780_nvram_info(tp);
14713 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14714 tg3_asic_rev(tp) == ASIC_REV_5719)
14715 tg3_get_5717_nvram_info(tp);
14716 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14717 tg3_asic_rev(tp) == ASIC_REV_5762)
14718 tg3_get_5720_nvram_info(tp);
14719 else
14720 tg3_get_nvram_info(tp);
14721
14722 if (tp->nvram_size == 0)
14723 tg3_get_nvram_size(tp);
14724
14725 tg3_disable_nvram_access(tp);
14726 tg3_nvram_unlock(tp);
14727
14728 } else {
14729 tg3_flag_clear(tp, NVRAM);
14730 tg3_flag_clear(tp, NVRAM_BUFFERED);
14731
14732 tg3_get_eeprom_size(tp);
14733 }
14734 }
14735
14736 struct subsys_tbl_ent {
14737 u16 subsys_vendor, subsys_devid;
14738 u32 phy_id;
14739 };
14740
14741 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14742 /* Broadcom boards. */
14743 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14744 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14745 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14746 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14747 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14748 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14749 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14750 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14751 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14752 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14753 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14754 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14755 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14756 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14757 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14758 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14759 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14760 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14761 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14762 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14763 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14764 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14765
14766 /* 3com boards. */
14767 { TG3PCI_SUBVENDOR_ID_3COM,
14768 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14769 { TG3PCI_SUBVENDOR_ID_3COM,
14770 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14771 { TG3PCI_SUBVENDOR_ID_3COM,
14772 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14773 { TG3PCI_SUBVENDOR_ID_3COM,
14774 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14775 { TG3PCI_SUBVENDOR_ID_3COM,
14776 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14777
14778 /* DELL boards. */
14779 { TG3PCI_SUBVENDOR_ID_DELL,
14780 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14781 { TG3PCI_SUBVENDOR_ID_DELL,
14782 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14783 { TG3PCI_SUBVENDOR_ID_DELL,
14784 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14785 { TG3PCI_SUBVENDOR_ID_DELL,
14786 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14787
14788 /* Compaq boards. */
14789 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14790 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14791 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14792 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14793 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14794 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14795 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14796 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14797 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14798 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14799
14800 /* IBM boards. */
14801 { TG3PCI_SUBVENDOR_ID_IBM,
14802 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14803 };
14804
14805 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14806 {
14807 int i;
14808
14809 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14810 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14811 tp->pdev->subsystem_vendor) &&
14812 (subsys_id_to_phy_id[i].subsys_devid ==
14813 tp->pdev->subsystem_device))
14814 return &subsys_id_to_phy_id[i];
14815 }
14816 return NULL;
14817 }
14818
14819 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14820 {
14821 u32 val;
14822
14823 tp->phy_id = TG3_PHY_ID_INVALID;
14824 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14825
14826 /* Assume an onboard, WOL-capable device by default. */
14827 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14828 tg3_flag_set(tp, WOL_CAP);
14829
14830 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14831 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14832 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14833 tg3_flag_set(tp, IS_NIC);
14834 }
14835 val = tr32(VCPU_CFGSHDW);
14836 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14837 tg3_flag_set(tp, ASPM_WORKAROUND);
14838 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14839 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14840 tg3_flag_set(tp, WOL_ENABLE);
14841 device_set_wakeup_enable(&tp->pdev->dev, true);
14842 }
14843 goto done;
14844 }
14845
14846 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14847 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14848 u32 nic_cfg, led_cfg;
14849 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14850 int eeprom_phy_serdes = 0;
14851
14852 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14853 tp->nic_sram_data_cfg = nic_cfg;
14854
14855 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14856 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14857 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14858 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14859 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14860 (ver > 0) && (ver < 0x100))
14861 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14862
14863 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14864 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14865
14866 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14867 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14868 eeprom_phy_serdes = 1;
14869
14870 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14871 if (nic_phy_id != 0) {
14872 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14873 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14874
14875 eeprom_phy_id = (id1 >> 16) << 10;
14876 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14877 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14878 } else
14879 eeprom_phy_id = 0;
14880
14881 tp->phy_id = eeprom_phy_id;
14882 if (eeprom_phy_serdes) {
14883 if (!tg3_flag(tp, 5705_PLUS))
14884 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14885 else
14886 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14887 }
14888
14889 if (tg3_flag(tp, 5750_PLUS))
14890 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14891 SHASTA_EXT_LED_MODE_MASK);
14892 else
14893 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14894
14895 switch (led_cfg) {
14896 default:
14897 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14898 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14899 break;
14900
14901 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14902 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14903 break;
14904
14905 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14906 tp->led_ctrl = LED_CTRL_MODE_MAC;
14907
14908 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14909 * read on some older 5700/5701 bootcode.
14910 */
14911 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14912 tg3_asic_rev(tp) == ASIC_REV_5701)
14913 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14914
14915 break;
14916
14917 case SHASTA_EXT_LED_SHARED:
14918 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14919 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14920 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14921 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14922 LED_CTRL_MODE_PHY_2);
14923 break;
14924
14925 case SHASTA_EXT_LED_MAC:
14926 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14927 break;
14928
14929 case SHASTA_EXT_LED_COMBO:
14930 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14931 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14932 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14933 LED_CTRL_MODE_PHY_2);
14934 break;
14935
14936 }
14937
14938 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14939 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14940 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14941 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14942
14943 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14944 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14945
14946 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14947 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14948 if ((tp->pdev->subsystem_vendor ==
14949 PCI_VENDOR_ID_ARIMA) &&
14950 (tp->pdev->subsystem_device == 0x205a ||
14951 tp->pdev->subsystem_device == 0x2063))
14952 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14953 } else {
14954 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14955 tg3_flag_set(tp, IS_NIC);
14956 }
14957
14958 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14959 tg3_flag_set(tp, ENABLE_ASF);
14960 if (tg3_flag(tp, 5750_PLUS))
14961 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14962 }
14963
14964 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14965 tg3_flag(tp, 5750_PLUS))
14966 tg3_flag_set(tp, ENABLE_APE);
14967
14968 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14969 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14970 tg3_flag_clear(tp, WOL_CAP);
14971
14972 if (tg3_flag(tp, WOL_CAP) &&
14973 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14974 tg3_flag_set(tp, WOL_ENABLE);
14975 device_set_wakeup_enable(&tp->pdev->dev, true);
14976 }
14977
14978 if (cfg2 & (1 << 17))
14979 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14980
14981 /* SerDes signal pre-emphasis in register 0x590 is set by
14982 * the bootcode if bit 18 is set. */
14983 if (cfg2 & (1 << 18))
14984 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14985
14986 if ((tg3_flag(tp, 57765_PLUS) ||
14987 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14988 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14989 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14990 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14991
14992 if (tg3_flag(tp, PCI_EXPRESS)) {
14993 u32 cfg3;
14994
14995 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14996 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14997 !tg3_flag(tp, 57765_PLUS) &&
14998 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14999 tg3_flag_set(tp, ASPM_WORKAROUND);
15000 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15001 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15002 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15003 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15004 }
15005
15006 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15007 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15008 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15009 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15010 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15011 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15012 }
15013 done:
15014 if (tg3_flag(tp, WOL_CAP))
15015 device_set_wakeup_enable(&tp->pdev->dev,
15016 tg3_flag(tp, WOL_ENABLE));
15017 else
15018 device_set_wakeup_capable(&tp->pdev->dev, false);
15019 }
15020
15021 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15022 {
15023 int i, err;
15024 u32 val2, off = offset * 8;
15025
15026 err = tg3_nvram_lock(tp);
15027 if (err)
15028 return err;
15029
15030 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15031 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15032 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15033 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15034 udelay(10);
15035
15036 for (i = 0; i < 100; i++) {
15037 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15038 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15039 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15040 break;
15041 }
15042 udelay(10);
15043 }
15044
15045 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15046
15047 tg3_nvram_unlock(tp);
15048 if (val2 & APE_OTP_STATUS_CMD_DONE)
15049 return 0;
15050
15051 return -EBUSY;
15052 }
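/* Note: the completion poll in tg3_ape_otp_read() above waits up to
 * ~1 ms (100 iterations x 10 us) before giving up with -EBUSY.
 */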
15053
15054 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15055 {
15056 int i;
15057 u32 val;
15058
15059 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15060 tw32(OTP_CTRL, cmd);
15061
15062 /* Wait for up to 1 ms for command to execute. */
15063 for (i = 0; i < 100; i++) {
15064 val = tr32(OTP_STATUS);
15065 if (val & OTP_STATUS_CMD_DONE)
15066 break;
15067 udelay(10);
15068 }
15069
15070 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15071 }
15072
15073 /* Read the gphy configuration from the OTP region of the chip. The gphy
15074 * configuration is a 32-bit value that straddles the alignment boundary.
15075 * We do two 32-bit reads and then shift and merge the results.
15076 */
15077 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15078 {
15079 u32 bhalf_otp, thalf_otp;
15080
15081 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15082
15083 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15084 return 0;
15085
15086 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15087
15088 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15089 return 0;
15090
15091 thalf_otp = tr32(OTP_READ_DATA);
15092
15093 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15094
15095 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15096 return 0;
15097
15098 bhalf_otp = tr32(OTP_READ_DATA);
15099
15100 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15101 }
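/* Worked example of the merge above: thalf_otp = 0xaaaa1234 and
 * bhalf_otp = 0x5678bbbb produce (0x1234 << 16) | 0x5678, i.e. the
 * straddling gphy config word 0x12345678.
 */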
15102
15103 static void tg3_phy_init_link_config(struct tg3 *tp)
15104 {
15105 u32 adv = ADVERTISED_Autoneg;
15106
15107 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15108 adv |= ADVERTISED_1000baseT_Half |
15109 ADVERTISED_1000baseT_Full;
15110
15111 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15112 adv |= ADVERTISED_100baseT_Half |
15113 ADVERTISED_100baseT_Full |
15114 ADVERTISED_10baseT_Half |
15115 ADVERTISED_10baseT_Full |
15116 ADVERTISED_TP;
15117 else
15118 adv |= ADVERTISED_FIBRE;
15119
15120 tp->link_config.advertising = adv;
15121 tp->link_config.speed = SPEED_UNKNOWN;
15122 tp->link_config.duplex = DUPLEX_UNKNOWN;
15123 tp->link_config.autoneg = AUTONEG_ENABLE;
15124 tp->link_config.active_speed = SPEED_UNKNOWN;
15125 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15126
15127 tp->old_link = -1;
15128 }
15129
15130 static int tg3_phy_probe(struct tg3 *tp)
15131 {
15132 u32 hw_phy_id_1, hw_phy_id_2;
15133 u32 hw_phy_id, hw_phy_id_masked;
15134 int err;
15135
15136 /* flow control autonegotiation is default behavior */
15137 tg3_flag_set(tp, PAUSE_AUTONEG);
15138 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15139
15140 if (tg3_flag(tp, ENABLE_APE)) {
15141 switch (tp->pci_fn) {
15142 case 0:
15143 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15144 break;
15145 case 1:
15146 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15147 break;
15148 case 2:
15149 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15150 break;
15151 case 3:
15152 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15153 break;
15154 }
15155 }
15156
15157 if (!tg3_flag(tp, ENABLE_ASF) &&
15158 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15159 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15160 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15161 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15162
15163 if (tg3_flag(tp, USE_PHYLIB))
15164 return tg3_phy_init(tp);
15165
15166 /* Reading the PHY ID register can conflict with ASF
15167 * firmware access to the PHY hardware.
15168 */
15169 err = 0;
15170 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15171 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15172 } else {
15173 /* Now read the physical PHY_ID from the chip and verify
15174 * that it is sane. If it doesn't look good, we fall back
15175 * to the PHY_ID found in the eeprom area or, failing that,
15176 * to the hard-coded subsystem-ID table.
15177 */
15178 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15179 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15180
15181 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15182 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15183 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15184
15185 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15186 }
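/* Example of the packing above, with illustrative register values:
 * hw_phy_id_1 = 0x0020 and hw_phy_id_2 = 0x60c0 combine as
 * (0x0020 << 10) | ((0x60c0 & 0xfc00) << 16) | (0x60c0 & 0x03ff)
 * = 0x600080c0, and the TG3_PHY_ID_MASK'ed result is what gets
 * matched against the TG3_PHY_ID_* constants below.
 */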
15187
15188 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15189 tp->phy_id = hw_phy_id;
15190 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15191 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15192 else
15193 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15194 } else {
15195 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15196 /* Do nothing, phy ID already set up in
15197 * tg3_get_eeprom_hw_cfg().
15198 */
15199 } else {
15200 struct subsys_tbl_ent *p;
15201
15202 /* No eeprom signature? Try the hardcoded
15203 * subsys device table.
15204 */
15205 p = tg3_lookup_by_subsys(tp);
15206 if (p) {
15207 tp->phy_id = p->phy_id;
15208 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15209 /* So far we have seen the IDs 0xbc050cd0,
15210 * 0xbc050f80 and 0xbc050c30 on devices
15211 * connected to a BCM4785, and there are
15212 * probably more. For now, just assume that
15213 * the phy is supported when it is connected
15214 * to an SSB core.
15215 */
15216 return -ENODEV;
15217 }
15218
15219 if (!tp->phy_id ||
15220 tp->phy_id == TG3_PHY_ID_BCM8002)
15221 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15222 }
15223 }
15224
15225 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15226 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15227 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15228 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15229 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15230 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15231 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15232 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15233 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15234 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15235
15236 tp->eee.supported = SUPPORTED_100baseT_Full |
15237 SUPPORTED_1000baseT_Full;
15238 tp->eee.advertised = ADVERTISED_100baseT_Full |
15239 ADVERTISED_1000baseT_Full;
15240 tp->eee.eee_enabled = 1;
15241 tp->eee.tx_lpi_enabled = 1;
15242 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15243 }
15244
15245 tg3_phy_init_link_config(tp);
15246
15247 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15248 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15249 !tg3_flag(tp, ENABLE_APE) &&
15250 !tg3_flag(tp, ENABLE_ASF)) {
15251 u32 bmsr, dummy;
15252
15253 tg3_readphy(tp, MII_BMSR, &bmsr);
15254 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15255 (bmsr & BMSR_LSTATUS))
15256 goto skip_phy_reset;
15257
15258 err = tg3_phy_reset(tp);
15259 if (err)
15260 return err;
15261
15262 tg3_phy_set_wirespeed(tp);
15263
15264 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15265 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15266 tp->link_config.flowctrl);
15267
15268 tg3_writephy(tp, MII_BMCR,
15269 BMCR_ANENABLE | BMCR_ANRESTART);
15270 }
15271 }
15272
15273 skip_phy_reset:
15274 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15275 err = tg3_init_5401phy_dsp(tp);
15276 if (err)
15277 return err;
15278
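/* The DSP init below is deliberately issued a second time; presumably
 * the first pass settles the 5401 before the configuration sticks
 * (an assumption; the original code gives no rationale).
 */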
15279 err = tg3_init_5401phy_dsp(tp);
15280 }
15281
15282 return err;
15283 }
15284
15285 static void tg3_read_vpd(struct tg3 *tp)
15286 {
15287 u8 *vpd_data;
15288 unsigned int block_end, rosize, len;
15289 u32 vpdlen;
15290 int j, i = 0;
15291
15292 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15293 if (!vpd_data)
15294 goto out_no_vpd;
15295
15296 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15297 if (i < 0)
15298 goto out_not_found;
15299
15300 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15301 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15302 i += PCI_VPD_LRDT_TAG_SIZE;
15303
15304 if (block_end > vpdlen)
15305 goto out_not_found;
15306
15307 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15308 PCI_VPD_RO_KEYWORD_MFR_ID);
15309 if (j > 0) {
15310 len = pci_vpd_info_field_size(&vpd_data[j]);
15311
15312 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15313 if (j + len > block_end || len != 4 ||
15314 memcmp(&vpd_data[j], "1028", 4))
15315 goto partno;
15316
15317 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15318 PCI_VPD_RO_KEYWORD_VENDOR0);
15319 if (j < 0)
15320 goto partno;
15321
15322 len = pci_vpd_info_field_size(&vpd_data[j]);
15323
15324 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15325 if (j + len > block_end)
15326 goto partno;
15327
15328 if (len >= sizeof(tp->fw_ver))
15329 len = sizeof(tp->fw_ver) - 1;
15330 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15331 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15332 &vpd_data[j]);
15333 }
15334
15335 partno:
15336 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15337 PCI_VPD_RO_KEYWORD_PARTNO);
15338 if (i < 0)
15339 goto out_not_found;
15340
15341 len = pci_vpd_info_field_size(&vpd_data[i]);
15342
15343 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15344 if (len > TG3_BPN_SIZE ||
15345 (len + i) > vpdlen)
15346 goto out_not_found;
15347
15348 memcpy(tp->board_part_number, &vpd_data[i], len);
15349
15350 out_not_found:
15351 kfree(vpd_data);
15352 if (tp->board_part_number[0])
15353 return;
15354
15355 out_no_vpd:
15356 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15357 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15358 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15359 strcpy(tp->board_part_number, "BCM5717");
15360 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15361 strcpy(tp->board_part_number, "BCM5718");
15362 else
15363 goto nomatch;
15364 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15365 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15366 strcpy(tp->board_part_number, "BCM57780");
15367 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15368 strcpy(tp->board_part_number, "BCM57760");
15369 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15370 strcpy(tp->board_part_number, "BCM57790");
15371 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15372 strcpy(tp->board_part_number, "BCM57788");
15373 else
15374 goto nomatch;
15375 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15376 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15377 strcpy(tp->board_part_number, "BCM57761");
15378 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15379 strcpy(tp->board_part_number, "BCM57765");
15380 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15381 strcpy(tp->board_part_number, "BCM57781");
15382 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15383 strcpy(tp->board_part_number, "BCM57785");
15384 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15385 strcpy(tp->board_part_number, "BCM57791");
15386 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15387 strcpy(tp->board_part_number, "BCM57795");
15388 else
15389 goto nomatch;
15390 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15391 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15392 strcpy(tp->board_part_number, "BCM57762");
15393 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15394 strcpy(tp->board_part_number, "BCM57766");
15395 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15396 strcpy(tp->board_part_number, "BCM57782");
15397 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15398 strcpy(tp->board_part_number, "BCM57786");
15399 else
15400 goto nomatch;
15401 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15402 strcpy(tp->board_part_number, "BCM95906");
15403 } else {
15404 nomatch:
15405 strcpy(tp->board_part_number, "none");
15406 }
15407 }
15408
15409 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15410 {
15411 u32 val;
15412
15413 if (tg3_nvram_read(tp, offset, &val) ||
15414 (val & 0xfc000000) != 0x0c000000 ||
15415 tg3_nvram_read(tp, offset + 4, &val) ||
15416 val != 0)
15417 return 0;
15418
15419 return 1;
15420 }
15421
15422 static void tg3_read_bc_ver(struct tg3 *tp)
15423 {
15424 u32 val, offset, start, ver_offset;
15425 int i, dst_off;
15426 bool newver = false;
15427
15428 if (tg3_nvram_read(tp, 0xc, &offset) ||
15429 tg3_nvram_read(tp, 0x4, &start))
15430 return;
15431
15432 offset = tg3_nvram_logical_addr(tp, offset);
15433
15434 if (tg3_nvram_read(tp, offset, &val))
15435 return;
15436
15437 if ((val & 0xfc000000) == 0x0c000000) {
15438 if (tg3_nvram_read(tp, offset + 4, &val))
15439 return;
15440
15441 if (val == 0)
15442 newver = true;
15443 }
15444
15445 dst_off = strlen(tp->fw_ver);
15446
15447 if (newver) {
15448 if (TG3_VER_SIZE - dst_off < 16 ||
15449 tg3_nvram_read(tp, offset + 8, &ver_offset))
15450 return;
15451
15452 offset = offset + ver_offset - start;
15453 for (i = 0; i < 16; i += 4) {
15454 __be32 v;
15455 if (tg3_nvram_read_be32(tp, offset + i, &v))
15456 return;
15457
15458 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15459 }
15460 } else {
15461 u32 major, minor;
15462
15463 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15464 return;
15465
15466 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15467 TG3_NVM_BCVER_MAJSFT;
15468 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15469 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15470 "v%d.%02d", major, minor);
15471 }
15472 }
15473
15474 static void tg3_read_hwsb_ver(struct tg3 *tp)
15475 {
15476 u32 val, major, minor;
15477
15478 /* Use native endian representation */
15479 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15480 return;
15481
15482 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15483 TG3_NVM_HWSB_CFG1_MAJSFT;
15484 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15485 TG3_NVM_HWSB_CFG1_MINSFT;
15486
15487 snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
15488 }
15489
15490 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15491 {
15492 u32 offset, major, minor, build;
15493
15494 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15495
15496 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15497 return;
15498
15499 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15500 case TG3_EEPROM_SB_REVISION_0:
15501 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15502 break;
15503 case TG3_EEPROM_SB_REVISION_2:
15504 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15505 break;
15506 case TG3_EEPROM_SB_REVISION_3:
15507 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15508 break;
15509 case TG3_EEPROM_SB_REVISION_4:
15510 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15511 break;
15512 case TG3_EEPROM_SB_REVISION_5:
15513 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15514 break;
15515 case TG3_EEPROM_SB_REVISION_6:
15516 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15517 break;
15518 default:
15519 return;
15520 }
15521
15522 if (tg3_nvram_read(tp, offset, &val))
15523 return;
15524
15525 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15526 TG3_EEPROM_SB_EDH_BLD_SHFT;
15527 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15528 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15529 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15530
15531 if (minor > 99 || build > 26)
15532 return;
15533
15534 offset = strlen(tp->fw_ver);
15535 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15536 " v%d.%02d", major, minor);
15537
15538 if (build > 0) {
15539 offset = strlen(tp->fw_ver);
15540 if (offset < TG3_VER_SIZE - 1)
15541 tp->fw_ver[offset] = 'a' + build - 1;
15542 }
15543 }
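/* Example of the suffix logic above: major 1, minor 2, build 3 yields
 * "sb v1.02c", since 'a' + build - 1 appends the build letter.
 */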
15544
15545 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15546 {
15547 u32 val, offset, start;
15548 int i, vlen;
15549
15550 for (offset = TG3_NVM_DIR_START;
15551 offset < TG3_NVM_DIR_END;
15552 offset += TG3_NVM_DIRENT_SIZE) {
15553 if (tg3_nvram_read(tp, offset, &val))
15554 return;
15555
15556 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15557 break;
15558 }
15559
15560 if (offset == TG3_NVM_DIR_END)
15561 return;
15562
15563 if (!tg3_flag(tp, 5705_PLUS))
15564 start = 0x08000000;
15565 else if (tg3_nvram_read(tp, offset - 4, &start))
15566 return;
15567
15568 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15569 !tg3_fw_img_is_valid(tp, offset) ||
15570 tg3_nvram_read(tp, offset + 8, &val))
15571 return;
15572
15573 offset += val - start;
15574
15575 vlen = strlen(tp->fw_ver);
15576
15577 tp->fw_ver[vlen++] = ',';
15578 tp->fw_ver[vlen++] = ' ';
15579
15580 for (i = 0; i < 4; i++) {
15581 __be32 v;
15582 if (tg3_nvram_read_be32(tp, offset, &v))
15583 return;
15584
15585 offset += sizeof(v);
15586
15587 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15588 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15589 break;
15590 }
15591
15592 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15593 vlen += sizeof(v);
15594 }
15595 }
15596
15597 static void tg3_probe_ncsi(struct tg3 *tp)
15598 {
15599 u32 apedata;
15600
15601 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15602 if (apedata != APE_SEG_SIG_MAGIC)
15603 return;
15604
15605 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15606 if (!(apedata & APE_FW_STATUS_READY))
15607 return;
15608
15609 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15610 tg3_flag_set(tp, APE_HAS_NCSI);
15611 }
15612
15613 static void tg3_read_dash_ver(struct tg3 *tp)
15614 {
15615 int vlen;
15616 u32 apedata;
15617 char *fwtype;
15618
15619 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15620
15621 if (tg3_flag(tp, APE_HAS_NCSI))
15622 fwtype = "NCSI";
15623 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15624 fwtype = "SMASH";
15625 else
15626 fwtype = "DASH";
15627
15628 vlen = strlen(tp->fw_ver);
15629
15630 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15631 fwtype,
15632 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15633 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15634 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15635 (apedata & APE_FW_VERSION_BLDMSK));
15636 }
15637
15638 static void tg3_read_otp_ver(struct tg3 *tp)
15639 {
15640 u32 val, val2;
15641
15642 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15643 return;
15644
15645 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15646 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15647 TG3_OTP_MAGIC0_VALID(val)) {
15648 u64 val64 = (u64) val << 32 | val2;
15649 u32 ver = 0;
15650 int i, vlen;
15651
15652 for (i = 0; i < 7; i++) {
15653 if ((val64 & 0xff) == 0)
15654 break;
15655 ver = val64 & 0xff;
15656 val64 >>= 8;
15657 }
15658 vlen = strlen(tp->fw_ver);
15659 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15660 }
15661 }
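/* Worked example (values hypothetical): the loop above walks val64 from
 * its low byte upward, remembering the last non-zero byte. For
 * val64 == 0x0302 it records 0x02, then 0x03, stops at the first zero
 * byte, and appends " .03" to fw_ver.
 */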
15662
15663 static void tg3_read_fw_ver(struct tg3 *tp)
15664 {
15665 u32 val;
15666 bool vpd_vers = false;
15667
15668 if (tp->fw_ver[0] != 0)
15669 vpd_vers = true;
15670
15671 if (tg3_flag(tp, NO_NVRAM)) {
15672 strcat(tp->fw_ver, "sb");
15673 tg3_read_otp_ver(tp);
15674 return;
15675 }
15676
15677 if (tg3_nvram_read(tp, 0, &val))
15678 return;
15679
15680 if (val == TG3_EEPROM_MAGIC)
15681 tg3_read_bc_ver(tp);
15682 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15683 tg3_read_sb_ver(tp, val);
15684 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15685 tg3_read_hwsb_ver(tp);
15686
15687 if (tg3_flag(tp, ENABLE_ASF)) {
15688 if (tg3_flag(tp, ENABLE_APE)) {
15689 tg3_probe_ncsi(tp);
15690 if (!vpd_vers)
15691 tg3_read_dash_ver(tp);
15692 } else if (!vpd_vers) {
15693 tg3_read_mgmtfw_ver(tp);
15694 }
15695 }
15696
15697 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15698 }
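/* Editorial summary (example string hypothetical): the first NVRAM word
 * selects the parser -- TG3_EEPROM_MAGIC picks the bootcode version,
 * the _FW/_HW magics the selfboot variants -- so a finished string
 * might read "v3.29 NCSI v1.2.3.4" on an APE/NCSI part.
 */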
15699
15700 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15701 {
15702 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15703 return TG3_RX_RET_MAX_SIZE_5717;
15704 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15705 return TG3_RX_RET_MAX_SIZE_5700;
15706 else
15707 return TG3_RX_RET_MAX_SIZE_5705;
15708 }
15709
15710 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15711 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15712 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15713 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15714 { },
15715 };
15716
15717 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15718 {
15719 struct pci_dev *peer;
15720 unsigned int func, devnr = tp->pdev->devfn & ~7;
15721
15722 for (func = 0; func < 8; func++) {
15723 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15724 if (peer && peer != tp->pdev)
15725 break;
15726 pci_dev_put(peer);
15727 }
15728 /* 5704 can be configured in single-port mode; set peer to
15729 * tp->pdev in that case.
15730 */
15731 if (!peer) {
15732 peer = tp->pdev;
15733 return peer;
15734 }
15735
15736 /*
15737 * We don't need to keep the refcount elevated; there's no way
15738 * to remove one half of this device without removing the other
15739 */
15740 pci_dev_put(peer);
15741
15742 return peer;
15743 }
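/* Worked example (devfn value hypothetical): devfn packs the PCI slot
 * in bits 7:3 and the function in bits 2:0, so devfn & ~7 above keeps
 * only the slot. For devfn 0x21 (slot 4, function 1) the loop probes
 * functions 0x20..0x27 and returns the first device found that is not
 * tp->pdev itself.
 */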
15744
15745 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15746 {
15747 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15748 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15749 u32 reg;
15750
15751 /* All devices that use the alternate
15752 * ASIC REV location have a CPMU.
15753 */
15754 tg3_flag_set(tp, CPMU_PRESENT);
15755
15756 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15757 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15758 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15759 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15760 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15761 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15762 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15763 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15764 reg = TG3PCI_GEN2_PRODID_ASICREV;
15765 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15766 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15767 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15768 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15769 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15770 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15771 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15772 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15773 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15774 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15775 reg = TG3PCI_GEN15_PRODID_ASICREV;
15776 else
15777 reg = TG3PCI_PRODID_ASICREV;
15778
15779 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15780 }
15781
15782 /* Wrong chip ID in 5752 A0. This code can be removed later
15783 * as A0 is not in production.
15784 */
15785 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15786 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15787
15788 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15789 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15790
15791 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15792 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15793 tg3_asic_rev(tp) == ASIC_REV_5720)
15794 tg3_flag_set(tp, 5717_PLUS);
15795
15796 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15797 tg3_asic_rev(tp) == ASIC_REV_57766)
15798 tg3_flag_set(tp, 57765_CLASS);
15799
15800 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15801 tg3_asic_rev(tp) == ASIC_REV_5762)
15802 tg3_flag_set(tp, 57765_PLUS);
15803
15804 /* Intentionally exclude ASIC_REV_5906 */
15805 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15806 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15807 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15808 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15809 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15810 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15811 tg3_flag(tp, 57765_PLUS))
15812 tg3_flag_set(tp, 5755_PLUS);
15813
15814 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15815 tg3_asic_rev(tp) == ASIC_REV_5714)
15816 tg3_flag_set(tp, 5780_CLASS);
15817
15818 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15819 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15820 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15821 tg3_flag(tp, 5755_PLUS) ||
15822 tg3_flag(tp, 5780_CLASS))
15823 tg3_flag_set(tp, 5750_PLUS);
15824
15825 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15826 tg3_flag(tp, 5750_PLUS))
15827 tg3_flag_set(tp, 5705_PLUS);
15828 }
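/* Worked example (encoding per tg3.h): pci_chip_rev_id 0x7100 yields
 * tg3_asic_rev() == 0x07 (ASIC_REV_5700, value >> 12) and
 * tg3_chip_rev() == 0x71 (CHIPREV_5700_BX, value >> 8), which is how
 * the flag cascade above walks from specific revisions up to chip
 * families.
 */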
15829
15830 static bool tg3_10_100_only_device(struct tg3 *tp,
15831 const struct pci_device_id *ent)
15832 {
15833 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15834
15835 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15836 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15837 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15838 return true;
15839
15840 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15841 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15842 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15843 return true;
15844 } else {
15845 return true;
15846 }
15847 }
15848
15849 return false;
15850 }
15851
15852 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15853 {
15854 u32 misc_ctrl_reg;
15855 u32 pci_state_reg, grc_misc_cfg;
15856 u32 val;
15857 u16 pci_cmd;
15858 int err;
15859
15860 /* Force memory write invalidate off. If we leave it on,
15861 * then on 5700_BX chips we have to enable a workaround.
15862 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15863 * to match the cacheline size. The Broadcom driver has this
15864 * workaround but turns MWI off all the time, so it never gets used.
15865 * This seems to suggest that the workaround is insufficient.
15866 */
15867 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15868 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15869 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15870
15871 /* Important! -- Make sure register accesses are byteswapped
15872 * correctly. Also, for those chips that require it, make
15873 * sure that indirect register accesses are enabled before
15874 * the first operation.
15875 */
15876 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15877 &misc_ctrl_reg);
15878 tp->misc_host_ctrl |= (misc_ctrl_reg &
15879 MISC_HOST_CTRL_CHIPREV);
15880 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15881 tp->misc_host_ctrl);
15882
15883 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15884
15885 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15886 * we need to disable memory and use config. cycles
15887 * only to access all registers. The 5702/03 chips
15888 * can mistakenly decode the special cycles from the
15889 * ICH chipsets as memory write cycles, causing corruption
15890 * of register and memory space. Only certain ICH bridges
15891 * will drive special cycles with non-zero data during the
15892 * address phase which can fall within the 5703's address
15893 * range. This is not an ICH bug as the PCI spec allows
15894 * non-zero address during special cycles. However, only
15895 * these ICH bridges are known to drive non-zero addresses
15896 * during special cycles.
15897 *
15898 * Since special cycles do not cross PCI bridges, we only
15899 * enable this workaround if the 5703 is on the secondary
15900 * bus of these ICH bridges.
15901 */
15902 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15903 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15904 static struct tg3_dev_id {
15905 u32 vendor;
15906 u32 device;
15907 u32 rev;
15908 } ich_chipsets[] = {
15909 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15910 PCI_ANY_ID },
15911 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15912 PCI_ANY_ID },
15913 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15914 0xa },
15915 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15916 PCI_ANY_ID },
15917 { },
15918 };
15919 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15920 struct pci_dev *bridge = NULL;
15921
15922 while (pci_id->vendor != 0) {
15923 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15924 bridge);
15925 if (!bridge) {
15926 pci_id++;
15927 continue;
15928 }
15929 if (pci_id->rev != PCI_ANY_ID) {
15930 if (bridge->revision > pci_id->rev)
15931 continue;
15932 }
15933 if (bridge->subordinate &&
15934 (bridge->subordinate->number ==
15935 tp->pdev->bus->number)) {
15936 tg3_flag_set(tp, ICH_WORKAROUND);
15937 pci_dev_put(bridge);
15938 break;
15939 }
15940 }
15941 }
15942
15943 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15944 static struct tg3_dev_id {
15945 u32 vendor;
15946 u32 device;
15947 } bridge_chipsets[] = {
15948 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15949 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15950 { },
15951 };
15952 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15953 struct pci_dev *bridge = NULL;
15954
15955 while (pci_id->vendor != 0) {
15956 bridge = pci_get_device(pci_id->vendor,
15957 pci_id->device,
15958 bridge);
15959 if (!bridge) {
15960 pci_id++;
15961 continue;
15962 }
15963 if (bridge->subordinate &&
15964 (bridge->subordinate->number <=
15965 tp->pdev->bus->number) &&
15966 (bridge->subordinate->busn_res.end >=
15967 tp->pdev->bus->number)) {
15968 tg3_flag_set(tp, 5701_DMA_BUG);
15969 pci_dev_put(bridge);
15970 break;
15971 }
15972 }
15973 }
15974
15975 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15976 * DMA addresses > 40-bit. This bridge may have additional 57xx
15977 * devices behind it, in some 4-port NIC designs for example.
15978 * Any tg3 device found behind the bridge will also need the 40-bit
15979 * DMA workaround.
15980 */
15981 if (tg3_flag(tp, 5780_CLASS)) {
15982 tg3_flag_set(tp, 40BIT_DMA_BUG);
15983 tp->msi_cap = tp->pdev->msi_cap;
15984 } else {
15985 struct pci_dev *bridge = NULL;
15986
15987 do {
15988 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15989 PCI_DEVICE_ID_SERVERWORKS_EPB,
15990 bridge);
15991 if (bridge && bridge->subordinate &&
15992 (bridge->subordinate->number <=
15993 tp->pdev->bus->number) &&
15994 (bridge->subordinate->busn_res.end >=
15995 tp->pdev->bus->number)) {
15996 tg3_flag_set(tp, 40BIT_DMA_BUG);
15997 pci_dev_put(bridge);
15998 break;
15999 }
16000 } while (bridge);
16001 }
16002
16003 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16004 tg3_asic_rev(tp) == ASIC_REV_5714)
16005 tp->pdev_peer = tg3_find_peer(tp);
16006
16007 /* Determine TSO capabilities */
16008 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16009 ; /* Do nothing. HW bug. */
16010 else if (tg3_flag(tp, 57765_PLUS))
16011 tg3_flag_set(tp, HW_TSO_3);
16012 else if (tg3_flag(tp, 5755_PLUS) ||
16013 tg3_asic_rev(tp) == ASIC_REV_5906)
16014 tg3_flag_set(tp, HW_TSO_2);
16015 else if (tg3_flag(tp, 5750_PLUS)) {
16016 tg3_flag_set(tp, HW_TSO_1);
16017 tg3_flag_set(tp, TSO_BUG);
16018 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16019 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16020 tg3_flag_clear(tp, TSO_BUG);
16021 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16022 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16023 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16024 tg3_flag_set(tp, FW_TSO);
16025 tg3_flag_set(tp, TSO_BUG);
16026 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16027 tp->fw_needed = FIRMWARE_TG3TSO5;
16028 else
16029 tp->fw_needed = FIRMWARE_TG3TSO;
16030 }
16031
16032 /* Selectively allow TSO based on operating conditions */
16033 if (tg3_flag(tp, HW_TSO_1) ||
16034 tg3_flag(tp, HW_TSO_2) ||
16035 tg3_flag(tp, HW_TSO_3) ||
16036 tg3_flag(tp, FW_TSO)) {
16037 /* For firmware TSO, assume ASF is disabled.
16038 * We'll disable TSO later if we discover ASF
16039 * is enabled in tg3_get_eeprom_hw_cfg().
16040 */
16041 tg3_flag_set(tp, TSO_CAPABLE);
16042 } else {
16043 tg3_flag_clear(tp, TSO_CAPABLE);
16044 tg3_flag_clear(tp, TSO_BUG);
16045 tp->fw_needed = NULL;
16046 }
16047
16048 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16049 tp->fw_needed = FIRMWARE_TG3;
16050
16051 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16052 tp->fw_needed = FIRMWARE_TG357766;
16053
16054 tp->irq_max = 1;
16055
16056 if (tg3_flag(tp, 5750_PLUS)) {
16057 tg3_flag_set(tp, SUPPORT_MSI);
16058 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16059 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16060 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16061 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16062 tp->pdev_peer == tp->pdev))
16063 tg3_flag_clear(tp, SUPPORT_MSI);
16064
16065 if (tg3_flag(tp, 5755_PLUS) ||
16066 tg3_asic_rev(tp) == ASIC_REV_5906) {
16067 tg3_flag_set(tp, 1SHOT_MSI);
16068 }
16069
16070 if (tg3_flag(tp, 57765_PLUS)) {
16071 tg3_flag_set(tp, SUPPORT_MSIX);
16072 tp->irq_max = TG3_IRQ_MAX_VECS;
16073 }
16074 }
16075
16076 tp->txq_max = 1;
16077 tp->rxq_max = 1;
16078 if (tp->irq_max > 1) {
16079 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16080 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16081
16082 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16083 tg3_asic_rev(tp) == ASIC_REV_5720)
16084 tp->txq_max = tp->irq_max - 1;
16085 }
16086
16087 if (tg3_flag(tp, 5755_PLUS) ||
16088 tg3_asic_rev(tp) == ASIC_REV_5906)
16089 tg3_flag_set(tp, SHORT_DMA_BUG);
16090
16091 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16092 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16093
16094 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16095 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16096 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16097 tg3_asic_rev(tp) == ASIC_REV_5762)
16098 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16099
16100 if (tg3_flag(tp, 57765_PLUS) &&
16101 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16102 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16103
16104 if (!tg3_flag(tp, 5705_PLUS) ||
16105 tg3_flag(tp, 5780_CLASS) ||
16106 tg3_flag(tp, USE_JUMBO_BDFLAG))
16107 tg3_flag_set(tp, JUMBO_CAPABLE);
16108
16109 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16110 &pci_state_reg);
16111
16112 if (pci_is_pcie(tp->pdev)) {
16113 u16 lnkctl;
16114
16115 tg3_flag_set(tp, PCI_EXPRESS);
16116
16117 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16118 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16119 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16120 tg3_flag_clear(tp, HW_TSO_2);
16121 tg3_flag_clear(tp, TSO_CAPABLE);
16122 }
16123 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16124 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16125 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16126 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16127 tg3_flag_set(tp, CLKREQ_BUG);
16128 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16129 tg3_flag_set(tp, L1PLLPD_EN);
16130 }
16131 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16132 /* BCM5785 devices are effectively PCIe devices, and should
16133 * follow PCIe codepaths, but do not have a PCIe capabilities
16134 * section.
16135 */
16136 tg3_flag_set(tp, PCI_EXPRESS);
16137 } else if (!tg3_flag(tp, 5705_PLUS) ||
16138 tg3_flag(tp, 5780_CLASS)) {
16139 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16140 if (!tp->pcix_cap) {
16141 dev_err(&tp->pdev->dev,
16142 "Cannot find PCI-X capability, aborting\n");
16143 return -EIO;
16144 }
16145
16146 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16147 tg3_flag_set(tp, PCIX_MODE);
16148 }
16149
16150 /* If we have an AMD 762 or VIA K8T800 chipset, write
16151 * reordering to the mailbox registers done by the host
16152 * controller can cause major trouble. We read back from
16153 * every mailbox register write to force the writes to be
16154 * posted to the chip in order.
16155 */
16156 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16157 !tg3_flag(tp, PCI_EXPRESS))
16158 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16159
16160 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16161 &tp->pci_cacheline_sz);
16162 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16163 &tp->pci_lat_timer);
16164 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16165 tp->pci_lat_timer < 64) {
16166 tp->pci_lat_timer = 64;
16167 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16168 tp->pci_lat_timer);
16169 }
16170
16171 /* Important! -- It is critical that the PCI-X hw workaround
16172 * situation is decided before the first MMIO register access.
16173 */
16174 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16175 /* 5700 BX chips need to have their TX producer index
16176 * mailboxes written twice to workaround a bug.
16177 */
16178 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16179
16180 /* If we are in PCI-X mode, enable register write workaround.
16181 *
16182 * The workaround is to use indirect register accesses
16183 * for all chip writes not to mailbox registers.
16184 */
16185 if (tg3_flag(tp, PCIX_MODE)) {
16186 u32 pm_reg;
16187
16188 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16189
16190 /* The chip can have its power management PCI config
16191 * space registers clobbered due to this bug.
16192 * So explicitly force the chip into D0 here.
16193 */
16194 pci_read_config_dword(tp->pdev,
16195 tp->pm_cap + PCI_PM_CTRL,
16196 &pm_reg);
16197 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16198 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16199 pci_write_config_dword(tp->pdev,
16200 tp->pm_cap + PCI_PM_CTRL,
16201 pm_reg);
16202
16203 /* Also, force SERR#/PERR# in PCI command. */
16204 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16205 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16206 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16207 }
16208 }
16209
16210 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16211 tg3_flag_set(tp, PCI_HIGH_SPEED);
16212 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16213 tg3_flag_set(tp, PCI_32BIT);
16214
16215 /* Chip-specific fixup from Broadcom driver */
16216 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16217 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16218 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16219 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16220 }
16221
16222 /* Default fast path register access methods */
16223 tp->read32 = tg3_read32;
16224 tp->write32 = tg3_write32;
16225 tp->read32_mbox = tg3_read32;
16226 tp->write32_mbox = tg3_write32;
16227 tp->write32_tx_mbox = tg3_write32;
16228 tp->write32_rx_mbox = tg3_write32;
16229
16230 /* Various workaround register access methods */
16231 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16232 tp->write32 = tg3_write_indirect_reg32;
16233 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16234 (tg3_flag(tp, PCI_EXPRESS) &&
16235 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16236 /*
16237 * Back to back register writes can cause problems on these
16238 * chips, the workaround is to read back all reg writes
16239 * except those to mailbox regs.
16240 *
16241 * See tg3_write_indirect_reg32().
16242 */
16243 tp->write32 = tg3_write_flush_reg32;
16244 }
16245
16246 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16247 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16248 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16249 tp->write32_rx_mbox = tg3_write_flush_reg32;
16250 }
16251
16252 if (tg3_flag(tp, ICH_WORKAROUND)) {
16253 tp->read32 = tg3_read_indirect_reg32;
16254 tp->write32 = tg3_write_indirect_reg32;
16255 tp->read32_mbox = tg3_read_indirect_mbox;
16256 tp->write32_mbox = tg3_write_indirect_mbox;
16257 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16258 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16259
16260 iounmap(tp->regs);
16261 tp->regs = NULL;
16262
16263 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16264 pci_cmd &= ~PCI_COMMAND_MEMORY;
16265 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16266 }
16267 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16268 tp->read32_mbox = tg3_read32_mbox_5906;
16269 tp->write32_mbox = tg3_write32_mbox_5906;
16270 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16271 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16272 }
16273
16274 if (tp->write32 == tg3_write_indirect_reg32 ||
16275 (tg3_flag(tp, PCIX_MODE) &&
16276 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16277 tg3_asic_rev(tp) == ASIC_REV_5701)))
16278 tg3_flag_set(tp, SRAM_USE_CONFIG);
16279
16280 /* The memory arbiter has to be enabled in order for SRAM accesses
16281 * to succeed. Normally on powerup the tg3 chip firmware will make
16282 * sure it is enabled, but other entities such as system netboot
16283 * code might disable it.
16284 */
16285 val = tr32(MEMARB_MODE);
16286 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16287
16288 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16289 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16290 tg3_flag(tp, 5780_CLASS)) {
16291 if (tg3_flag(tp, PCIX_MODE)) {
16292 pci_read_config_dword(tp->pdev,
16293 tp->pcix_cap + PCI_X_STATUS,
16294 &val);
16295 tp->pci_fn = val & 0x7;
16296 }
16297 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16298 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16299 tg3_asic_rev(tp) == ASIC_REV_5720) {
16300 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16301 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16302 val = tr32(TG3_CPMU_STATUS);
16303
16304 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16305 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16306 else
16307 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16308 TG3_CPMU_STATUS_FSHFT_5719;
16309 }
16310
16311 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16312 tp->write32_tx_mbox = tg3_write_flush_reg32;
16313 tp->write32_rx_mbox = tg3_write_flush_reg32;
16314 }
16315
16316 /* Get eeprom hw config before calling tg3_set_power_state().
16317 * In particular, the TG3_FLAG_IS_NIC flag must be
16318 * determined before calling tg3_set_power_state() so that
16319 * we know whether or not to switch out of Vaux power.
16320 * When the flag is set, it means that GPIO1 is used for eeprom
16321 * write protect and also implies that it is a LOM where GPIOs
16322 * are not used to switch power.
16323 */
16324 tg3_get_eeprom_hw_cfg(tp);
16325
16326 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16327 tg3_flag_clear(tp, TSO_CAPABLE);
16328 tg3_flag_clear(tp, TSO_BUG);
16329 tp->fw_needed = NULL;
16330 }
16331
16332 if (tg3_flag(tp, ENABLE_APE)) {
16333 /* Allow reads and writes to the
16334 * APE register and memory space.
16335 */
16336 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16337 PCISTATE_ALLOW_APE_SHMEM_WR |
16338 PCISTATE_ALLOW_APE_PSPACE_WR;
16339 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16340 pci_state_reg);
16341
16342 tg3_ape_lock_init(tp);
16343 }
16344
16345 /* Set up tp->grc_local_ctrl before calling
16346 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16347 * will bring 5700's external PHY out of reset.
16348 * It is also used as eeprom write protect on LOMs.
16349 */
16350 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16351 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16352 tg3_flag(tp, EEPROM_WRITE_PROT))
16353 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16354 GRC_LCLCTRL_GPIO_OUTPUT1);
16355 /* Unused GPIO3 must be driven as output on 5752 because there
16356 * are no pull-up resistors on unused GPIO pins.
16357 */
16358 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16359 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16360
16361 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16362 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16363 tg3_flag(tp, 57765_CLASS))
16364 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16365
16366 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16367 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16368 /* Turn off the debug UART. */
16369 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16370 if (tg3_flag(tp, IS_NIC))
16371 /* Keep VMain power. */
16372 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16373 GRC_LCLCTRL_GPIO_OUTPUT0;
16374 }
16375
16376 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16377 tp->grc_local_ctrl |=
16378 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16379
16380 /* Switch out of Vaux if it is a NIC */
16381 tg3_pwrsrc_switch_to_vmain(tp);
16382
16383 /* Derive initial jumbo mode from MTU assigned in
16384 * ether_setup() via the alloc_etherdev() call
16385 */
16386 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16387 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16388
16389 /* Determine WakeOnLan speed to use. */
16390 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16391 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16392 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16393 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16394 tg3_flag_clear(tp, WOL_SPEED_100MB);
16395 } else {
16396 tg3_flag_set(tp, WOL_SPEED_100MB);
16397 }
16398
16399 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16400 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16401
16402 /* A few boards don't want the Ethernet@WireSpeed phy feature */
16403 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16404 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16405 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16406 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16407 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16408 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16409 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16410
16411 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16412 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16413 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16414 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16415 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16416
16417 if (tg3_flag(tp, 5705_PLUS) &&
16418 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16419 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16420 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16421 !tg3_flag(tp, 57765_PLUS)) {
16422 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16423 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16424 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16425 tg3_asic_rev(tp) == ASIC_REV_5761) {
16426 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16427 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16428 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16429 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16430 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16431 } else
16432 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16433 }
16434
16435 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16436 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16437 tp->phy_otp = tg3_read_otp_phycfg(tp);
16438 if (tp->phy_otp == 0)
16439 tp->phy_otp = TG3_OTP_DEFAULT;
16440 }
16441
16442 if (tg3_flag(tp, CPMU_PRESENT))
16443 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16444 else
16445 tp->mi_mode = MAC_MI_MODE_BASE;
16446
16447 tp->coalesce_mode = 0;
16448 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16449 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16450 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16451
16452 /* Set these bits to enable statistics workaround. */
16453 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16454 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16455 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16456 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16457 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16458 }
16459
16460 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16461 tg3_asic_rev(tp) == ASIC_REV_57780)
16462 tg3_flag_set(tp, USE_PHYLIB);
16463
16464 err = tg3_mdio_init(tp);
16465 if (err)
16466 return err;
16467
16468 /* Initialize data/descriptor byte/word swapping. */
16469 val = tr32(GRC_MODE);
16470 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16471 tg3_asic_rev(tp) == ASIC_REV_5762)
16472 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16473 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16474 GRC_MODE_B2HRX_ENABLE |
16475 GRC_MODE_HTX2B_ENABLE |
16476 GRC_MODE_HOST_STACKUP);
16477 else
16478 val &= GRC_MODE_HOST_STACKUP;
16479
16480 tw32(GRC_MODE, val | tp->grc_mode);
16481
16482 tg3_switch_clocks(tp);
16483
16484 /* Clear this out for sanity. */
16485 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16486
16487 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16488 &pci_state_reg);
16489 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16490 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16491 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16492 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16493 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16494 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16495 void __iomem *sram_base;
16496
16497 /* Write some dummy words into the SRAM status block
16498 * area and see if they read back correctly. If the
16499 * readback is bad, force-enable the PCI-X workaround.
16500 */
16501 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16502
16503 writel(0x00000000, sram_base);
16504 writel(0x00000000, sram_base + 4);
16505 writel(0xffffffff, sram_base + 4);
16506 if (readl(sram_base) != 0x00000000)
16507 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16508 }
16509 }
16510
16511 udelay(50);
16512 tg3_nvram_init(tp);
16513
16514 /* If the device has an NVRAM, no need to load patch firmware */
16515 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16516 !tg3_flag(tp, NO_NVRAM))
16517 tp->fw_needed = NULL;
16518
16519 grc_misc_cfg = tr32(GRC_MISC_CFG);
16520 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16521
16522 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16523 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16524 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16525 tg3_flag_set(tp, IS_5788);
16526
16527 if (!tg3_flag(tp, IS_5788) &&
16528 tg3_asic_rev(tp) != ASIC_REV_5700)
16529 tg3_flag_set(tp, TAGGED_STATUS);
16530 if (tg3_flag(tp, TAGGED_STATUS)) {
16531 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16532 HOSTCC_MODE_CLRTICK_TXBD);
16533
16534 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16535 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16536 tp->misc_host_ctrl);
16537 }
16538
16539 /* Preserve the APE MAC_MODE bits */
16540 if (tg3_flag(tp, ENABLE_APE))
16541 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16542 else
16543 tp->mac_mode = 0;
16544
16545 if (tg3_10_100_only_device(tp, ent))
16546 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16547
16548 err = tg3_phy_probe(tp);
16549 if (err) {
16550 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16551 /* ... but do not return immediately ... */
16552 tg3_mdio_fini(tp);
16553 }
16554
16555 tg3_read_vpd(tp);
16556 tg3_read_fw_ver(tp);
16557
16558 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16559 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16560 } else {
16561 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16562 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16563 else
16564 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16565 }
16566
16567 /* 5700 {AX,BX} chips have a broken status block link
16568 * change bit implementation, so we must use the
16569 * status register in those cases.
16570 */
16571 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16572 tg3_flag_set(tp, USE_LINKCHG_REG);
16573 else
16574 tg3_flag_clear(tp, USE_LINKCHG_REG);
16575
16576 /* The led_ctrl is set during tg3_phy_probe; here we might
16577 * have to force the link status polling mechanism based
16578 * upon subsystem IDs.
16579 */
16580 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16581 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16582 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16583 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16584 tg3_flag_set(tp, USE_LINKCHG_REG);
16585 }
16586
16587 /* For all SERDES we poll the MAC status register. */
16588 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16589 tg3_flag_set(tp, POLL_SERDES);
16590 else
16591 tg3_flag_clear(tp, POLL_SERDES);
16592
16593 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16594 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16595 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16596 tg3_flag(tp, PCIX_MODE)) {
16597 tp->rx_offset = NET_SKB_PAD;
16598 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16599 tp->rx_copy_thresh = ~(u16)0;
16600 #endif
16601 }
16602
16603 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16604 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16605 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16606
16607 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16608
16609 /* Increment the rx prod index on the rx std ring by at most
16610 * 8 for these chips to work around hw errata.
16611 */
16612 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16613 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16614 tg3_asic_rev(tp) == ASIC_REV_5755)
16615 tp->rx_std_max_post = 8;
16616
16617 if (tg3_flag(tp, ASPM_WORKAROUND))
16618 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16619 PCIE_PWR_MGMT_L1_THRESH_MSK;
16620
16621 return err;
16622 }
16623
16624 #ifdef CONFIG_SPARC
16625 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16626 {
16627 struct net_device *dev = tp->dev;
16628 struct pci_dev *pdev = tp->pdev;
16629 struct device_node *dp = pci_device_to_OF_node(pdev);
16630 const unsigned char *addr;
16631 int len;
16632
16633 addr = of_get_property(dp, "local-mac-address", &len);
16634 if (addr && len == 6) {
16635 memcpy(dev->dev_addr, addr, 6);
16636 return 0;
16637 }
16638 return -ENODEV;
16639 }
16640
16641 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16642 {
16643 struct net_device *dev = tp->dev;
16644
16645 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16646 return 0;
16647 }
16648 #endif
16649
16650 static int tg3_get_device_address(struct tg3 *tp)
16651 {
16652 struct net_device *dev = tp->dev;
16653 u32 hi, lo, mac_offset;
16654 int addr_ok = 0;
16655 int err;
16656
16657 #ifdef CONFIG_SPARC
16658 if (!tg3_get_macaddr_sparc(tp))
16659 return 0;
16660 #endif
16661
16662 if (tg3_flag(tp, IS_SSB_CORE)) {
16663 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16664 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16665 return 0;
16666 }
16667
16668 mac_offset = 0x7c;
16669 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16670 tg3_flag(tp, 5780_CLASS)) {
16671 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16672 mac_offset = 0xcc;
16673 if (tg3_nvram_lock(tp))
16674 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16675 else
16676 tg3_nvram_unlock(tp);
16677 } else if (tg3_flag(tp, 5717_PLUS)) {
16678 if (tp->pci_fn & 1)
16679 mac_offset = 0xcc;
16680 if (tp->pci_fn > 1)
16681 mac_offset += 0x18c;
16682 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16683 mac_offset = 0x10;
16684
16685 /* First try to get it from MAC address mailbox. */
16686 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
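/* Editorial note: 0x484b is ASCII "HK", the bootcode's marker that a
 * valid MAC address follows in the mailbox (marker meaning inferred
 * from the check below).
 */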
16687 if ((hi >> 16) == 0x484b) {
16688 dev->dev_addr[0] = (hi >> 8) & 0xff;
16689 dev->dev_addr[1] = (hi >> 0) & 0xff;
16690
16691 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16692 dev->dev_addr[2] = (lo >> 24) & 0xff;
16693 dev->dev_addr[3] = (lo >> 16) & 0xff;
16694 dev->dev_addr[4] = (lo >> 8) & 0xff;
16695 dev->dev_addr[5] = (lo >> 0) & 0xff;
16696
16697 /* Some old bootcode may report a 0 MAC address in SRAM */
16698 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16699 }
16700 if (!addr_ok) {
16701 /* Next, try NVRAM. */
16702 if (!tg3_flag(tp, NO_NVRAM) &&
16703 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16704 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16705 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16706 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16707 }
16708 /* Finally just fetch it out of the MAC control regs. */
16709 else {
16710 hi = tr32(MAC_ADDR_0_HIGH);
16711 lo = tr32(MAC_ADDR_0_LOW);
16712
16713 dev->dev_addr[5] = lo & 0xff;
16714 dev->dev_addr[4] = (lo >> 8) & 0xff;
16715 dev->dev_addr[3] = (lo >> 16) & 0xff;
16716 dev->dev_addr[2] = (lo >> 24) & 0xff;
16717 dev->dev_addr[1] = hi & 0xff;
16718 dev->dev_addr[0] = (hi >> 8) & 0xff;
16719 }
16720 }
16721
16722 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16723 #ifdef CONFIG_SPARC
16724 if (!tg3_get_default_macaddr_sparc(tp))
16725 return 0;
16726 #endif
16727 return -EINVAL;
16728 }
16729 return 0;
16730 }
16731
16732 #define BOUNDARY_SINGLE_CACHELINE 1
16733 #define BOUNDARY_MULTI_CACHELINE 2
16734
16735 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16736 {
16737 int cacheline_size;
16738 u8 byte;
16739 int goal;
16740
16741 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16742 if (byte == 0)
16743 cacheline_size = 1024;
16744 else
16745 cacheline_size = (int) byte * 4;
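/* Editorial note: PCI_CACHE_LINE_SIZE is specified in 32-bit dwords,
 * hence the multiply by 4 -- a register value of 16 means a 64-byte
 * line. A value of 0 (unprogrammed) falls back to the 1024-byte worst
 * case above.
 */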
16746
16747 /* On 5703 and later chips, the boundary bits have no
16748 * effect.
16749 */
16750 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16751 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16752 !tg3_flag(tp, PCI_EXPRESS))
16753 goto out;
16754
16755 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16756 goal = BOUNDARY_MULTI_CACHELINE;
16757 #else
16758 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16759 goal = BOUNDARY_SINGLE_CACHELINE;
16760 #else
16761 goal = 0;
16762 #endif
16763 #endif
16764
16765 if (tg3_flag(tp, 57765_PLUS)) {
16766 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16767 goto out;
16768 }
16769
16770 if (!goal)
16771 goto out;
16772
16773 /* PCI controllers on most RISC systems tend to disconnect
16774 * when a device tries to burst across a cache-line boundary.
16775 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16776 *
16777 * Unfortunately, for PCI-E there are only limited
16778 * write-side controls for this, and thus for reads
16779 * we will still get the disconnects. We'll also waste
16780 * these PCI cycles for both read and write on chips other
16781 * than the 5700 and 5701, which do not implement the
16782 * boundary bits.
16783 */
16784 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16785 switch (cacheline_size) {
16786 case 16:
16787 case 32:
16788 case 64:
16789 case 128:
16790 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16791 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16792 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16793 } else {
16794 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16795 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16796 }
16797 break;
16798
16799 case 256:
16800 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16801 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16802 break;
16803
16804 default:
16805 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16806 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16807 break;
16808 }
16809 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16810 switch (cacheline_size) {
16811 case 16:
16812 case 32:
16813 case 64:
16814 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16815 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16816 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16817 break;
16818 }
16819 /* fallthrough */
16820 case 128:
16821 default:
16822 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16823 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16824 break;
16825 }
16826 } else {
16827 switch (cacheline_size) {
16828 case 16:
16829 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16830 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16831 DMA_RWCTRL_WRITE_BNDRY_16);
16832 break;
16833 }
16834 /* fallthrough */
16835 case 32:
16836 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16837 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16838 DMA_RWCTRL_WRITE_BNDRY_32);
16839 break;
16840 }
16841 /* fallthrough */
16842 case 64:
16843 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16844 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16845 DMA_RWCTRL_WRITE_BNDRY_64);
16846 break;
16847 }
16848 /* fallthrough */
16849 case 128:
16850 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16851 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16852 DMA_RWCTRL_WRITE_BNDRY_128);
16853 break;
16854 }
16855 /* fallthrough */
16856 case 256:
16857 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16858 DMA_RWCTRL_WRITE_BNDRY_256);
16859 break;
16860 case 512:
16861 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16862 DMA_RWCTRL_WRITE_BNDRY_512);
16863 break;
16864 case 1024:
16865 default:
16866 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16867 DMA_RWCTRL_WRITE_BNDRY_1024);
16868 break;
16869 }
16870 }
16871
16872 out:
16873 return val;
16874 }
16875
16876 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16877 int size, bool to_device)
16878 {
16879 struct tg3_internal_buffer_desc test_desc;
16880 u32 sram_dma_descs;
16881 int i, ret;
16882
16883 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16884
16885 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16886 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16887 tw32(RDMAC_STATUS, 0);
16888 tw32(WDMAC_STATUS, 0);
16889
16890 tw32(BUFMGR_MODE, 0);
16891 tw32(FTQ_RESET, 0);
16892
16893 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16894 test_desc.addr_lo = buf_dma & 0xffffffff;
16895 test_desc.nic_mbuf = 0x00002100;
16896 test_desc.len = size;
16897
16898 /*
16899 * HP ZX1 systems were seeing test failures for 5701 cards running
16900 * at 33MHz the *second* time the tg3 driver was loaded after an
16901 * initial scan.
16902 *
16903 * Broadcom tells me:
16904 * ...the DMA engine is connected to the GRC block and a DMA
16905 * reset may affect the GRC block in some unpredictable way...
16906 * The behavior of resets to individual blocks has not been tested.
16907 *
16908 * Broadcom noted the GRC reset will also reset all sub-components.
16909 */
16910 if (to_device) {
16911 test_desc.cqid_sqid = (13 << 8) | 2;
16912
16913 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16914 udelay(40);
16915 } else {
16916 test_desc.cqid_sqid = (16 << 8) | 7;
16917
16918 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16919 udelay(40);
16920 }
16921 test_desc.flags = 0x00000005;
16922
16923 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16924 u32 val;
16925
16926 val = *(((u32 *)&test_desc) + i);
16927 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16928 sram_dma_descs + (i * sizeof(u32)));
16929 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16930 }
16931 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16932
16933 if (to_device)
16934 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16935 else
16936 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16937
16938 ret = -ENODEV;
16939 for (i = 0; i < 40; i++) {
16940 u32 val;
16941
16942 if (to_device)
16943 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16944 else
16945 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16946 if ((val & 0xffff) == sram_dma_descs) {
16947 ret = 0;
16948 break;
16949 }
16950
16951 udelay(100);
16952 }
16953
16954 return ret;
16955 }
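/* Editorial note: the polling loop above waits at most
 * 40 * 100us = 4ms for the completion FIFO to echo sram_dma_descs, so
 * -ENODEV means the transfer never completed. Data integrity is
 * checked separately by the read-back compare in tg3_test_dma().
 */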
16956
16957 #define TEST_BUFFER_SIZE 0x2000
16958
16959 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16960 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16961 { },
16962 };
16963
16964 static int tg3_test_dma(struct tg3 *tp)
16965 {
16966 dma_addr_t buf_dma;
16967 u32 *buf, saved_dma_rwctrl;
16968 int ret = 0;
16969
16970 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16971 &buf_dma, GFP_KERNEL);
16972 if (!buf) {
16973 ret = -ENOMEM;
16974 goto out_nofree;
16975 }
16976
16977 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16978 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16979
16980 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16981
16982 if (tg3_flag(tp, 57765_PLUS))
16983 goto out;
16984
16985 if (tg3_flag(tp, PCI_EXPRESS)) {
16986 /* DMA read watermark not used on PCIE */
16987 tp->dma_rwctrl |= 0x00180000;
16988 } else if (!tg3_flag(tp, PCIX_MODE)) {
16989 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16990 tg3_asic_rev(tp) == ASIC_REV_5750)
16991 tp->dma_rwctrl |= 0x003f0000;
16992 else
16993 tp->dma_rwctrl |= 0x003f000f;
16994 } else {
16995 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16996 tg3_asic_rev(tp) == ASIC_REV_5704) {
16997 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16998 u32 read_water = 0x7;
16999
17000 /* If the 5704 is behind the EPB bridge, we can
17001 * do the less restrictive ONE_DMA workaround for
17002 * better performance.
17003 */
17004 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17005 tg3_asic_rev(tp) == ASIC_REV_5704)
17006 tp->dma_rwctrl |= 0x8000;
17007 else if (ccval == 0x6 || ccval == 0x7)
17008 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17009
17010 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17011 read_water = 4;
17012 /* Set bit 23 to enable PCIX hw bug fix */
17013 tp->dma_rwctrl |=
17014 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17015 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17016 (1 << 23);
17017 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17018 /* 5780 always in PCIX mode */
17019 tp->dma_rwctrl |= 0x00144000;
17020 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17021 /* 5714 always in PCIX mode */
17022 tp->dma_rwctrl |= 0x00148000;
17023 } else {
17024 tp->dma_rwctrl |= 0x001b000f;
17025 }
17026 }
17027 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17028 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17029
17030 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17031 tg3_asic_rev(tp) == ASIC_REV_5704)
17032 tp->dma_rwctrl &= 0xfffffff0;
17033
17034 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17035 tg3_asic_rev(tp) == ASIC_REV_5701) {
17036 /* Remove this if it causes problems for some boards. */
17037 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17038
17039 /* On 5700/5701 chips, we need to set this bit.
17040 * Otherwise the chip will issue cacheline transactions
17041 * to streamable DMA memory without all of the byte
17042 * enables turned on. This is an error on several
17043 * RISC PCI controllers, in particular sparc64.
17044 *
17045 * On 5703/5704 chips, this bit has been reassigned
17046 * a different meaning. In particular, it is used
17047 * on those chips to enable a PCI-X workaround.
17048 */
17049 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17050 }
17051
17052 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17053
17054 #if 0
17055 /* Unneeded, already done by tg3_get_invariants. */
17056 tg3_switch_clocks(tp);
17057 #endif
17058
17059 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17060 tg3_asic_rev(tp) != ASIC_REV_5701)
17061 goto out;
17062
17063 /* It is best to perform the DMA test with maximum write burst size
17064 * to expose the 5700/5701 write DMA bug.
17065 */
17066 saved_dma_rwctrl = tp->dma_rwctrl;
17067 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17068 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17069
17070 while (1) {
17071 u32 *p = buf, i;
17072
17073 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17074 p[i] = i;
17075
17076 /* Send the buffer to the chip. */
17077 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17078 if (ret) {
17079 dev_err(&tp->pdev->dev,
17080 "%s: Buffer write failed. err = %d\n",
17081 __func__, ret);
17082 break;
17083 }
17084
17085 #if 0
17086 /* validate data reached card RAM correctly. */
17087 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17088 u32 val;
17089 tg3_read_mem(tp, 0x2100 + (i*4), &val);
17090 if (le32_to_cpu(val) != p[i]) {
17091 dev_err(&tp->pdev->dev,
17092 "%s: Buffer corrupted on device! "
17093 "(%d != %d)\n", __func__, val, i);
17094 /* ret = -ENODEV here? */
17095 }
17096 p[i] = 0;
17097 }
17098 #endif
17099 /* Now read it back. */
17100 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17101 if (ret) {
17102 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17103 "err = %d\n", __func__, ret);
17104 break;
17105 }
17106
17107 /* Verify it. */
17108 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17109 if (p[i] == i)
17110 continue;
17111
17112 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17113 DMA_RWCTRL_WRITE_BNDRY_16) {
17114 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17115 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17116 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17117 break;
17118 } else {
17119 dev_err(&tp->pdev->dev,
17120 "%s: Buffer corrupted on read back! "
17121 "(%d != %d)\n", __func__, p[i], i);
17122 ret = -ENODEV;
17123 goto out;
17124 }
17125 }
17126
17127 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17128 /* Success. */
17129 ret = 0;
17130 break;
17131 }
17132 }
17133 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17134 DMA_RWCTRL_WRITE_BNDRY_16) {
17135 /* DMA test passed without adjusting the DMA boundary;
17136 * now look for chipsets that are known to expose the
17137 * DMA bug without failing the test.
17138 */
17139 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17140 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17141 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17142 } else {
17143 /* Safe to use the calculated DMA boundary. */
17144 tp->dma_rwctrl = saved_dma_rwctrl;
17145 }
17146
17147 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17148 }
17149
17150 out:
17151 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17152 out_nofree:
17153 return ret;
17154 }
17155
17156 static void tg3_init_bufmgr_config(struct tg3 *tp)
17157 {
17158 if (tg3_flag(tp, 57765_PLUS)) {
17159 tp->bufmgr_config.mbuf_read_dma_low_water =
17160 DEFAULT_MB_RDMA_LOW_WATER_5705;
17161 tp->bufmgr_config.mbuf_mac_rx_low_water =
17162 DEFAULT_MB_MACRX_LOW_WATER_57765;
17163 tp->bufmgr_config.mbuf_high_water =
17164 DEFAULT_MB_HIGH_WATER_57765;
17165
17166 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17167 DEFAULT_MB_RDMA_LOW_WATER_5705;
17168 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17169 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17170 tp->bufmgr_config.mbuf_high_water_jumbo =
17171 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17172 } else if (tg3_flag(tp, 5705_PLUS)) {
17173 tp->bufmgr_config.mbuf_read_dma_low_water =
17174 DEFAULT_MB_RDMA_LOW_WATER_5705;
17175 tp->bufmgr_config.mbuf_mac_rx_low_water =
17176 DEFAULT_MB_MACRX_LOW_WATER_5705;
17177 tp->bufmgr_config.mbuf_high_water =
17178 DEFAULT_MB_HIGH_WATER_5705;
17179 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17180 tp->bufmgr_config.mbuf_mac_rx_low_water =
17181 DEFAULT_MB_MACRX_LOW_WATER_5906;
17182 tp->bufmgr_config.mbuf_high_water =
17183 DEFAULT_MB_HIGH_WATER_5906;
17184 }
17185
17186 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17187 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17188 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17189 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17190 tp->bufmgr_config.mbuf_high_water_jumbo =
17191 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17192 } else {
17193 tp->bufmgr_config.mbuf_read_dma_low_water =
17194 DEFAULT_MB_RDMA_LOW_WATER;
17195 tp->bufmgr_config.mbuf_mac_rx_low_water =
17196 DEFAULT_MB_MACRX_LOW_WATER;
17197 tp->bufmgr_config.mbuf_high_water =
17198 DEFAULT_MB_HIGH_WATER;
17199
17200 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17201 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17202 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17203 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17204 tp->bufmgr_config.mbuf_high_water_jumbo =
17205 DEFAULT_MB_HIGH_WATER_JUMBO;
17206 }
17207
17208 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17209 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17210 }
17211
17212 static char *tg3_phy_string(struct tg3 *tp)
17213 {
17214 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17215 case TG3_PHY_ID_BCM5400: return "5400";
17216 case TG3_PHY_ID_BCM5401: return "5401";
17217 case TG3_PHY_ID_BCM5411: return "5411";
17218 case TG3_PHY_ID_BCM5701: return "5701";
17219 case TG3_PHY_ID_BCM5703: return "5703";
17220 case TG3_PHY_ID_BCM5704: return "5704";
17221 case TG3_PHY_ID_BCM5705: return "5705";
17222 case TG3_PHY_ID_BCM5750: return "5750";
17223 case TG3_PHY_ID_BCM5752: return "5752";
17224 case TG3_PHY_ID_BCM5714: return "5714";
17225 case TG3_PHY_ID_BCM5780: return "5780";
17226 case TG3_PHY_ID_BCM5755: return "5755";
17227 case TG3_PHY_ID_BCM5787: return "5787";
17228 case TG3_PHY_ID_BCM5784: return "5784";
17229 case TG3_PHY_ID_BCM5756: return "5722/5756";
17230 case TG3_PHY_ID_BCM5906: return "5906";
17231 case TG3_PHY_ID_BCM5761: return "5761";
17232 case TG3_PHY_ID_BCM5718C: return "5718C";
17233 case TG3_PHY_ID_BCM5718S: return "5718S";
17234 case TG3_PHY_ID_BCM57765: return "57765";
17235 case TG3_PHY_ID_BCM5719C: return "5719C";
17236 case TG3_PHY_ID_BCM5720C: return "5720C";
17237 case TG3_PHY_ID_BCM5762: return "5762C";
17238 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17239 case 0: return "serdes";
17240 default: return "unknown";
17241 }
17242 }
17243
17244 static char *tg3_bus_string(struct tg3 *tp, char *str)
17245 {
17246 if (tg3_flag(tp, PCI_EXPRESS)) {
17247 strcpy(str, "PCI Express");
17248 return str;
17249 } else if (tg3_flag(tp, PCIX_MODE)) {
17250 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17251
17252 strcpy(str, "PCIX:");
17253
17254 if ((clock_ctrl == 7) ||
17255 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17256 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17257 strcat(str, "133MHz");
17258 else if (clock_ctrl == 0)
17259 strcat(str, "33MHz");
17260 else if (clock_ctrl == 2)
17261 strcat(str, "50MHz");
17262 else if (clock_ctrl == 4)
17263 strcat(str, "66MHz");
17264 else if (clock_ctrl == 6)
17265 strcat(str, "100MHz");
17266 } else {
17267 strcpy(str, "PCI:");
17268 if (tg3_flag(tp, PCI_HIGH_SPEED))
17269 strcat(str, "66MHz");
17270 else
17271 strcat(str, "33MHz");
17272 }
17273 if (tg3_flag(tp, PCI_32BIT))
17274 strcat(str, ":32-bit");
17275 else
17276 strcat(str, ":64-bit");
17277 return str;
17278 }
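/* Illustrative outputs (configurations hypothetical): a PCI-X part at
 * 133MHz on a 64-bit bus formats as "PCIX:133MHz:64-bit"; a plain PCI
 * part at 66MHz on a 32-bit bus as "PCI:66MHz:32-bit".
 */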
17279
17280 static void tg3_init_coal(struct tg3 *tp)
17281 {
17282 struct ethtool_coalesce *ec = &tp->coal;
17283
17284 memset(ec, 0, sizeof(*ec));
17285 ec->cmd = ETHTOOL_GCOALESCE;
17286 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17287 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17288 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17289 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17290 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17291 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17292 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17293 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17294 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17295
17296 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17297 HOSTCC_MODE_CLRTICK_TXBD)) {
17298 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17299 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17300 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17301 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17302 }
17303
17304 if (tg3_flag(tp, 5705_PLUS)) {
17305 ec->rx_coalesce_usecs_irq = 0;
17306 ec->tx_coalesce_usecs_irq = 0;
17307 ec->stats_block_coalesce_usecs = 0;
17308 }
17309 }
17310
17311 static int tg3_init_one(struct pci_dev *pdev,
17312 const struct pci_device_id *ent)
17313 {
17314 struct net_device *dev;
17315 struct tg3 *tp;
17316 int i, err;
17317 u32 sndmbx, rcvmbx, intmbx;
17318 char str[40];
17319 u64 dma_mask, persist_dma_mask;
17320 netdev_features_t features = 0;
17321
17322 printk_once(KERN_INFO "%s\n", version);
17323
17324 err = pci_enable_device(pdev);
17325 if (err) {
17326 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17327 return err;
17328 }
17329
17330 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17331 if (err) {
17332 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17333 goto err_out_disable_pdev;
17334 }
17335
17336 pci_set_master(pdev);
17337
17338 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17339 if (!dev) {
17340 err = -ENOMEM;
17341 goto err_out_free_res;
17342 }
17343
17344 SET_NETDEV_DEV(dev, &pdev->dev);
17345
17346 tp = netdev_priv(dev);
17347 tp->pdev = pdev;
17348 tp->dev = dev;
17349 tp->pm_cap = pdev->pm_cap;
17350 tp->rx_mode = TG3_DEF_RX_MODE;
17351 tp->tx_mode = TG3_DEF_TX_MODE;
17352 tp->irq_sync = 1;
17353
17354 if (tg3_debug > 0)
17355 tp->msg_enable = tg3_debug;
17356 else
17357 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17358
17359 if (pdev_is_ssb_gige_core(pdev)) {
17360 tg3_flag_set(tp, IS_SSB_CORE);
17361 if (ssb_gige_must_flush_posted_writes(pdev))
17362 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17363 if (ssb_gige_one_dma_at_once(pdev))
17364 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17365 if (ssb_gige_have_roboswitch(pdev))
17366 tg3_flag_set(tp, ROBOSWITCH);
17367 if (ssb_gige_is_rgmii(pdev))
17368 tg3_flag_set(tp, RGMII_MODE);
17369 }
17370
17371 /* The word/byte swap controls here govern register access byte
17372 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17373 * setting below.
17374 */
17375 tp->misc_host_ctrl =
17376 MISC_HOST_CTRL_MASK_PCI_INT |
17377 MISC_HOST_CTRL_WORD_SWAP |
17378 MISC_HOST_CTRL_INDIR_ACCESS |
17379 MISC_HOST_CTRL_PCISTATE_RW;
17380
17381 /* The NONFRM (non-frame) byte/word swap controls take effect
17382 * on descriptor entries, anything which isn't packet data.
17383 *
17384 * The StrongARM chips on the board (one for tx, one for rx)
17385 * are running in big-endian mode.
17386 */
17387 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17388 GRC_MODE_WSWAP_NONFRM_DATA);
17389 #ifdef __BIG_ENDIAN
17390 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17391 #endif
17392 spin_lock_init(&tp->lock);
17393 spin_lock_init(&tp->indirect_lock);
17394 INIT_WORK(&tp->reset_task, tg3_reset_task);
17395
17396 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17397 if (!tp->regs) {
17398 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17399 err = -ENOMEM;
17400 goto err_out_free_dev;
17401 }
17402
17403 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17404 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17405 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17406 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17407 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17408 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17409 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17410 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17411 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17412 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17413 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17414 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17415 tg3_flag_set(tp, ENABLE_APE);
17416 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17417 if (!tp->aperegs) {
17418 dev_err(&pdev->dev,
17419 "Cannot map APE registers, aborting\n");
17420 err = -ENOMEM;
17421 goto err_out_iounmap;
17422 }
17423 }
17424
17425 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17426 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17427
17428 dev->ethtool_ops = &tg3_ethtool_ops;
17429 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17430 dev->netdev_ops = &tg3_netdev_ops;
17431 dev->irq = pdev->irq;
17432
17433 err = tg3_get_invariants(tp, ent);
17434 if (err) {
17435 dev_err(&pdev->dev,
17436 "Problem fetching invariants of chip, aborting\n");
17437 goto err_out_apeunmap;
17438 }
17439
17440 /* The EPB bridge inside the 5714, 5715, and 5780, and any
17441 * device behind the EPB, cannot support DMA addresses wider than 40 bits.
17442 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17443 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17444 * do DMA address check in tg3_start_xmit().
17445 */
17446 if (tg3_flag(tp, IS_5788))
17447 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17448 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17449 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17450 #ifdef CONFIG_HIGHMEM
17451 dma_mask = DMA_BIT_MASK(64);
17452 #endif
17453 } else
17454 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17455
17456 /* Configure DMA attributes. */
17457 if (dma_mask > DMA_BIT_MASK(32)) {
17458 err = pci_set_dma_mask(pdev, dma_mask);
17459 if (!err) {
17460 features |= NETIF_F_HIGHDMA;
17461 err = pci_set_consistent_dma_mask(pdev,
17462 persist_dma_mask);
17463 if (err < 0) {
17464 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17465 "DMA for consistent allocations\n");
17466 goto err_out_apeunmap;
17467 }
17468 }
17469 }
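/* Fall back to a 32-bit mask if the wider mask was rejected or was never
 * attempted. DMA_BIT_MASK(n) is the n-bit all-ones value, so
 * DMA_BIT_MASK(32) == 0xffffffffULL.
 */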
17470 if (err || dma_mask == DMA_BIT_MASK(32)) {
17471 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17472 if (err) {
17473 dev_err(&pdev->dev,
17474 "No usable DMA configuration, aborting\n");
17475 goto err_out_apeunmap;
17476 }
17477 }
17478
17479 tg3_init_bufmgr_config(tp);
17480
17481 features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17482
17483 /* 5700 B0 chips do not support checksumming correctly due
17484 * to hardware bugs.
17485 */
17486 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17487 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17488
17489 if (tg3_flag(tp, 5755_PLUS))
17490 features |= NETIF_F_IPV6_CSUM;
17491 }
17492
17493 /* TSO is on by default on chips that support hardware TSO.
17494 * Firmware TSO on older chips gives lower performance, so it
17495 * is off by default, but can be enabled using ethtool.
17496 */
17497 if ((tg3_flag(tp, HW_TSO_1) ||
17498 tg3_flag(tp, HW_TSO_2) ||
17499 tg3_flag(tp, HW_TSO_3)) &&
17500 (features & NETIF_F_IP_CSUM))
17501 features |= NETIF_F_TSO;
17502 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17503 if (features & NETIF_F_IPV6_CSUM)
17504 features |= NETIF_F_TSO6;
17505 if (tg3_flag(tp, HW_TSO_3) ||
17506 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17507 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17508 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17509 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17510 tg3_asic_rev(tp) == ASIC_REV_57780)
17511 features |= NETIF_F_TSO_ECN;
17512 }
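/* Firmware TSO on the older chips stays off by default but remains
 * toggleable from userspace, for example (the interface name is an
 * assumption):
 *
 *	# ethtool -K eth0 tso on
 */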
17513
17514 dev->features |= features;
17515 dev->vlan_features |= features;
17516
17517 /*
17518 * Add loopback capability only for a subset of devices that support
17519 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17520 * loopback for the remaining devices.
17521 */
17522 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17523 !tg3_flag(tp, CPMU_PRESENT))
17524 /* Add the loopback capability */
17525 features |= NETIF_F_LOOPBACK;
17526
17527 dev->hw_features |= features;
17528
17529 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17530 !tg3_flag(tp, TSO_CAPABLE) &&
17531 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17532 tg3_flag_set(tp, MAX_RXPEND_64);
17533 tp->rx_pending = 63;
17534 }
17535
17536 err = tg3_get_device_address(tp);
17537 if (err) {
17538 dev_err(&pdev->dev,
17539 "Could not obtain valid ethernet address, aborting\n");
17540 goto err_out_apeunmap;
17541 }
17542
17543 /*
17544 * Reset the chip in case the UNDI or EFI driver did not shut it
17545 * down cleanly; otherwise the DMA self test will enable the WDMAC
17546 * and we'll see (spurious) pending DMA on the PCI bus at that point.
17547 */
17548 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17549 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17550 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17551 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17552 }
17553
17554 err = tg3_test_dma(tp);
17555 if (err) {
17556 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17557 goto err_out_apeunmap;
17558 }
17559
17560 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17561 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17562 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17563 for (i = 0; i < tp->irq_max; i++) {
17564 struct tg3_napi *tnapi = &tp->napi[i];
17565
17566 tnapi->tp = tp;
17567 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17568
17569 tnapi->int_mbox = intmbx;
17570 if (i <= 4)
17571 intmbx += 0x8;
17572 else
17573 intmbx += 0x4;
17574
17575 tnapi->consmbox = rcvmbx;
17576 tnapi->prodmbox = sndmbx;
17577
17578 if (i)
17579 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17580 else
17581 tnapi->coal_now = HOSTCC_MODE_NOW;
17582
17583 if (!tg3_flag(tp, SUPPORT_MSIX))
17584 break;
17585
17586 /*
17587 * If we support MSIX, we'll be using RSS. If we're using
17588 * RSS, the first vector only handles link interrupts and the
17589 * remaining vectors handle rx and tx interrupts. Reuse the
17590 * mailbox values for the next iteration. The values we set up
17591 * above are still useful for the single vectored mode.
17592 */
17593 if (!i)
17594 continue;
17595
17596 rcvmbx += 0x8;
17597
17598 if (sndmbx & 0x4)
17599 sndmbx -= 0x4;
17600 else
17601 sndmbx += 0xc;
17602 }
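/* Each tg3_napi context now owns its own interrupt, consumer and producer
 * mailboxes; for single-vector (INTx/MSI) operation only the vector-0
 * values programmed on the first pass are used.
 */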
17603
17604 tg3_init_coal(tp);
17605
17606 pci_set_drvdata(pdev, dev);
17607
17608 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17609 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17610 tg3_asic_rev(tp) == ASIC_REV_5762)
17611 tg3_flag_set(tp, PTP_CAPABLE);
17612
17613 tg3_timer_init(tp);
17614
17615 tg3_carrier_off(tp);
17616
17617 err = register_netdev(dev);
17618 if (err) {
17619 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17620 goto err_out_apeunmap;
17621 }
17622
17623 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17624 tp->board_part_number,
17625 tg3_chip_rev_id(tp),
17626 tg3_bus_string(tp, str),
17627 dev->dev_addr);
17628
17629 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17630 struct phy_device *phydev;
17631 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17632 netdev_info(dev,
17633 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17634 phydev->drv->name, dev_name(&phydev->dev));
17635 } else {
17636 char *ethtype;
17637
17638 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17639 ethtype = "10/100Base-TX";
17640 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17641 ethtype = "1000Base-SX";
17642 else
17643 ethtype = "10/100/1000Base-T";
17644
17645 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17646 "(WireSpeed[%d], EEE[%d])\n",
17647 tg3_phy_string(tp), ethtype,
17648 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17649 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17650 }
17651
17652 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17653 (dev->features & NETIF_F_RXCSUM) != 0,
17654 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17655 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17656 tg3_flag(tp, ENABLE_ASF) != 0,
17657 tg3_flag(tp, TSO_CAPABLE) != 0);
17658 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17659 tp->dma_rwctrl,
17660 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17661 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17662
17663 pci_save_state(pdev);
17664
17665 return 0;
17666
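/* Error unwind: each label below releases the resources acquired before
 * the corresponding failure point, in reverse order of acquisition.
 */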
17667 err_out_apeunmap:
17668 if (tp->aperegs) {
17669 iounmap(tp->aperegs);
17670 tp->aperegs = NULL;
17671 }
17672
17673 err_out_iounmap:
17674 if (tp->regs) {
17675 iounmap(tp->regs);
17676 tp->regs = NULL;
17677 }
17678
17679 err_out_free_dev:
17680 free_netdev(dev);
17681
17682 err_out_free_res:
17683 pci_release_regions(pdev);
17684
17685 err_out_disable_pdev:
17686 if (pci_is_enabled(pdev))
17687 pci_disable_device(pdev);
17688 pci_set_drvdata(pdev, NULL);
17689 return err;
17690 }
17691
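/* PCI remove callback: tear down in roughly the reverse order of
 * tg3_init_one(), after first releasing the firmware and cancelling any
 * pending reset work.
 */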
17692 static void tg3_remove_one(struct pci_dev *pdev)
17693 {
17694 struct net_device *dev = pci_get_drvdata(pdev);
17695
17696 if (dev) {
17697 struct tg3 *tp = netdev_priv(dev);
17698
17699 release_firmware(tp->fw);
17700
17701 tg3_reset_task_cancel(tp);
17702
17703 if (tg3_flag(tp, USE_PHYLIB)) {
17704 tg3_phy_fini(tp);
17705 tg3_mdio_fini(tp);
17706 }
17707
17708 unregister_netdev(dev);
17709 if (tp->aperegs) {
17710 iounmap(tp->aperegs);
17711 tp->aperegs = NULL;
17712 }
17713 if (tp->regs) {
17714 iounmap(tp->regs);
17715 tp->regs = NULL;
17716 }
17717 free_netdev(dev);
17718 pci_release_regions(pdev);
17719 pci_disable_device(pdev);
17720 pci_set_drvdata(pdev, NULL);
17721 }
17722 }
17723
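/* System sleep handlers, wired up via SIMPLE_DEV_PM_OPS further below. */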
17724 #ifdef CONFIG_PM_SLEEP
17725 static int tg3_suspend(struct device *device)
17726 {
17727 struct pci_dev *pdev = to_pci_dev(device);
17728 struct net_device *dev = pci_get_drvdata(pdev);
17729 struct tg3 *tp = netdev_priv(dev);
17730 int err;
17731
17732 if (!netif_running(dev))
17733 return 0;
17734
17735 tg3_reset_task_cancel(tp);
17736 tg3_phy_stop(tp);
17737 tg3_netif_stop(tp);
17738
17739 tg3_timer_stop(tp);
17740
17741 tg3_full_lock(tp, 1);
17742 tg3_disable_ints(tp);
17743 tg3_full_unlock(tp);
17744
17745 netif_device_detach(dev);
17746
17747 tg3_full_lock(tp, 0);
17748 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17749 tg3_flag_clear(tp, INIT_COMPLETE);
17750 tg3_full_unlock(tp);
17751
17752 err = tg3_power_down_prepare(tp);
17753 if (err) {
17754 int err2;
17755
17756 tg3_full_lock(tp, 0);
17757
17758 tg3_flag_set(tp, INIT_COMPLETE);
17759 err2 = tg3_restart_hw(tp, true);
17760 if (err2)
17761 goto out;
17762
17763 tg3_timer_start(tp);
17764
17765 netif_device_attach(dev);
17766 tg3_netif_start(tp);
17767
17768 out:
17769 tg3_full_unlock(tp);
17770
17771 if (!err2)
17772 tg3_phy_start(tp);
17773 }
17774
17775 return err;
17776 }
17777
17778 static int tg3_resume(struct device *device)
17779 {
17780 struct pci_dev *pdev = to_pci_dev(device);
17781 struct net_device *dev = pci_get_drvdata(pdev);
17782 struct tg3 *tp = netdev_priv(dev);
17783 int err;
17784
17785 if (!netif_running(dev))
17786 return 0;
17787
17788 netif_device_attach(dev);
17789
17790 tg3_full_lock(tp, 0);
17791
17792 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17793
17794 tg3_flag_set(tp, INIT_COMPLETE);
17795 err = tg3_restart_hw(tp,
17796 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17797 if (err)
17798 goto out;
17799
17800 tg3_timer_start(tp);
17801
17802 tg3_netif_start(tp);
17803
17804 out:
17805 tg3_full_unlock(tp);
17806
17807 if (!err)
17808 tg3_phy_start(tp);
17809
17810 return err;
17811 }
17812 #endif /* CONFIG_PM_SLEEP */
17813
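/* SIMPLE_DEV_PM_OPS() installs tg3_suspend/tg3_resume for all of the
 * system sleep transitions (suspend/resume, freeze/thaw, poweroff/restore).
 */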
17814 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17815
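/* .shutdown callback: detach and close the interface on reboot or halt,
 * but only power the chip down when the system is actually powering off.
 */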
17816 static void tg3_shutdown(struct pci_dev *pdev)
17817 {
17818 struct net_device *dev = pci_get_drvdata(pdev);
17819 struct tg3 *tp = netdev_priv(dev);
17820
17821 rtnl_lock();
17822 netif_device_detach(dev);
17823
17824 if (netif_running(dev))
17825 dev_close(dev);
17826
17827 if (system_state == SYSTEM_POWER_OFF)
17828 tg3_power_down(tp);
17829
17830 rtnl_unlock();
17831 }
17832
17833 /**
17834 * tg3_io_error_detected - called when PCI error is detected
17835 * @pdev: Pointer to PCI device
17836 * @state: The current PCI connection state
17837 *
17838 * This function is called after a PCI bus error affecting
17839 * this device has been detected.
17840 */
17841 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17842 pci_channel_state_t state)
17843 {
17844 struct net_device *netdev = pci_get_drvdata(pdev);
17845 struct tg3 *tp = netdev_priv(netdev);
17846 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17847
17848 netdev_info(netdev, "PCI I/O error detected\n");
17849
17850 rtnl_lock();
17851
17852 /* We probably don't have a netdev yet, or it isn't running */
17853 if (!netdev || !netif_running(netdev))
17854 goto done;
17855
17856 tg3_phy_stop(tp);
17857
17858 tg3_netif_stop(tp);
17859
17860 tg3_timer_stop(tp);
17861
17862 /* Make sure that the reset task doesn't run concurrently */
17863 tg3_reset_task_cancel(tp);
17864
17865 netif_device_detach(netdev);
17866
17867 /* Clean up software state, even if MMIO is blocked */
17868 tg3_full_lock(tp, 0);
17869 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17870 tg3_full_unlock(tp);
17871
17872 done:
17873 if (state == pci_channel_io_perm_failure) {
17874 if (netdev) {
17875 tg3_napi_enable(tp);
17876 dev_close(netdev);
17877 }
17878 err = PCI_ERS_RESULT_DISCONNECT;
17879 } else {
17880 pci_disable_device(pdev);
17881 }
17882
17883 rtnl_unlock();
17884
17885 return err;
17886 }
17887
17888 /**
17889 * tg3_io_slot_reset - called after the PCI bus has been reset.
17890 * @pdev: Pointer to PCI device
17891 *
17892 * Restart the card from scratch, as if from a cold boot.
17893 * At this point, the card has experienced a hard reset,
17894 * followed by fixups by BIOS, and has its config space
17895 * set up identically to what it was at cold boot.
17896 */
17897 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17898 {
17899 struct net_device *netdev = pci_get_drvdata(pdev);
17900 struct tg3 *tp = netdev_priv(netdev);
17901 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17902 int err;
17903
17904 rtnl_lock();
17905
17906 if (pci_enable_device(pdev)) {
17907 dev_err(&pdev->dev,
17908 "Cannot re-enable PCI device after reset.\n");
17909 goto done;
17910 }
17911
17912 pci_set_master(pdev);
17913 pci_restore_state(pdev);
17914 pci_save_state(pdev);
17915
17916 if (!netdev || !netif_running(netdev)) {
17917 rc = PCI_ERS_RESULT_RECOVERED;
17918 goto done;
17919 }
17920
17921 err = tg3_power_up(tp);
17922 if (err)
17923 goto done;
17924
17925 rc = PCI_ERS_RESULT_RECOVERED;
17926
17927 done:
17928 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
17929 tg3_napi_enable(tp);
17930 dev_close(netdev);
17931 }
17932 rtnl_unlock();
17933
17934 return rc;
17935 }
17936
17937 /**
17938 * tg3_io_resume - called when traffic can start flowing again.
17939 * @pdev: Pointer to PCI device
17940 *
17941 * This callback is called when the error recovery driver tells
17942 * us that it's OK to resume normal operation.
17943 */
17944 static void tg3_io_resume(struct pci_dev *pdev)
17945 {
17946 struct net_device *netdev = pci_get_drvdata(pdev);
17947 struct tg3 *tp = netdev_priv(netdev);
17948 int err;
17949
17950 rtnl_lock();
17951
17952 if (!netif_running(netdev))
17953 goto done;
17954
17955 tg3_full_lock(tp, 0);
17956 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17957 tg3_flag_set(tp, INIT_COMPLETE);
17958 err = tg3_restart_hw(tp, true);
17959 if (err) {
17960 tg3_full_unlock(tp);
17961 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17962 goto done;
17963 }
17964
17965 netif_device_attach(netdev);
17966
17967 tg3_timer_start(tp);
17968
17969 tg3_netif_start(tp);
17970
17971 tg3_full_unlock(tp);
17972
17973 tg3_phy_start(tp);
17974
17975 done:
17976 rtnl_unlock();
17977 }
17978
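/* AER recovery hooks. On a successful recovery the PCI core invokes these
 * in order: error_detected, then slot_reset, then resume (see
 * Documentation/PCI/pci-error-recovery.txt).
 */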
17979 static const struct pci_error_handlers tg3_err_handler = {
17980 .error_detected = tg3_io_error_detected,
17981 .slot_reset = tg3_io_slot_reset,
17982 .resume = tg3_io_resume
17983 };
17984
17985 static struct pci_driver tg3_driver = {
17986 .name = DRV_MODULE_NAME,
17987 .id_table = tg3_pci_tbl,
17988 .probe = tg3_init_one,
17989 .remove = tg3_remove_one,
17990 .err_handler = &tg3_err_handler,
17991 .driver.pm = &tg3_pm_ops,
17992 .shutdown = tg3_shutdown,
17993 };
17994
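/* module_pci_driver() expands to module_init()/module_exit() stubs that
 * call pci_register_driver()/pci_unregister_driver() on tg3_driver.
 */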
17995 module_pci_driver(tg3_driver);