/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
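
/*
 * Illustrative usage sketch (not from the original source): the tg3_flag
 * helpers wrap the kernel's atomic bitops so that flag names are checked
 * against enum TG3_FLAGS at compile time via token pasting.  For example,
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, INIT_COMPLETE);
 *
 * expands to test_bit(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags) and
 * set_bit(TG3_FLAG_INIT_COMPLETE, (tp)->tg3_flags) respectively, so a
 * misspelled flag name fails to compile instead of silently testing the
 * wrong bit.
 */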
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			130
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"February 14, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
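
/*
 * Worked example of the shift-and-mask point above (illustrative, not from
 * the original source): because TG3_TX_RING_SIZE is a compile-time power of
 * two (512), the compiler can reduce 'idx % TG3_TX_RING_SIZE' to the same
 * instruction as 'idx & (TG3_TX_RING_SIZE - 1)'.  NEXT_TX(511) therefore
 * evaluates to (512 & 511) == 0, wrapping the producer index back to the
 * start of the ring without a hardware divide.
 */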
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
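
/*
 * Example of how the threshold selection above plays out (illustrative,
 * not from the original source): on x86, NET_IP_ALIGN is 0, so
 * TG3_RX_COPY_THRESH(tp) collapses to the constant 256 and the 5701
 * workaround costs no extra load.  On an architecture with NET_IP_ALIGN
 * of 2 and no efficient unaligned access, the threshold is instead read
 * from tp->rx_copy_thresh at runtime.
 */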
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
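
/*
 * Illustrative sketch of the accessor indirection above (not from the
 * original source): tw32()/tr32() dispatch through function pointers in
 * struct tg3, so one call site works for both direct MMIO and the
 * config-space indirect method, e.g.
 *
 *	tw32_f(MAC_MODE, tp->mac_mode);
 *	val = tr32(MAC_STATUS);
 *
 * becomes tp->write32(tp, ...) / tp->read32(tp, ...) and may resolve to
 * tg3_write32() or tg3_write_indirect_reg32() depending on how the chip
 * was probed.
 */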
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
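
/*
 * Illustrative usage of the SRAM window helpers above (not from the
 * original source): firmware mailboxes live in NIC SRAM, so handshakes
 * read and write fixed offsets through the window, e.g.
 *
 *	u32 val;
 *
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE3);
 *
 * Both helpers restore TG3PCI_MEM_WIN_BASE_ADDR to zero afterwards so a
 * crashed driver never leaves the window pointing into SRAM.
 */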
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
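
/*
 * Usage sketch for the APE lock above (illustrative, not from the original
 * source): callers bracket shared-resource access with matching lock and
 * unlock calls and must honor the -EBUSY return, e.g.
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	... touch APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */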
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
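
/*
 * Illustrative call pattern for the MII helpers above (not from the
 * original source): a zero return means *val holds the 16-bit register
 * contents, so reads are usually chained with the negated return, e.g.
 *
 *	u32 bmsr;
 *
 *	if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *		... link is up ...
 */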
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
1714 /* tp->lock is held. */
1715 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
1717 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
1718 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
1720 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1722 case RESET_KIND_INIT
:
1723 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1727 case RESET_KIND_SHUTDOWN
:
1728 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1732 case RESET_KIND_SUSPEND
:
1733 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1742 if (kind
== RESET_KIND_INIT
||
1743 kind
== RESET_KIND_SUSPEND
)
1744 tg3_ape_driver_state_change(tp
, kind
);
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
1750 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1752 case RESET_KIND_INIT
:
1753 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1754 DRV_STATE_START_DONE
);
1757 case RESET_KIND_SHUTDOWN
:
1758 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1759 DRV_STATE_UNLOAD_DONE
);
1767 if (kind
== RESET_KIND_SHUTDOWN
)
1768 tg3_ape_driver_state_change(tp
, kind
);
1771 /* tp->lock is held. */
1772 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
1774 if (tg3_flag(tp
, ENABLE_ASF
)) {
1776 case RESET_KIND_INIT
:
1777 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1781 case RESET_KIND_SHUTDOWN
:
1782 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1786 case RESET_KIND_SUSPEND
:
1787 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1797 static int tg3_poll_fw(struct tg3
*tp
)
1802 if (tg3_flag(tp
, IS_SSB_CORE
)) {
1803 /* We don't use firmware. */
1807 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
1808 /* Wait up to 20ms for init done. */
1809 for (i
= 0; i
< 200; i
++) {
1810 if (tr32(VCPU_STATUS
) & VCPU_STATUS_INIT_DONE
)
1817 /* Wait for firmware initialization to complete. */
1818 for (i
= 0; i
< 100000; i
++) {
1819 tg3_read_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
, &val
);
1820 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
1825 /* Chip might not be fitted with firmware. Some Sun onboard
1826 * parts are configured like that. So don't signal the timeout
1827 * of the above loop as an error, but do report the lack of
1828 * running firmware once.
1830 if (i
>= 100000 && !tg3_flag(tp
, NO_FWARE_REPORTED
)) {
1831 tg3_flag_set(tp
, NO_FWARE_REPORTED
);
1833 netdev_info(tp
->dev
, "No firmware running\n");
1836 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_57765_A0
) {
1837 /* The 57765 A0 needs a little more
1838 * time to do some important work.
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
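
/*
 * Worked example for the 1000BASE-X pause resolution above (illustrative,
 * not from the original source): if both ends advertise symmetric pause
 * (ADVERTISE_1000XPAUSE set in lcladv and rmtadv), the result is
 * FLOW_CTRL_TX | FLOW_CTRL_RX.  If only the asymmetric bit matches,
 * direction depends on which side also set the symmetric bit: the local
 * side yields FLOW_CTRL_RX, the remote side FLOW_CTRL_TX.  No match means
 * flow control stays off.
 */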
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
2115 static int tg3_phy_set_extloopbk(struct tg3
*tp
)
2120 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
2123 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
2124 /* Cannot do read-modify-write on 5401 */
2125 err
= tg3_phy_auxctl_write(tp
,
2126 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
2127 MII_TG3_AUXCTL_ACTL_EXTLOOPBK
|
2132 err
= tg3_phy_auxctl_read(tp
,
2133 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
2137 val
|= MII_TG3_AUXCTL_ACTL_EXTLOOPBK
;
2138 err
= tg3_phy_auxctl_write(tp
,
2139 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, val
);
2145 static void tg3_phy_fet_toggle_apd(struct tg3
*tp
, bool enable
)
2149 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
2152 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2153 phytest
| MII_TG3_FET_SHADOW_EN
);
2154 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, &phy
)) {
2156 phy
|= MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2158 phy
&= ~MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2159 tg3_writephy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, phy
);
2161 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
static void tg3_carrier_on(struct tg3 *tp)
{
	netif_carrier_on(tp->dev);
	tp->link_up = true;
}

static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		tg3_carrier_off(tp);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
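/* Note (added commentary): the GPIO message word carries one 4-bit
 * field per PCI function, up to four functions.  Within each nibble,
 * bit 0 is DRVR_PRES and bit 1 is NEED_VAUX, which is why the ALL_*
 * masks above OR together shifts of 0, 4, 8 and 12.
 */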
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
	    (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	     !tp->pci_fn))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
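/* Illustrative example (added commentary, not from the original
 * source): because the SEEPROM interface returns data opposite the
 * native endianness, a dword stored in the part as 0x11223344 is read
 * back from GRC_EEPROM_DATA as 0x44332211; the blind swab32() above
 * restores the stored value regardless of host byte order.
 */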
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		buf += size;

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
	}

	return (i == iters) ? -EBUSY : 0;
}
/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}

/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}
/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}

/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
static int tg3_fw_data_len(struct tg3 *tp,
			   const struct tg3_firmware_hdr *fw_hdr)
{
	int fw_len;

	/* Non fragmented firmware have one firmware header followed by a
	 * contiguous chunk of data to be written. The length field in that
	 * header is not the length of data to be written but the complete
	 * length of the bss. The data length is determined based on
	 * tp->fw->size minus headers.
	 *
	 * Fragmented firmware have a main header followed by multiple
	 * fragments. Each fragment is identical to non fragmented firmware
	 * with a firmware header followed by a contiguous chunk of data. In
	 * the main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment i.e. fragment data + header length. Data length is
	 * therefore length field in the header minus TG3_FW_HDR_LEN.
	 */
	if (tp->fw_len == 0xffffffff)
		fw_len = be32_to_cpu(fw_hdr->len);
	else
		fw_len = tp->fw->size;

	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
}
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);

	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
/* tp->lock is held. */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}
static int tg3_setup_phy(struct tg3 *, int);

static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, 0);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
		    tg3_asic_rev(tp) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		break;
	}
}
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}
static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
{
	if (curr_link_up != tp->link_up) {
		if (curr_link_up) {
			tg3_carrier_on(tp);
		} else {
			tg3_carrier_off(tp);
			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}

		tg3_link_report(tp);
		return true;
	}

	return false;
}
*tp
, int force_reset
)
4340 int current_link_up
;
4342 u32 lcl_adv
, rmt_adv
;
4350 (MAC_STATUS_SYNC_CHANGED
|
4351 MAC_STATUS_CFG_CHANGED
|
4352 MAC_STATUS_MI_COMPLETION
|
4353 MAC_STATUS_LNKSTATE_CHANGED
));
4356 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
4358 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
4362 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_PWRCTL
, 0);
4364 /* Some third-party PHYs need to be reset on link going
4367 if ((tg3_asic_rev(tp
) == ASIC_REV_5703
||
4368 tg3_asic_rev(tp
) == ASIC_REV_5704
||
4369 tg3_asic_rev(tp
) == ASIC_REV_5705
) &&
4371 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4372 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4373 !(bmsr
& BMSR_LSTATUS
))
4379 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
4380 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4381 if (tg3_readphy(tp
, MII_BMSR
, &bmsr
) ||
4382 !tg3_flag(tp
, INIT_COMPLETE
))
4385 if (!(bmsr
& BMSR_LSTATUS
)) {
4386 err
= tg3_init_5401phy_dsp(tp
);
4390 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4391 for (i
= 0; i
< 1000; i
++) {
4393 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4394 (bmsr
& BMSR_LSTATUS
)) {
4400 if ((tp
->phy_id
& TG3_PHY_ID_REV_MASK
) ==
4401 TG3_PHY_REV_BCM5401_B0
&&
4402 !(bmsr
& BMSR_LSTATUS
) &&
4403 tp
->link_config
.active_speed
== SPEED_1000
) {
4404 err
= tg3_phy_reset(tp
);
4406 err
= tg3_init_5401phy_dsp(tp
);
4411 } else if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
4412 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
) {
4413 /* 5701 {A0,B0} CRC bug workaround */
4414 tg3_writephy(tp
, 0x15, 0x0a75);
4415 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
4416 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
4417 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
4420 /* Clear pending interrupts... */
4421 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
4422 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
4424 if (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
)
4425 tg3_writephy(tp
, MII_TG3_IMASK
, ~MII_TG3_INT_LINKCHG
);
4426 else if (!(tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
4427 tg3_writephy(tp
, MII_TG3_IMASK
, ~0);
4429 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
4430 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
4431 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_1
)
4432 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
4433 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
4435 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, 0);
4438 current_link_up
= 0;
4439 current_speed
= SPEED_UNKNOWN
;
4440 current_duplex
= DUPLEX_UNKNOWN
;
4441 tp
->phy_flags
&= ~TG3_PHYFLG_MDIX_STATE
;
4442 tp
->link_config
.rmt_adv
= 0;
4444 if (tp
->phy_flags
& TG3_PHYFLG_CAPACITIVE_COUPLING
) {
4445 err
= tg3_phy_auxctl_read(tp
,
4446 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
4448 if (!err
&& !(val
& (1 << 10))) {
4449 tg3_phy_auxctl_write(tp
,
4450 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
4457 for (i
= 0; i
< 100; i
++) {
4458 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4459 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4460 (bmsr
& BMSR_LSTATUS
))
4465 if (bmsr
& BMSR_LSTATUS
) {
4468 tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
);
4469 for (i
= 0; i
< 2000; i
++) {
4471 if (!tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
) &&
4476 tg3_aux_stat_to_speed_duplex(tp
, aux_stat
,
4481 for (i
= 0; i
< 200; i
++) {
4482 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
4483 if (tg3_readphy(tp
, MII_BMCR
, &bmcr
))
4485 if (bmcr
&& bmcr
!= 0x7fff)
4493 tp
->link_config
.active_speed
= current_speed
;
4494 tp
->link_config
.active_duplex
= current_duplex
;
4496 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
4497 if ((bmcr
& BMCR_ANENABLE
) &&
4498 tg3_phy_copper_an_config_ok(tp
, &lcl_adv
) &&
4499 tg3_phy_copper_fetch_rmtadv(tp
, &rmt_adv
))
4500 current_link_up
= 1;
4502 if (!(bmcr
& BMCR_ANENABLE
) &&
4503 tp
->link_config
.speed
== current_speed
&&
4504 tp
->link_config
.duplex
== current_duplex
&&
4505 tp
->link_config
.flowctrl
==
4506 tp
->link_config
.active_flowctrl
) {
4507 current_link_up
= 1;
4511 if (current_link_up
== 1 &&
4512 tp
->link_config
.active_duplex
== DUPLEX_FULL
) {
4515 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
4516 reg
= MII_TG3_FET_GEN_STAT
;
4517 bit
= MII_TG3_FET_GEN_STAT_MDIXSTAT
;
4519 reg
= MII_TG3_EXT_STAT
;
4520 bit
= MII_TG3_EXT_STAT_MDIX
;
4523 if (!tg3_readphy(tp
, reg
, &val
) && (val
& bit
))
4524 tp
->phy_flags
|= TG3_PHYFLG_MDIX_STATE
;
4526 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
4531 if (current_link_up
== 0 || (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
4532 tg3_phy_copper_begin(tp
);
4534 if (tg3_flag(tp
, ROBOSWITCH
)) {
4535 current_link_up
= 1;
4536 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4537 current_speed
= SPEED_1000
;
4538 current_duplex
= DUPLEX_FULL
;
4539 tp
->link_config
.active_speed
= current_speed
;
4540 tp
->link_config
.active_duplex
= current_duplex
;
4543 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4544 if ((!tg3_readphy(tp
, MII_BMSR
, &bmsr
) && (bmsr
& BMSR_LSTATUS
)) ||
4545 (tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
))
4546 current_link_up
= 1;
4549 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
4550 if (current_link_up
== 1) {
4551 if (tp
->link_config
.active_speed
== SPEED_100
||
4552 tp
->link_config
.active_speed
== SPEED_10
)
4553 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
4555 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4556 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
4557 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
4559 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4561 /* In order for the 5750 core in BCM4785 chip to work properly
4562 * in RGMII mode, the Led Control Register must be set up.
4564 if (tg3_flag(tp
, RGMII_MODE
)) {
4565 u32 led_ctrl
= tr32(MAC_LED_CTRL
);
4566 led_ctrl
&= ~(LED_CTRL_1000MBPS_ON
| LED_CTRL_100MBPS_ON
);
4568 if (tp
->link_config
.active_speed
== SPEED_10
)
4569 led_ctrl
|= LED_CTRL_LNKLED_OVERRIDE
;
4570 else if (tp
->link_config
.active_speed
== SPEED_100
)
4571 led_ctrl
|= (LED_CTRL_LNKLED_OVERRIDE
|
4572 LED_CTRL_100MBPS_ON
);
4573 else if (tp
->link_config
.active_speed
== SPEED_1000
)
4574 led_ctrl
|= (LED_CTRL_LNKLED_OVERRIDE
|
4575 LED_CTRL_1000MBPS_ON
);
4577 tw32(MAC_LED_CTRL
, led_ctrl
);
4581 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
4582 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
4583 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
4585 if (tg3_asic_rev(tp
) == ASIC_REV_5700
) {
4586 if (current_link_up
== 1 &&
4587 tg3_5700_link_polarity(tp
, tp
->link_config
.active_speed
))
4588 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
4590 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
4593 /* ??? Without this setting Netgear GA302T PHY does not
4594 * ??? send/receive packets...
4596 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5411
&&
4597 tg3_chip_rev_id(tp
) == CHIPREV_ID_5700_ALTIMA
) {
4598 tp
->mi_mode
|= MAC_MI_MODE_AUTO_POLL
;
4599 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
4603 tw32_f(MAC_MODE
, tp
->mac_mode
);
4606 tg3_phy_eee_adjust(tp
, current_link_up
);
4608 if (tg3_flag(tp
, USE_LINKCHG_REG
)) {
4609 /* Polled via timer. */
4610 tw32_f(MAC_EVENT
, 0);
4612 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
4616 if (tg3_asic_rev(tp
) == ASIC_REV_5700
&&
4617 current_link_up
== 1 &&
4618 tp
->link_config
.active_speed
== SPEED_1000
&&
4619 (tg3_flag(tp
, PCIX_MODE
) || tg3_flag(tp
, PCI_HIGH_SPEED
))) {
4622 (MAC_STATUS_SYNC_CHANGED
|
4623 MAC_STATUS_CFG_CHANGED
));
4626 NIC_SRAM_FIRMWARE_MBOX
,
4627 NIC_SRAM_FIRMWARE_MBOX_MAGIC2
);
4630 /* Prevent send BD corruption. */
4631 if (tg3_flag(tp
, CLKREQ_BUG
)) {
4632 if (tp
->link_config
.active_speed
== SPEED_100
||
4633 tp
->link_config
.active_speed
== SPEED_10
)
4634 pcie_capability_clear_word(tp
->pdev
, PCI_EXP_LNKCTL
,
4635 PCI_EXP_LNKCTL_CLKREQ_EN
);
4637 pcie_capability_set_word(tp
->pdev
, PCI_EXP_LNKCTL
,
4638 PCI_EXP_LNKCTL_CLKREQ_EN
);
4641 tg3_test_and_report_link_chg(tp
, current_link_up
);
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
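
/* Rough flow of the software autoneg state machine below (a sketch
 * derived from the cases themselves, not from any spec text):
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 *
 * A mismatch between received and expected config words drops the
 * machine back to AN_ENABLE, and ANEG_STATE_SETTLE_TIME paces the
 * timed transitions.
 */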
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthrough */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthrough */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthrough */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->link_config.rmt_adv = 0;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp)
{
	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
}

/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
}
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (tp->ptp_clock)
		info->phc_index = ptp_clock_index(tp->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	return 0;
}
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
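	/* Worked example (illustrative): ppb = 1000, i.e. the clock must
	 * gain 1 us per second, gives
	 *
	 *	correction = 1000 * 16777216 / 1000000000 = 16
	 *
	 * so the 24-bit accumulator grows by 16 per clock and overflows
	 * once every 2^24 / 16 = 2^20 clocks, each overflow adding one
	 * tick to the time counter.
	 */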
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	u64 ns;
	u32 remainder;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
	ts->tv_nsec = remainder;

	return 0;
}
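
/* Example of the split above (illustrative): ns = 1500000123 yields
 * tv_sec = 1 and tv_nsec = 500000123; div_u64_rem() returns the
 * quotient and stores the remainder.
 */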
static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}

static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
{
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
					  tp->ptp_adjust);
}
/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}

/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}

static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	u32 i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
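
/* The (u8 *)dst + off adjustment above keeps the destination buffer
 * indexed by register offset, so a register at offset 0x400 always
 * lands at regs[0x400 / sizeof(u32)] regardless of which block is
 * being dumped.
 */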
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();

	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
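
/* Ring arithmetic example (illustrative): with TG3_TX_RING_SIZE 512,
 * tx_prod = 5 and tx_cons = 510 give (5 - 510) & 511 = 7 descriptors
 * still in flight; the mask handles wrap-around of the u32 indices.
 */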
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
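/* Illustrative example (not part of the original comment): with a
 * standard ring MAXLEN sized for ~1.5k buffers and a jumbo ring
 * MAXLEN sized for ~9k buffers, a 300-byte arrival is reported
 * against a standard-ring buffer and a 4000-byte arrival against a
 * jumbo-ring buffer; the opaque cookie in the status entry tells the
 * host which producer ring to replenish.
 */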
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u64 tstamp = 0;
		u8 *data;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handlers must be synchronized
 * as well. Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}

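/* Illustrative usage of the full lock (a sketch, not a verbatim call
 * site from this file): pass a non-zero irq_sync when the hardware is
 * being torn down, so in-flight interrupt handlers drain first.
 *
 *	tg3_full_lock(tp, 1);
 *	... reprogram or halt the hardware ...
 *	tg3_full_unlock(tp);
 */
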
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}

static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps? re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}

static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream. We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled. Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}

/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}

/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}

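/* The test above works on the low 32 bits of the DMA address: if the
 * buffer starts within the last 0x233f bytes below a 4GB boundary
 * (base > 0xffffdcc0) and base + len + 8 wraps past zero in 32-bit
 * arithmetic, the buffer straddles that boundary and must take the
 * workaround path.
 */
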
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}

static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}

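/* Each send BD packs the 64-bit DMA address into addr_hi/addr_lo, the
 * length plus flag bits into len_flags, and the TSO MSS plus VLAN tag
 * into vlan_tag, matching the Tigon3 send buffer descriptor layout.
 */
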
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}

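/* When tp->dma_limit is set, the loop above splits one oversized
 * mapping into several chained BDs of at most dma_limit bytes,
 * halving the final chunk whenever a tail of 8 bytes or less would
 * otherwise trigger the short-DMA hardware bug handled earlier.
 */
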
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}

/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}

static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

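/* In effect, tg3_tso_bug() falls back to software GSO: the oversized
 * TSO frame is segmented on the CPU (skb_gso_segment() with TSO masked
 * out of the feature flags) and each resulting segment is requeued
 * through tg3_start_xmit() as an ordinary packet.
 */
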
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt. Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either. Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}

static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}

static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}

static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}

static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}

static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}

/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once. This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}

static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}

static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}

static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring,
					  tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}

static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts. Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
					    TG3_TX_RING_SIZE, GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}

static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}

static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero. This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down. Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly. The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}

#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears. tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}

/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}

/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}

/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things. So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared. The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time? It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes. The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above). I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting. Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state. */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}

static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}

*dev
, void *p
)
8673 struct tg3
*tp
= netdev_priv(dev
);
8674 struct sockaddr
*addr
= p
;
8675 int err
= 0, skip_mac_1
= 0;
8677 if (!is_valid_ether_addr(addr
->sa_data
))
8678 return -EADDRNOTAVAIL
;
8680 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
8682 if (!netif_running(dev
))
8685 if (tg3_flag(tp
, ENABLE_ASF
)) {
8686 u32 addr0_high
, addr0_low
, addr1_high
, addr1_low
;
8688 addr0_high
= tr32(MAC_ADDR_0_HIGH
);
8689 addr0_low
= tr32(MAC_ADDR_0_LOW
);
8690 addr1_high
= tr32(MAC_ADDR_1_HIGH
);
8691 addr1_low
= tr32(MAC_ADDR_1_LOW
);
8693 /* Skip MAC addr 1 if ASF is using it. */
8694 if ((addr0_high
!= addr1_high
|| addr0_low
!= addr1_low
) &&
8695 !(addr1_high
== 0 && addr1_low
== 0))
8698 spin_lock_bh(&tp
->lock
);
8699 __tg3_set_mac_addr(tp
, skip_mac_1
);
8700 spin_unlock_bh(&tp
->lock
);
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}

static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	for (; i < tp->txq_cnt; i++) {
		u32 reg;

		reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->tx_coalesce_usecs);
		reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->tx_max_coalesced_frames);
		reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->tx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}

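/* These writes are what ultimately back the ethtool -C (set-coalesce)
 * path for this driver: the ethtool_coalesce fields are translated
 * essentially one-for-one into the HOSTCC_* host coalescing registers,
 * per-vector registers being spaced 0x18 bytes apart.
 */
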
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		rxrcb += TG3_BDINFO_SIZE;
	}
}

static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}

static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}

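/* calc_crc() is the standard bit-serial little-endian CRC-32
 * (polynomial 0xedb88320). Here it is applied to the 6-byte multicast
 * MAC address, and the result feeds the hash filter indexing done in
 * __tg3_set_rx_mode() below.
 */
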
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

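/* Worked example of the hash indexing above: ~crc & 0x7f selects one of
 * 128 filter bits; bits 6:5 of that value pick one of the four 32-bit
 * MAC_HASH_REG_* registers and bits 4:0 pick the bit within it. A crc
 * of 0xffffff00 gives bit = 0x7f, i.e. bit 31 of mc_filter[3].
 */
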
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}

static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}

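/* The loop above packs eight 4-bit queue indexes into each 32-bit
 * register, most significant nibble first, walking upward from
 * MAC_RSS_INDIR_TBL_0 in 4-byte steps until all TG3_RSS_INDIR_TBL_SIZE
 * entries have been written.
 */
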
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
		      TG3_CPMU_EEE_LNKIDL_UART_IDL;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

		tw32_f(TG3_CPMU_EEE_CTRL,
		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
		      TG3_CPMU_EEEMD_LPI_IN_TX |
		      TG3_CPMU_EEEMD_LPI_IN_RX |
		      TG3_CPMU_EEEMD_EEE_ENABLE;

		if (tg3_asic_rev(tp) != ASIC_REV_5717)
			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

		if (tg3_flag(tp, ENABLE_APE))
			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

		tw32_f(TG3_CPMU_EEE_MODE, val);

		tw32_f(TG3_CPMU_EEE_DBTMR1,
		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);

		tw32_f(TG3_CPMU_EEE_DBTMR2,
		       TG3_CPMU_DBTMR2_APE_TX_2047US |
		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
	}

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon. This bit has no effect on any
	 * other revision. But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL
) &
9315 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT
;
9316 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_57765_A0
)
9317 val
&= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK
;
9318 if (!tg3_flag(tp
, 57765_CLASS
) &&
9319 tg3_asic_rev(tp
) != ASIC_REV_5717
&&
9320 tg3_asic_rev(tp
) != ASIC_REV_5762
)
9321 val
|= DMA_RWCTRL_TAGGED_STAT_WA
;
9322 tw32(TG3PCI_DMA_RW_CTRL
, val
| tp
->dma_rwctrl
);
9323 } else if (tg3_asic_rev(tp
) != ASIC_REV_5784
&&
9324 tg3_asic_rev(tp
) != ASIC_REV_5761
) {
9325 /* This value is determined during the probe time DMA
9326 * engine test, tg3_test_dma.
9328 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
9331 tp
->grc_mode
&= ~(GRC_MODE_HOST_SENDBDS
|
9332 GRC_MODE_4X_NIC_SEND_RINGS
|
9333 GRC_MODE_NO_TX_PHDR_CSUM
|
9334 GRC_MODE_NO_RX_PHDR_CSUM
);
9335 tp
->grc_mode
|= GRC_MODE_HOST_SENDBDS
;
9337 /* Pseudo-header checksum is done by hardware logic and not
9338 * the offload processers, so make the chip do the pseudo-
9339 * header checksums on receive. For transmit it is more
9340 * convenient to do the pseudo-header checksum in software
9341 * as Linux does that on transmit for us in all cases.
9343 tp
->grc_mode
|= GRC_MODE_NO_TX_PHDR_CSUM
;
9345 val
= GRC_MODE_IRQ_ON_MAC_ATTN
| GRC_MODE_HOST_STACKUP
;
9347 tw32(TG3_RX_PTP_CTL
,
9348 tp
->rxptpctl
| TG3_RX_PTP_CTL_HWTS_INTERLOCK
);
9350 if (tg3_flag(tp
, PTP_CAPABLE
))
9351 val
|= GRC_MODE_TIME_SYNC_ENABLE
;
9353 tw32(GRC_MODE
, tp
->grc_mode
| val
);
9355 /* Setup the timer prescalar register. Clock is always 66Mhz. */
9356 val
= tr32(GRC_MISC_CFG
);
9358 val
|= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT
);
9359 tw32(GRC_MISC_CFG
, val
);
9361 /* Initialize MBUF/DESC pool. */
9362 if (tg3_flag(tp
, 5750_PLUS
)) {
9364 } else if (tg3_asic_rev(tp
) != ASIC_REV_5705
) {
9365 tw32(BUFMGR_MB_POOL_ADDR
, NIC_SRAM_MBUF_POOL_BASE
);
9366 if (tg3_asic_rev(tp
) == ASIC_REV_5704
)
9367 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE64
);
9369 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE96
);
9370 tw32(BUFMGR_DMA_DESC_POOL_ADDR
, NIC_SRAM_DMA_DESC_POOL_BASE
);
9371 tw32(BUFMGR_DMA_DESC_POOL_SIZE
, NIC_SRAM_DMA_DESC_POOL_SIZE
);
9372 } else if (tg3_flag(tp
, TSO_CAPABLE
)) {
9375 fw_len
= tp
->fw_len
;
9376 fw_len
= (fw_len
+ (0x80 - 1)) & ~(0x80 - 1);
9377 tw32(BUFMGR_MB_POOL_ADDR
,
9378 NIC_SRAM_MBUF_POOL_BASE5705
+ fw_len
);
9379 tw32(BUFMGR_MB_POOL_SIZE
,
9380 NIC_SRAM_MBUF_POOL_SIZE5705
- fw_len
- 0xa00);
9383 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
9384 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
9385 tp
->bufmgr_config
.mbuf_read_dma_low_water
);
9386 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
9387 tp
->bufmgr_config
.mbuf_mac_rx_low_water
);
9388 tw32(BUFMGR_MB_HIGH_WATER
,
9389 tp
->bufmgr_config
.mbuf_high_water
);
9391 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
9392 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
);
9393 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
9394 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
);
9395 tw32(BUFMGR_MB_HIGH_WATER
,
9396 tp
->bufmgr_config
.mbuf_high_water_jumbo
);
9398 tw32(BUFMGR_DMA_LOW_WATER
,
9399 tp
->bufmgr_config
.dma_low_water
);
9400 tw32(BUFMGR_DMA_HIGH_WATER
,
9401 tp
->bufmgr_config
.dma_high_water
);
9403 val
= BUFMGR_MODE_ENABLE
| BUFMGR_MODE_ATTN_ENABLE
;
9404 if (tg3_asic_rev(tp
) == ASIC_REV_5719
)
9405 val
|= BUFMGR_MODE_NO_TX_UNDERRUN
;
9406 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
9407 tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
||
9408 tg3_chip_rev_id(tp
) == CHIPREV_ID_5720_A0
)
9409 val
|= BUFMGR_MODE_MBLOW_ATTN_ENAB
;
9410 tw32(BUFMGR_MODE
, val
);
9411 for (i
= 0; i
< 2000; i
++) {
9412 if (tr32(BUFMGR_MODE
) & BUFMGR_MODE_ENABLE
)
9417 netdev_err(tp
->dev
, "%s cannot enable BUFMGR\n", __func__
);
9421 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5906_A1
)
9422 tw32(ISO_PKT_TX
, (tr32(ISO_PKT_TX
) & ~0x3) | 0x2);
9424 tg3_setup_rxbd_thresholds(tp
);
9426 /* Initialize TG3_BDINFO's at:
9427 * RCVDBDI_STD_BD: standard eth size rx ring
9428 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9429 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9432 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9433 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9434 * ring attribute flags
9435 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9437 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9438 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9440 * The size of each ring is fixed in the firmware, but the location is
9443 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
9444 ((u64
) tpr
->rx_std_mapping
>> 32));
9445 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
9446 ((u64
) tpr
->rx_std_mapping
& 0xffffffff));
9447 if (!tg3_flag(tp
, 5717_PLUS
))
9448 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_NIC_ADDR
,
9449 NIC_SRAM_RX_BUFFER_DESC
);
9451 /* Disable the mini ring */
9452 if (!tg3_flag(tp
, 5705_PLUS
))
9453 tw32(RCVDBDI_MINI_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
9454 BDINFO_FLAGS_DISABLED
);
9456 /* Program the jumbo buffer descriptor ring control
9457 * blocks on those devices that have them.
9459 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
||
9460 (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
))) {
9462 if (tg3_flag(tp
, JUMBO_RING_ENABLE
)) {
9463 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
9464 ((u64
) tpr
->rx_jmb_mapping
>> 32));
9465 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
9466 ((u64
) tpr
->rx_jmb_mapping
& 0xffffffff));
9467 val
= TG3_RX_JMB_RING_SIZE(tp
) <<
9468 BDINFO_FLAGS_MAXLEN_SHIFT
;
9469 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
9470 val
| BDINFO_FLAGS_USE_EXT_RECV
);
9471 if (!tg3_flag(tp
, USE_JUMBO_BDFLAG
) ||
9472 tg3_flag(tp
, 57765_CLASS
) ||
9473 tg3_asic_rev(tp
) == ASIC_REV_5762
)
9474 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_NIC_ADDR
,
9475 NIC_SRAM_RX_JUMBO_BUFFER_DESC
);
9477 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
9478 BDINFO_FLAGS_DISABLED
);
9481 if (tg3_flag(tp
, 57765_PLUS
)) {
9482 val
= TG3_RX_STD_RING_SIZE(tp
);
9483 val
<<= BDINFO_FLAGS_MAXLEN_SHIFT
;
9484 val
|= (TG3_RX_STD_DMA_SZ
<< 2);
9486 val
= TG3_RX_STD_DMA_SZ
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
9488 val
= TG3_RX_STD_MAX_SIZE_5700
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
9490 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
, val
);
9492 tpr
->rx_std_prod_idx
= tp
->rx_pending
;
9493 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
, tpr
->rx_std_prod_idx
);
9495 tpr
->rx_jmb_prod_idx
=
9496 tg3_flag(tp
, JUMBO_RING_ENABLE
) ? tp
->rx_jumbo_pending
: 0;
9497 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
, tpr
->rx_jmb_prod_idx
);
9499 tg3_rings_reset(tp
);
9501 /* Initialize MAC address and backoff seed. */
9502 __tg3_set_mac_addr(tp
, 0);
9504 /* MTU + ethernet header + FCS + optional VLAN tag */
9505 tw32(MAC_RX_MTU_SIZE
,
9506 tp
->dev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
);
9508 /* The slot time is changed by tg3_setup_phy if we
9509 * run at gigabit with half duplex.
9511 val
= (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
9512 (6 << TX_LENGTHS_IPG_SHIFT
) |
9513 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
);
9515 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
9516 tg3_asic_rev(tp
) == ASIC_REV_5762
)
9517 val
|= tr32(MAC_TX_LENGTHS
) &
9518 (TX_LENGTHS_JMB_FRM_LEN_MSK
|
9519 TX_LENGTHS_CNT_DWN_VAL_MSK
);
9521 tw32(MAC_TX_LENGTHS
, val
);
9523 /* Receive rules. */
9524 tw32(MAC_RCV_RULE_CFG
, RCV_RULE_CFG_DEFAULT_CLASS
);
9525 tw32(RCVLPC_CONFIG
, 0x0181);
9527 /* Calculate RDMAC_MODE setting early, we need it to determine
9528 * the RCVLPC_STATE_ENABLE mask.
9530 rdmac_mode
= (RDMAC_MODE_ENABLE
| RDMAC_MODE_TGTABORT_ENAB
|
9531 RDMAC_MODE_MSTABORT_ENAB
| RDMAC_MODE_PARITYERR_ENAB
|
9532 RDMAC_MODE_ADDROFLOW_ENAB
| RDMAC_MODE_FIFOOFLOW_ENAB
|
9533 RDMAC_MODE_FIFOURUN_ENAB
| RDMAC_MODE_FIFOOREAD_ENAB
|
9534 RDMAC_MODE_LNGREAD_ENAB
);
9536 if (tg3_asic_rev(tp
) == ASIC_REV_5717
)
9537 rdmac_mode
|= RDMAC_MODE_MULT_DMA_RD_DIS
;
9539 if (tg3_asic_rev(tp
) == ASIC_REV_5784
||
9540 tg3_asic_rev(tp
) == ASIC_REV_5785
||
9541 tg3_asic_rev(tp
) == ASIC_REV_57780
)
9542 rdmac_mode
|= RDMAC_MODE_BD_SBD_CRPT_ENAB
|
9543 RDMAC_MODE_MBUF_RBD_CRPT_ENAB
|
9544 RDMAC_MODE_MBUF_SBD_CRPT_ENAB
;
9546 if (tg3_asic_rev(tp
) == ASIC_REV_5705
&&
9547 tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) {
9548 if (tg3_flag(tp
, TSO_CAPABLE
) &&
9549 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
9550 rdmac_mode
|= RDMAC_MODE_FIFO_SIZE_128
;
9551 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
9552 !tg3_flag(tp
, IS_5788
)) {
9553 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
9557 if (tg3_flag(tp
, PCI_EXPRESS
))
9558 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
9560 if (tg3_asic_rev(tp
) == ASIC_REV_57766
) {
9562 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
9563 rdmac_mode
|= RDMAC_MODE_JMB_2K_MMRR
;
9564 tp
->dma_limit
= TG3_TX_BD_DMA_MAX_2K
;
9568 if (tg3_flag(tp
, HW_TSO_1
) ||
9569 tg3_flag(tp
, HW_TSO_2
) ||
9570 tg3_flag(tp
, HW_TSO_3
))
9571 rdmac_mode
|= RDMAC_MODE_IPV4_LSO_EN
;
9573 if (tg3_flag(tp
, 57765_PLUS
) ||
9574 tg3_asic_rev(tp
) == ASIC_REV_5785
||
9575 tg3_asic_rev(tp
) == ASIC_REV_57780
)
9576 rdmac_mode
|= RDMAC_MODE_IPV6_LSO_EN
;
9578 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
9579 tg3_asic_rev(tp
) == ASIC_REV_5762
)
9580 rdmac_mode
|= tr32(RDMAC_MODE
) & RDMAC_MODE_H2BNC_VLAN_DET
;
9582 if (tg3_asic_rev(tp
) == ASIC_REV_5761
||
9583 tg3_asic_rev(tp
) == ASIC_REV_5784
||
9584 tg3_asic_rev(tp
) == ASIC_REV_5785
||
9585 tg3_asic_rev(tp
) == ASIC_REV_57780
||
9586 tg3_flag(tp
, 57765_PLUS
)) {
9589 if (tg3_asic_rev(tp
) == ASIC_REV_5762
)
9590 tgtreg
= TG3_RDMA_RSRVCTRL_REG2
;
9592 tgtreg
= TG3_RDMA_RSRVCTRL_REG
;
9595 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
||
9596 tg3_asic_rev(tp
) == ASIC_REV_5762
) {
9597 val
&= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK
|
9598 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK
|
9599 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK
);
9600 val
|= TG3_RDMA_RSRVCTRL_TXMRGN_320B
|
9601 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K
|
9602 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K
;
9604 tw32(tgtreg
, val
| TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX
);
9607 if (tg3_asic_rev(tp
) == ASIC_REV_5719
||
9608 tg3_asic_rev(tp
) == ASIC_REV_5720
||
9609 tg3_asic_rev(tp
) == ASIC_REV_5762
) {
9612 if (tg3_asic_rev(tp
) == ASIC_REV_5762
)
9613 tgtreg
= TG3_LSO_RD_DMA_CRPTEN_CTRL2
;
9615 tgtreg
= TG3_LSO_RD_DMA_CRPTEN_CTRL
;
9619 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K
|
9620 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K
);
9623 /* Receive/send statistics. */
9624 if (tg3_flag(tp
, 5750_PLUS
)) {
9625 val
= tr32(RCVLPC_STATS_ENABLE
);
9626 val
&= ~RCVLPC_STATSENAB_DACK_FIX
;
9627 tw32(RCVLPC_STATS_ENABLE
, val
);
9628 } else if ((rdmac_mode
& RDMAC_MODE_FIFO_SIZE_128
) &&
9629 tg3_flag(tp
, TSO_CAPABLE
)) {
9630 val
= tr32(RCVLPC_STATS_ENABLE
);
9631 val
&= ~RCVLPC_STATSENAB_LNGBRST_RFIX
;
9632 tw32(RCVLPC_STATS_ENABLE
, val
);
9634 tw32(RCVLPC_STATS_ENABLE
, 0xffffff);
9636 tw32(RCVLPC_STATSCTRL
, RCVLPC_STATSCTRL_ENABLE
);
9637 tw32(SNDDATAI_STATSENAB
, 0xffffff);
9638 tw32(SNDDATAI_STATSCTRL
,
9639 (SNDDATAI_SCTRL_ENABLE
|
9640 SNDDATAI_SCTRL_FASTUPD
));
9642 /* Setup host coalescing engine. */
9643 tw32(HOSTCC_MODE
, 0);
9644 for (i
= 0; i
< 2000; i
++) {
9645 if (!(tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
))
9650 __tg3_set_coalesce(tp
, &tp
->coal
);
9652 if (!tg3_flag(tp
, 5705_PLUS
)) {
9653 /* Status/statistics block address. See tg3_timer,
9654 * the tg3_periodic_fetch_stats call there, and
9655 * tg3_get_stats to see how this works for 5705/5750 chips.
9657 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
9658 ((u64
) tp
->stats_mapping
>> 32));
9659 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
9660 ((u64
) tp
->stats_mapping
& 0xffffffff));
9661 tw32(HOSTCC_STATS_BLK_NIC_ADDR
, NIC_SRAM_STATS_BLK
);
9663 tw32(HOSTCC_STATUS_BLK_NIC_ADDR
, NIC_SRAM_STATUS_BLK
);
9665 /* Clear statistics and status block memory areas */
9666 for (i
= NIC_SRAM_STATS_BLK
;
9667 i
< NIC_SRAM_STATUS_BLK
+ TG3_HW_STATUS_SIZE
;
9669 tg3_write_mem(tp
, i
, 0);
9674 tw32(HOSTCC_MODE
, HOSTCC_MODE_ENABLE
| tp
->coalesce_mode
);
9676 tw32(RCVCC_MODE
, RCVCC_MODE_ENABLE
| RCVCC_MODE_ATTN_ENABLE
);
9677 tw32(RCVLPC_MODE
, RCVLPC_MODE_ENABLE
);
9678 if (!tg3_flag(tp
, 5705_PLUS
))
9679 tw32(RCVLSC_MODE
, RCVLSC_MODE_ENABLE
| RCVLSC_MODE_ATTN_ENABLE
);
9681 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
9682 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
9683 /* reset to prevent losing 1st rx packet intermittently */
9684 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
9688 tp
->mac_mode
|= MAC_MODE_TXSTAT_ENABLE
| MAC_MODE_RXSTAT_ENABLE
|
9689 MAC_MODE_TDE_ENABLE
| MAC_MODE_RDE_ENABLE
|
9690 MAC_MODE_FHDE_ENABLE
;
9691 if (tg3_flag(tp
, ENABLE_APE
))
9692 tp
->mac_mode
|= MAC_MODE_APE_TX_EN
| MAC_MODE_APE_RX_EN
;
9693 if (!tg3_flag(tp
, 5705_PLUS
) &&
9694 !(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
9695 tg3_asic_rev(tp
) != ASIC_REV_5700
)
9696 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
9697 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_RXSTAT_CLEAR
| MAC_MODE_TXSTAT_CLEAR
);
9700 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9701 * If TG3_FLAG_IS_NIC is zero, we should read the
9702 * register to preserve the GPIO settings for LOMs. The GPIOs,
9703 * whether used as inputs or outputs, are set by boot code after
9706 if (!tg3_flag(tp
, IS_NIC
)) {
9709 gpio_mask
= GRC_LCLCTRL_GPIO_OE0
| GRC_LCLCTRL_GPIO_OE1
|
9710 GRC_LCLCTRL_GPIO_OE2
| GRC_LCLCTRL_GPIO_OUTPUT0
|
9711 GRC_LCLCTRL_GPIO_OUTPUT1
| GRC_LCLCTRL_GPIO_OUTPUT2
;
9713 if (tg3_asic_rev(tp
) == ASIC_REV_5752
)
9714 gpio_mask
|= GRC_LCLCTRL_GPIO_OE3
|
9715 GRC_LCLCTRL_GPIO_OUTPUT3
;
9717 if (tg3_asic_rev(tp
) == ASIC_REV_5755
)
9718 gpio_mask
|= GRC_LCLCTRL_GPIO_UART_SEL
;
9720 tp
->grc_local_ctrl
&= ~gpio_mask
;
9721 tp
->grc_local_ctrl
|= tr32(GRC_LOCAL_CTRL
) & gpio_mask
;
9723 /* GPIO1 must be driven high for eeprom write protect */
9724 if (tg3_flag(tp
, EEPROM_WRITE_PROT
))
9725 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
9726 GRC_LCLCTRL_GPIO_OUTPUT1
);
9728 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
9731 if (tg3_flag(tp
, USING_MSIX
)) {
9732 val
= tr32(MSGINT_MODE
);
9733 val
|= MSGINT_MODE_ENABLE
;
9734 if (tp
->irq_cnt
> 1)
9735 val
|= MSGINT_MODE_MULTIVEC_EN
;
9736 if (!tg3_flag(tp
, 1SHOT_MSI
))
9737 val
|= MSGINT_MODE_ONE_SHOT_DISABLE
;
9738 tw32(MSGINT_MODE
, val
);
9741 if (!tg3_flag(tp
, 5705_PLUS
)) {
9742 tw32_f(DMAC_MODE
, DMAC_MODE_ENABLE
);
9746 val
= (WDMAC_MODE_ENABLE
| WDMAC_MODE_TGTABORT_ENAB
|
9747 WDMAC_MODE_MSTABORT_ENAB
| WDMAC_MODE_PARITYERR_ENAB
|
9748 WDMAC_MODE_ADDROFLOW_ENAB
| WDMAC_MODE_FIFOOFLOW_ENAB
|
9749 WDMAC_MODE_FIFOURUN_ENAB
| WDMAC_MODE_FIFOOREAD_ENAB
|
9750 WDMAC_MODE_LNGREAD_ENAB
);
9752 if (tg3_asic_rev(tp
) == ASIC_REV_5705
&&
9753 tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) {
9754 if (tg3_flag(tp
, TSO_CAPABLE
) &&
9755 (tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A1
||
9756 tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A2
)) {
9758 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
9759 !tg3_flag(tp
, IS_5788
)) {
9760 val
|= WDMAC_MODE_RX_ACCEL
;
9764 /* Enable host coalescing bug fix */
9765 if (tg3_flag(tp
, 5755_PLUS
))
9766 val
|= WDMAC_MODE_STATUS_TAG_FIX
;
9768 if (tg3_asic_rev(tp
) == ASIC_REV_5785
)
9769 val
|= WDMAC_MODE_BURST_ALL_DATA
;
9771 tw32_f(WDMAC_MODE
, val
);
9774 if (tg3_flag(tp
, PCIX_MODE
)) {
9777 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
9779 if (tg3_asic_rev(tp
) == ASIC_REV_5703
) {
9780 pcix_cmd
&= ~PCI_X_CMD_MAX_READ
;
9781 pcix_cmd
|= PCI_X_CMD_READ_2K
;
9782 } else if (tg3_asic_rev(tp
) == ASIC_REV_5704
) {
9783 pcix_cmd
&= ~(PCI_X_CMD_MAX_SPLIT
| PCI_X_CMD_MAX_READ
);
9784 pcix_cmd
|= PCI_X_CMD_READ_2K
;
9786 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
9790 tw32_f(RDMAC_MODE
, rdmac_mode
);
9793 if (tg3_asic_rev(tp
) == ASIC_REV_5719
) {
9794 for (i
= 0; i
< TG3_NUM_RDMA_CHANNELS
; i
++) {
9795 if (tr32(TG3_RDMA_LENGTH
+ (i
<< 2)) > TG3_MAX_MTU(tp
))
9798 if (i
< TG3_NUM_RDMA_CHANNELS
) {
9799 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
9800 val
|= TG3_LSO_RD_DMA_TX_LENGTH_WA
;
9801 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
);
9802 tg3_flag_set(tp
, 5719_RDMA_BUG
);
9806 tw32(RCVDCC_MODE
, RCVDCC_MODE_ENABLE
| RCVDCC_MODE_ATTN_ENABLE
);
9807 if (!tg3_flag(tp
, 5705_PLUS
))
9808 tw32(MBFREE_MODE
, MBFREE_MODE_ENABLE
);
9810 if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
9812 SNDDATAC_MODE_ENABLE
| SNDDATAC_MODE_CDELAY
);
9814 tw32(SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
);
9816 tw32(SNDBDC_MODE
, SNDBDC_MODE_ENABLE
| SNDBDC_MODE_ATTN_ENABLE
);
9817 tw32(RCVBDI_MODE
, RCVBDI_MODE_ENABLE
| RCVBDI_MODE_RCB_ATTN_ENAB
);
9818 val
= RCVDBDI_MODE_ENABLE
| RCVDBDI_MODE_INV_RING_SZ
;
9819 if (tg3_flag(tp
, LRG_PROD_RING_CAP
))
9820 val
|= RCVDBDI_MODE_LRG_RING_SZ
;
9821 tw32(RCVDBDI_MODE
, val
);
9822 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
);
9823 if (tg3_flag(tp
, HW_TSO_1
) ||
9824 tg3_flag(tp
, HW_TSO_2
) ||
9825 tg3_flag(tp
, HW_TSO_3
))
9826 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
| 0x8);
9827 val
= SNDBDI_MODE_ENABLE
| SNDBDI_MODE_ATTN_ENABLE
;
9828 if (tg3_flag(tp
, ENABLE_TSS
))
9829 val
|= SNDBDI_MODE_MULTI_TXQ_EN
;
9830 tw32(SNDBDI_MODE
, val
);
9831 tw32(SNDBDS_MODE
, SNDBDS_MODE_ENABLE
| SNDBDS_MODE_ATTN_ENABLE
);
9833 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
) {
9834 err
= tg3_load_5701_a0_firmware_fix(tp
);
9839 if (tg3_flag(tp
, TSO_CAPABLE
)) {
9840 err
= tg3_load_tso_firmware(tp
);
9845 tp
->tx_mode
= TX_MODE_ENABLE
;
9847 if (tg3_flag(tp
, 5755_PLUS
) ||
9848 tg3_asic_rev(tp
) == ASIC_REV_5906
)
9849 tp
->tx_mode
|= TX_MODE_MBUF_LOCKUP_FIX
;
9851 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
9852 tg3_asic_rev(tp
) == ASIC_REV_5762
) {
9853 val
= TX_MODE_JMB_FRM_LEN
| TX_MODE_CNT_DN_MODE
;
9854 tp
->tx_mode
&= ~val
;
9855 tp
->tx_mode
|= tr32(MAC_TX_MODE
) & val
;
9858 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
9861 if (tg3_flag(tp
, ENABLE_RSS
)) {
9862 tg3_rss_write_indir_tbl(tp
);
9864 /* Setup the "secret" hash key. */
9865 tw32(MAC_RSS_HASH_KEY_0
, 0x5f865437);
9866 tw32(MAC_RSS_HASH_KEY_1
, 0xe4ac62cc);
9867 tw32(MAC_RSS_HASH_KEY_2
, 0x50103a45);
9868 tw32(MAC_RSS_HASH_KEY_3
, 0x36621985);
9869 tw32(MAC_RSS_HASH_KEY_4
, 0xbf14c0e8);
9870 tw32(MAC_RSS_HASH_KEY_5
, 0x1bc27a1e);
9871 tw32(MAC_RSS_HASH_KEY_6
, 0x84f4b556);
9872 tw32(MAC_RSS_HASH_KEY_7
, 0x094ea6fe);
9873 tw32(MAC_RSS_HASH_KEY_8
, 0x7dda01e7);
9874 tw32(MAC_RSS_HASH_KEY_9
, 0xc04d7481);
9877 tp
->rx_mode
= RX_MODE_ENABLE
;
9878 if (tg3_flag(tp
, 5755_PLUS
))
9879 tp
->rx_mode
|= RX_MODE_IPV6_CSUM_ENABLE
;
9881 if (tg3_flag(tp
, ENABLE_RSS
))
9882 tp
->rx_mode
|= RX_MODE_RSS_ENABLE
|
9883 RX_MODE_RSS_ITBL_HASH_BITS_7
|
9884 RX_MODE_RSS_IPV6_HASH_EN
|
9885 RX_MODE_RSS_TCP_IPV6_HASH_EN
|
9886 RX_MODE_RSS_IPV4_HASH_EN
|
9887 RX_MODE_RSS_TCP_IPV4_HASH_EN
;
9889 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
9892 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
9894 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
9895 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
9896 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
9899 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
9902 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
9903 if ((tg3_asic_rev(tp
) == ASIC_REV_5704
) &&
9904 !(tp
->phy_flags
& TG3_PHYFLG_SERDES_PREEMPHASIS
)) {
9905 /* Set drive transmission level to 1.2V */
9906 /* only if the signal pre-emphasis bit is not set */
9907 val
= tr32(MAC_SERDES_CFG
);
9910 tw32(MAC_SERDES_CFG
, val
);
9912 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A1
)
9913 tw32(MAC_SERDES_CFG
, 0x616000);
9916 /* Prevent chip from dropping frames when flow control
9919 if (tg3_flag(tp
, 57765_CLASS
))
9923 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME
, val
);
9925 if (tg3_asic_rev(tp
) == ASIC_REV_5704
&&
9926 (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
9927 /* Use hardware link auto-negotiation */
9928 tg3_flag_set(tp
, HW_AUTONEG
);
9931 if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
9932 tg3_asic_rev(tp
) == ASIC_REV_5714
) {
9935 tmp
= tr32(SERDES_RX_CTRL
);
9936 tw32(SERDES_RX_CTRL
, tmp
| SERDES_RX_SIG_DETECT
);
9937 tp
->grc_local_ctrl
&= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT
;
9938 tp
->grc_local_ctrl
|= GRC_LCLCTRL_USE_SIG_DETECT
;
9939 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
9942 if (!tg3_flag(tp
, USE_PHYLIB
)) {
9943 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
9944 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
9946 err
= tg3_setup_phy(tp
, 0);
9950 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
9951 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
9954 /* Clear CRC stats. */
9955 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &tmp
)) {
9956 tg3_writephy(tp
, MII_TG3_TEST1
,
9957 tmp
| MII_TG3_TEST1_CRC_EN
);
9958 tg3_readphy(tp
, MII_TG3_RXR_COUNTERS
, &tmp
);
9963 __tg3_set_rx_mode(tp
->dev
);
9965 /* Initialize receive rules. */
9966 tw32(MAC_RCV_RULE_0
, 0xc2000000 & RCV_RULE_DISABLE_MASK
);
9967 tw32(MAC_RCV_VALUE_0
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
9968 tw32(MAC_RCV_RULE_1
, 0x86000004 & RCV_RULE_DISABLE_MASK
);
9969 tw32(MAC_RCV_VALUE_1
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
9971 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
))
9975 if (tg3_flag(tp
, ENABLE_ASF
))
9979 tw32(MAC_RCV_RULE_15
, 0); tw32(MAC_RCV_VALUE_15
, 0);
9981 tw32(MAC_RCV_RULE_14
, 0); tw32(MAC_RCV_VALUE_14
, 0);
9983 tw32(MAC_RCV_RULE_13
, 0); tw32(MAC_RCV_VALUE_13
, 0);
9985 tw32(MAC_RCV_RULE_12
, 0); tw32(MAC_RCV_VALUE_12
, 0);
9987 tw32(MAC_RCV_RULE_11
, 0); tw32(MAC_RCV_VALUE_11
, 0);
9989 tw32(MAC_RCV_RULE_10
, 0); tw32(MAC_RCV_VALUE_10
, 0);
9991 tw32(MAC_RCV_RULE_9
, 0); tw32(MAC_RCV_VALUE_9
, 0);
9993 tw32(MAC_RCV_RULE_8
, 0); tw32(MAC_RCV_VALUE_8
, 0);
9995 tw32(MAC_RCV_RULE_7
, 0); tw32(MAC_RCV_VALUE_7
, 0);
9997 tw32(MAC_RCV_RULE_6
, 0); tw32(MAC_RCV_VALUE_6
, 0);
9999 tw32(MAC_RCV_RULE_5
, 0); tw32(MAC_RCV_VALUE_5
, 0);
10001 tw32(MAC_RCV_RULE_4
, 0); tw32(MAC_RCV_VALUE_4
, 0);
10003 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10005 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10013 if (tg3_flag(tp
, ENABLE_APE
))
10014 /* Write our heartbeat update interval to APE. */
10015 tg3_ape_write32(tp
, TG3_APE_HOST_HEARTBEAT_INT_MS
,
10016 APE_HOST_HEARTBEAT_INT_DISABLE
);
10018 tg3_write_sig_post_reset(tp
, RESET_KIND_INIT
);
10023 /* Called at device open time to get the chip ready for
10024 * packet processing. Invoked with tp->lock held.
10026 static int tg3_init_hw(struct tg3
*tp
, int reset_phy
)
10028 tg3_switch_clocks(tp
);
10030 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
10032 return tg3_reset_hw(tp
, reset_phy
);
10035 static void tg3_sd_scan_scratchpad(struct tg3
*tp
, struct tg3_ocir
*ocir
)
10039 for (i
= 0; i
< TG3_SD_NUM_RECS
; i
++, ocir
++) {
10040 u32 off
= i
* TG3_OCIR_LEN
, len
= TG3_OCIR_LEN
;
10042 tg3_ape_scratchpad_read(tp
, (u32
*) ocir
, off
, len
);
10045 if (ocir
->signature
!= TG3_OCIR_SIG_MAGIC
||
10046 !(ocir
->version_flags
& TG3_OCIR_FLAG_ACTIVE
))
10047 memset(ocir
, 0, TG3_OCIR_LEN
);
10051 /* sysfs attributes for hwmon */
10052 static ssize_t
tg3_show_temp(struct device
*dev
,
10053 struct device_attribute
*devattr
, char *buf
)
10055 struct pci_dev
*pdev
= to_pci_dev(dev
);
10056 struct net_device
*netdev
= pci_get_drvdata(pdev
);
10057 struct tg3
*tp
= netdev_priv(netdev
);
10058 struct sensor_device_attribute
*attr
= to_sensor_dev_attr(devattr
);
10061 spin_lock_bh(&tp
->lock
);
10062 tg3_ape_scratchpad_read(tp
, &temperature
, attr
->index
,
10063 sizeof(temperature
));
10064 spin_unlock_bh(&tp
->lock
);
10065 return sprintf(buf
, "%u\n", temperature
);
10069 static SENSOR_DEVICE_ATTR(temp1_input
, S_IRUGO
, tg3_show_temp
, NULL
,
10070 TG3_TEMP_SENSOR_OFFSET
);
10071 static SENSOR_DEVICE_ATTR(temp1_crit
, S_IRUGO
, tg3_show_temp
, NULL
,
10072 TG3_TEMP_CAUTION_OFFSET
);
10073 static SENSOR_DEVICE_ATTR(temp1_max
, S_IRUGO
, tg3_show_temp
, NULL
,
10074 TG3_TEMP_MAX_OFFSET
);
10076 static struct attribute
*tg3_attributes
[] = {
10077 &sensor_dev_attr_temp1_input
.dev_attr
.attr
,
10078 &sensor_dev_attr_temp1_crit
.dev_attr
.attr
,
10079 &sensor_dev_attr_temp1_max
.dev_attr
.attr
,
10083 static const struct attribute_group tg3_group
= {
10084 .attrs
= tg3_attributes
,
10087 static void tg3_hwmon_close(struct tg3
*tp
)
10089 if (tp
->hwmon_dev
) {
10090 hwmon_device_unregister(tp
->hwmon_dev
);
10091 tp
->hwmon_dev
= NULL
;
10092 sysfs_remove_group(&tp
->pdev
->dev
.kobj
, &tg3_group
);
10096 static void tg3_hwmon_open(struct tg3
*tp
)
10100 struct pci_dev
*pdev
= tp
->pdev
;
10101 struct tg3_ocir ocirs
[TG3_SD_NUM_RECS
];
10103 tg3_sd_scan_scratchpad(tp
, ocirs
);
10105 for (i
= 0; i
< TG3_SD_NUM_RECS
; i
++) {
10106 if (!ocirs
[i
].src_data_length
)
10109 size
+= ocirs
[i
].src_hdr_length
;
10110 size
+= ocirs
[i
].src_data_length
;
10116 /* Register hwmon sysfs hooks */
10117 err
= sysfs_create_group(&pdev
->dev
.kobj
, &tg3_group
);
10119 dev_err(&pdev
->dev
, "Cannot create sysfs group, aborting\n");
10123 tp
->hwmon_dev
= hwmon_device_register(&pdev
->dev
);
10124 if (IS_ERR(tp
->hwmon_dev
)) {
10125 tp
->hwmon_dev
= NULL
;
10126 dev_err(&pdev
->dev
, "Cannot register hwmon device, aborting\n");
10127 sysfs_remove_group(&pdev
->dev
.kobj
, &tg3_group
);
10132 #define TG3_STAT_ADD32(PSTAT, REG) \
10133 do { u32 __val = tr32(REG); \
10134 (PSTAT)->low += __val; \
10135 if ((PSTAT)->low < __val) \
10136 (PSTAT)->high += 1; \
10139 static void tg3_periodic_fetch_stats(struct tg3
*tp
)
10141 struct tg3_hw_stats
*sp
= tp
->hw_stats
;
10146 TG3_STAT_ADD32(&sp
->tx_octets
, MAC_TX_STATS_OCTETS
);
10147 TG3_STAT_ADD32(&sp
->tx_collisions
, MAC_TX_STATS_COLLISIONS
);
10148 TG3_STAT_ADD32(&sp
->tx_xon_sent
, MAC_TX_STATS_XON_SENT
);
10149 TG3_STAT_ADD32(&sp
->tx_xoff_sent
, MAC_TX_STATS_XOFF_SENT
);
10150 TG3_STAT_ADD32(&sp
->tx_mac_errors
, MAC_TX_STATS_MAC_ERRORS
);
10151 TG3_STAT_ADD32(&sp
->tx_single_collisions
, MAC_TX_STATS_SINGLE_COLLISIONS
);
10152 TG3_STAT_ADD32(&sp
->tx_mult_collisions
, MAC_TX_STATS_MULT_COLLISIONS
);
10153 TG3_STAT_ADD32(&sp
->tx_deferred
, MAC_TX_STATS_DEFERRED
);
10154 TG3_STAT_ADD32(&sp
->tx_excessive_collisions
, MAC_TX_STATS_EXCESSIVE_COL
);
10155 TG3_STAT_ADD32(&sp
->tx_late_collisions
, MAC_TX_STATS_LATE_COL
);
10156 TG3_STAT_ADD32(&sp
->tx_ucast_packets
, MAC_TX_STATS_UCAST
);
10157 TG3_STAT_ADD32(&sp
->tx_mcast_packets
, MAC_TX_STATS_MCAST
);
10158 TG3_STAT_ADD32(&sp
->tx_bcast_packets
, MAC_TX_STATS_BCAST
);
10159 if (unlikely(tg3_flag(tp
, 5719_RDMA_BUG
) &&
10160 (sp
->tx_ucast_packets
.low
+ sp
->tx_mcast_packets
.low
+
10161 sp
->tx_bcast_packets
.low
) > TG3_NUM_RDMA_CHANNELS
)) {
10164 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
10165 val
&= ~TG3_LSO_RD_DMA_TX_LENGTH_WA
;
10166 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
);
10167 tg3_flag_clear(tp
, 5719_RDMA_BUG
);
10170 TG3_STAT_ADD32(&sp
->rx_octets
, MAC_RX_STATS_OCTETS
);
10171 TG3_STAT_ADD32(&sp
->rx_fragments
, MAC_RX_STATS_FRAGMENTS
);
10172 TG3_STAT_ADD32(&sp
->rx_ucast_packets
, MAC_RX_STATS_UCAST
);
10173 TG3_STAT_ADD32(&sp
->rx_mcast_packets
, MAC_RX_STATS_MCAST
);
10174 TG3_STAT_ADD32(&sp
->rx_bcast_packets
, MAC_RX_STATS_BCAST
);
10175 TG3_STAT_ADD32(&sp
->rx_fcs_errors
, MAC_RX_STATS_FCS_ERRORS
);
10176 TG3_STAT_ADD32(&sp
->rx_align_errors
, MAC_RX_STATS_ALIGN_ERRORS
);
10177 TG3_STAT_ADD32(&sp
->rx_xon_pause_rcvd
, MAC_RX_STATS_XON_PAUSE_RECVD
);
10178 TG3_STAT_ADD32(&sp
->rx_xoff_pause_rcvd
, MAC_RX_STATS_XOFF_PAUSE_RECVD
);
10179 TG3_STAT_ADD32(&sp
->rx_mac_ctrl_rcvd
, MAC_RX_STATS_MAC_CTRL_RECVD
);
10180 TG3_STAT_ADD32(&sp
->rx_xoff_entered
, MAC_RX_STATS_XOFF_ENTERED
);
10181 TG3_STAT_ADD32(&sp
->rx_frame_too_long_errors
, MAC_RX_STATS_FRAME_TOO_LONG
);
10182 TG3_STAT_ADD32(&sp
->rx_jabbers
, MAC_RX_STATS_JABBERS
);
10183 TG3_STAT_ADD32(&sp
->rx_undersize_packets
, MAC_RX_STATS_UNDERSIZE
);
10185 TG3_STAT_ADD32(&sp
->rxbds_empty
, RCVLPC_NO_RCV_BD_CNT
);
10186 if (tg3_asic_rev(tp
) != ASIC_REV_5717
&&
10187 tg3_chip_rev_id(tp
) != CHIPREV_ID_5719_A0
&&
10188 tg3_chip_rev_id(tp
) != CHIPREV_ID_5720_A0
) {
10189 TG3_STAT_ADD32(&sp
->rx_discards
, RCVLPC_IN_DISCARDS_CNT
);
10191 u32 val
= tr32(HOSTCC_FLOW_ATTN
);
10192 val
= (val
& HOSTCC_FLOW_ATTN_MBUF_LWM
) ? 1 : 0;
10194 tw32(HOSTCC_FLOW_ATTN
, HOSTCC_FLOW_ATTN_MBUF_LWM
);
10195 sp
->rx_discards
.low
+= val
;
10196 if (sp
->rx_discards
.low
< val
)
10197 sp
->rx_discards
.high
+= 1;
10199 sp
->mbuf_lwm_thresh_hit
= sp
->rx_discards
;
10201 TG3_STAT_ADD32(&sp
->rx_errors
, RCVLPC_IN_ERRORS_CNT
);
10204 static void tg3_chk_missed_msi(struct tg3
*tp
)
10208 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
10209 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
10211 if (tg3_has_work(tnapi
)) {
10212 if (tnapi
->last_rx_cons
== tnapi
->rx_rcb_ptr
&&
10213 tnapi
->last_tx_cons
== tnapi
->tx_cons
) {
10214 if (tnapi
->chk_msi_cnt
< 1) {
10215 tnapi
->chk_msi_cnt
++;
10221 tnapi
->chk_msi_cnt
= 0;
10222 tnapi
->last_rx_cons
= tnapi
->rx_rcb_ptr
;
10223 tnapi
->last_tx_cons
= tnapi
->tx_cons
;
10227 static void tg3_timer(unsigned long __opaque
)
10229 struct tg3
*tp
= (struct tg3
*) __opaque
;
10231 if (tp
->irq_sync
|| tg3_flag(tp
, RESET_TASK_PENDING
))
10232 goto restart_timer
;
10234 spin_lock(&tp
->lock
);
10236 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
10237 tg3_flag(tp
, 57765_CLASS
))
10238 tg3_chk_missed_msi(tp
);
10240 if (tg3_flag(tp
, FLUSH_POSTED_WRITES
)) {
10241 /* BCM4785: Flush posted writes from GbE to host memory. */
10245 if (!tg3_flag(tp
, TAGGED_STATUS
)) {
10246 /* All of this garbage is because when using non-tagged
10247 * IRQ status the mailbox/status_block protocol the chip
10248 * uses with the cpu is race prone.
10250 if (tp
->napi
[0].hw_status
->status
& SD_STATUS_UPDATED
) {
10251 tw32(GRC_LOCAL_CTRL
,
10252 tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
10254 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
10255 HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
);
10258 if (!(tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
10259 spin_unlock(&tp
->lock
);
10260 tg3_reset_task_schedule(tp
);
10261 goto restart_timer
;
10265 /* This part only runs once per second. */
10266 if (!--tp
->timer_counter
) {
10267 if (tg3_flag(tp
, 5705_PLUS
))
10268 tg3_periodic_fetch_stats(tp
);
10270 if (tp
->setlpicnt
&& !--tp
->setlpicnt
)
10271 tg3_phy_eee_enable(tp
);
10273 if (tg3_flag(tp
, USE_LINKCHG_REG
)) {
10277 mac_stat
= tr32(MAC_STATUS
);
10280 if (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
) {
10281 if (mac_stat
& MAC_STATUS_MI_INTERRUPT
)
10283 } else if (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)
10287 tg3_setup_phy(tp
, 0);
10288 } else if (tg3_flag(tp
, POLL_SERDES
)) {
10289 u32 mac_stat
= tr32(MAC_STATUS
);
10290 int need_setup
= 0;
10293 (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)) {
10296 if (!tp
->link_up
&&
10297 (mac_stat
& (MAC_STATUS_PCS_SYNCED
|
10298 MAC_STATUS_SIGNAL_DET
))) {
10302 if (!tp
->serdes_counter
) {
10305 ~MAC_MODE_PORT_MODE_MASK
));
10307 tw32_f(MAC_MODE
, tp
->mac_mode
);
10310 tg3_setup_phy(tp
, 0);
10312 } else if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
10313 tg3_flag(tp
, 5780_CLASS
)) {
10314 tg3_serdes_parallel_detect(tp
);
10317 tp
->timer_counter
= tp
->timer_multiplier
;
10320 /* Heartbeat is only sent once every 2 seconds.
10322 * The heartbeat is to tell the ASF firmware that the host
10323 * driver is still alive. In the event that the OS crashes,
10324 * ASF needs to reset the hardware to free up the FIFO space
10325 * that may be filled with rx packets destined for the host.
10326 * If the FIFO is full, ASF will no longer function properly.
10328 * Unintended resets have been reported on real time kernels
10329 * where the timer doesn't run on time. Netpoll will also have
10332 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10333 * to check the ring condition when the heartbeat is expiring
10334 * before doing the reset. This will prevent most unintended
10337 if (!--tp
->asf_counter
) {
10338 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
10339 tg3_wait_for_event_ack(tp
);
10341 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
,
10342 FWCMD_NICDRV_ALIVE3
);
10343 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 4);
10344 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
,
10345 TG3_FW_UPDATE_TIMEOUT_SEC
);
10347 tg3_generate_fw_event(tp
);
10349 tp
->asf_counter
= tp
->asf_multiplier
;
10352 spin_unlock(&tp
->lock
);
10355 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
10356 add_timer(&tp
->timer
);
10359 static void tg3_timer_init(struct tg3
*tp
)
10361 if (tg3_flag(tp
, TAGGED_STATUS
) &&
10362 tg3_asic_rev(tp
) != ASIC_REV_5717
&&
10363 !tg3_flag(tp
, 57765_CLASS
))
10364 tp
->timer_offset
= HZ
;
10366 tp
->timer_offset
= HZ
/ 10;
10368 BUG_ON(tp
->timer_offset
> HZ
);
10370 tp
->timer_multiplier
= (HZ
/ tp
->timer_offset
);
10371 tp
->asf_multiplier
= (HZ
/ tp
->timer_offset
) *
10372 TG3_FW_UPDATE_FREQ_SEC
;
10374 init_timer(&tp
->timer
);
10375 tp
->timer
.data
= (unsigned long) tp
;
10376 tp
->timer
.function
= tg3_timer
;
10379 static void tg3_timer_start(struct tg3
*tp
)
10381 tp
->asf_counter
= tp
->asf_multiplier
;
10382 tp
->timer_counter
= tp
->timer_multiplier
;
10384 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
10385 add_timer(&tp
->timer
);
10388 static void tg3_timer_stop(struct tg3
*tp
)
10390 del_timer_sync(&tp
->timer
);
10393 /* Restart hardware after configuration changes, self-test, etc.
10394 * Invoked with tp->lock held.
10396 static int tg3_restart_hw(struct tg3
*tp
, int reset_phy
)
10397 __releases(tp
->lock
)
10398 __acquires(tp
->lock
)
10402 err
= tg3_init_hw(tp
, reset_phy
);
10404 netdev_err(tp
->dev
,
10405 "Failed to re-initialize device, aborting\n");
10406 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10407 tg3_full_unlock(tp
);
10408 tg3_timer_stop(tp
);
10410 tg3_napi_enable(tp
);
10411 dev_close(tp
->dev
);
10412 tg3_full_lock(tp
, 0);
10417 static void tg3_reset_task(struct work_struct
*work
)
10419 struct tg3
*tp
= container_of(work
, struct tg3
, reset_task
);
10422 tg3_full_lock(tp
, 0);
10424 if (!netif_running(tp
->dev
)) {
10425 tg3_flag_clear(tp
, RESET_TASK_PENDING
);
10426 tg3_full_unlock(tp
);
10430 tg3_full_unlock(tp
);
10434 tg3_netif_stop(tp
);
10436 tg3_full_lock(tp
, 1);
10438 if (tg3_flag(tp
, TX_RECOVERY_PENDING
)) {
10439 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
10440 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
10441 tg3_flag_set(tp
, MBOX_WRITE_REORDER
);
10442 tg3_flag_clear(tp
, TX_RECOVERY_PENDING
);
10445 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 0);
10446 err
= tg3_init_hw(tp
, 1);
10450 tg3_netif_start(tp
);
10453 tg3_full_unlock(tp
);
10458 tg3_flag_clear(tp
, RESET_TASK_PENDING
);
10461 static int tg3_request_irq(struct tg3
*tp
, int irq_num
)
10464 unsigned long flags
;
10466 struct tg3_napi
*tnapi
= &tp
->napi
[irq_num
];
10468 if (tp
->irq_cnt
== 1)
10469 name
= tp
->dev
->name
;
10471 name
= &tnapi
->irq_lbl
[0];
10472 snprintf(name
, IFNAMSIZ
, "%s-%d", tp
->dev
->name
, irq_num
);
10473 name
[IFNAMSIZ
-1] = 0;
10476 if (tg3_flag(tp
, USING_MSI
) || tg3_flag(tp
, USING_MSIX
)) {
10478 if (tg3_flag(tp
, 1SHOT_MSI
))
10479 fn
= tg3_msi_1shot
;
10482 fn
= tg3_interrupt
;
10483 if (tg3_flag(tp
, TAGGED_STATUS
))
10484 fn
= tg3_interrupt_tagged
;
10485 flags
= IRQF_SHARED
;
10488 return request_irq(tnapi
->irq_vec
, fn
, flags
, name
, tnapi
);
10491 static int tg3_test_interrupt(struct tg3
*tp
)
10493 struct tg3_napi
*tnapi
= &tp
->napi
[0];
10494 struct net_device
*dev
= tp
->dev
;
10495 int err
, i
, intr_ok
= 0;
10498 if (!netif_running(dev
))
10501 tg3_disable_ints(tp
);
10503 free_irq(tnapi
->irq_vec
, tnapi
);
10506 * Turn off MSI one shot mode. Otherwise this test has no
10507 * observable way to know whether the interrupt was delivered.
10509 if (tg3_flag(tp
, 57765_PLUS
)) {
10510 val
= tr32(MSGINT_MODE
) | MSGINT_MODE_ONE_SHOT_DISABLE
;
10511 tw32(MSGINT_MODE
, val
);
10514 err
= request_irq(tnapi
->irq_vec
, tg3_test_isr
,
10515 IRQF_SHARED
, dev
->name
, tnapi
);
10519 tnapi
->hw_status
->status
&= ~SD_STATUS_UPDATED
;
10520 tg3_enable_ints(tp
);
10522 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
10525 for (i
= 0; i
< 5; i
++) {
10526 u32 int_mbox
, misc_host_ctrl
;
10528 int_mbox
= tr32_mailbox(tnapi
->int_mbox
);
10529 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
10531 if ((int_mbox
!= 0) ||
10532 (misc_host_ctrl
& MISC_HOST_CTRL_MASK_PCI_INT
)) {
10537 if (tg3_flag(tp
, 57765_PLUS
) &&
10538 tnapi
->hw_status
->status_tag
!= tnapi
->last_tag
)
10539 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
10544 tg3_disable_ints(tp
);
10546 free_irq(tnapi
->irq_vec
, tnapi
);
10548 err
= tg3_request_irq(tp
, 0);
10554 /* Reenable MSI one shot mode. */
10555 if (tg3_flag(tp
, 57765_PLUS
) && tg3_flag(tp
, 1SHOT_MSI
)) {
10556 val
= tr32(MSGINT_MODE
) & ~MSGINT_MODE_ONE_SHOT_DISABLE
;
10557 tw32(MSGINT_MODE
, val
);
10565 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10566 * successfully restored
10568 static int tg3_test_msi(struct tg3
*tp
)
10573 if (!tg3_flag(tp
, USING_MSI
))
10576 /* Turn off SERR reporting in case MSI terminates with Master
10579 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
10580 pci_write_config_word(tp
->pdev
, PCI_COMMAND
,
10581 pci_cmd
& ~PCI_COMMAND_SERR
);
10583 err
= tg3_test_interrupt(tp
);
10585 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
10590 /* other failures */
10594 /* MSI test failed, go back to INTx mode */
10595 netdev_warn(tp
->dev
, "No interrupt was generated using MSI. Switching "
10596 "to INTx mode. Please report this failure to the PCI "
10597 "maintainer and include system chipset information\n");
10599 free_irq(tp
->napi
[0].irq_vec
, &tp
->napi
[0]);
10601 pci_disable_msi(tp
->pdev
);
10603 tg3_flag_clear(tp
, USING_MSI
);
10604 tp
->napi
[0].irq_vec
= tp
->pdev
->irq
;
10606 err
= tg3_request_irq(tp
, 0);
10610 /* Need to reset the chip because the MSI cycle may have terminated
10611 * with Master Abort.
10613 tg3_full_lock(tp
, 1);
10615 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10616 err
= tg3_init_hw(tp
, 1);
10618 tg3_full_unlock(tp
);
10621 free_irq(tp
->napi
[0].irq_vec
, &tp
->napi
[0]);
10626 static int tg3_request_firmware(struct tg3
*tp
)
10628 const struct tg3_firmware_hdr
*fw_hdr
;
10630 if (request_firmware(&tp
->fw
, tp
->fw_needed
, &tp
->pdev
->dev
)) {
10631 netdev_err(tp
->dev
, "Failed to load firmware \"%s\"\n",
10636 fw_hdr
= (struct tg3_firmware_hdr
*)tp
->fw
->data
;
10638 /* Firmware blob starts with version numbers, followed by
10639 * start address and _full_ length including BSS sections
10640 * (which must be longer than the actual data, of course
10643 tp
->fw_len
= be32_to_cpu(fw_hdr
->len
); /* includes bss */
10644 if (tp
->fw_len
< (tp
->fw
->size
- TG3_FW_HDR_LEN
)) {
10645 netdev_err(tp
->dev
, "bogus length %d in \"%s\"\n",
10646 tp
->fw_len
, tp
->fw_needed
);
10647 release_firmware(tp
->fw
);
10652 /* We no longer need firmware; we have it. */
10653 tp
->fw_needed
= NULL
;
10657 static u32
tg3_irq_count(struct tg3
*tp
)
10659 u32 irq_cnt
= max(tp
->rxq_cnt
, tp
->txq_cnt
);
10662 /* We want as many rx rings enabled as there are cpus.
10663 * In multiqueue MSI-X mode, the first MSI-X vector
10664 * only deals with link interrupts, etc, so we add
10665 * one to the number of vectors we are requesting.
10667 irq_cnt
= min_t(unsigned, irq_cnt
+ 1, tp
->irq_max
);
10673 static bool tg3_enable_msix(struct tg3
*tp
)
10676 struct msix_entry msix_ent
[TG3_IRQ_MAX_VECS
];
10678 tp
->txq_cnt
= tp
->txq_req
;
10679 tp
->rxq_cnt
= tp
->rxq_req
;
10681 tp
->rxq_cnt
= netif_get_num_default_rss_queues();
10682 if (tp
->rxq_cnt
> tp
->rxq_max
)
10683 tp
->rxq_cnt
= tp
->rxq_max
;
10685 /* Disable multiple TX rings by default. Simple round-robin hardware
10686 * scheduling of the TX rings can cause starvation of rings with
10687 * small packets when other rings have TSO or jumbo packets.
10692 tp
->irq_cnt
= tg3_irq_count(tp
);
10694 for (i
= 0; i
< tp
->irq_max
; i
++) {
10695 msix_ent
[i
].entry
= i
;
10696 msix_ent
[i
].vector
= 0;
10699 rc
= pci_enable_msix(tp
->pdev
, msix_ent
, tp
->irq_cnt
);
10702 } else if (rc
!= 0) {
10703 if (pci_enable_msix(tp
->pdev
, msix_ent
, rc
))
10705 netdev_notice(tp
->dev
, "Requested %d MSI-X vectors, received %d\n",
10708 tp
->rxq_cnt
= max(rc
- 1, 1);
10710 tp
->txq_cnt
= min(tp
->rxq_cnt
, tp
->txq_max
);
10713 for (i
= 0; i
< tp
->irq_max
; i
++)
10714 tp
->napi
[i
].irq_vec
= msix_ent
[i
].vector
;
10716 if (netif_set_real_num_rx_queues(tp
->dev
, tp
->rxq_cnt
)) {
10717 pci_disable_msix(tp
->pdev
);
10721 if (tp
->irq_cnt
== 1)
10724 tg3_flag_set(tp
, ENABLE_RSS
);
10726 if (tp
->txq_cnt
> 1)
10727 tg3_flag_set(tp
, ENABLE_TSS
);
10729 netif_set_real_num_tx_queues(tp
->dev
, tp
->txq_cnt
);
10734 static void tg3_ints_init(struct tg3
*tp
)
10736 if ((tg3_flag(tp
, SUPPORT_MSI
) || tg3_flag(tp
, SUPPORT_MSIX
)) &&
10737 !tg3_flag(tp
, TAGGED_STATUS
)) {
10738 /* All MSI supporting chips should support tagged
10739 * status. Assert that this is the case.
10741 netdev_warn(tp
->dev
,
10742 "MSI without TAGGED_STATUS? Not using MSI\n");
10746 if (tg3_flag(tp
, SUPPORT_MSIX
) && tg3_enable_msix(tp
))
10747 tg3_flag_set(tp
, USING_MSIX
);
10748 else if (tg3_flag(tp
, SUPPORT_MSI
) && pci_enable_msi(tp
->pdev
) == 0)
10749 tg3_flag_set(tp
, USING_MSI
);
10751 if (tg3_flag(tp
, USING_MSI
) || tg3_flag(tp
, USING_MSIX
)) {
10752 u32 msi_mode
= tr32(MSGINT_MODE
);
10753 if (tg3_flag(tp
, USING_MSIX
) && tp
->irq_cnt
> 1)
10754 msi_mode
|= MSGINT_MODE_MULTIVEC_EN
;
10755 if (!tg3_flag(tp
, 1SHOT_MSI
))
10756 msi_mode
|= MSGINT_MODE_ONE_SHOT_DISABLE
;
10757 tw32(MSGINT_MODE
, msi_mode
| MSGINT_MODE_ENABLE
);
10760 if (!tg3_flag(tp
, USING_MSIX
)) {
10762 tp
->napi
[0].irq_vec
= tp
->pdev
->irq
;
10765 if (tp
->irq_cnt
== 1) {
10768 netif_set_real_num_tx_queues(tp
->dev
, 1);
10769 netif_set_real_num_rx_queues(tp
->dev
, 1);
10773 static void tg3_ints_fini(struct tg3
*tp
)
10775 if (tg3_flag(tp
, USING_MSIX
))
10776 pci_disable_msix(tp
->pdev
);
10777 else if (tg3_flag(tp
, USING_MSI
))
10778 pci_disable_msi(tp
->pdev
);
10779 tg3_flag_clear(tp
, USING_MSI
);
10780 tg3_flag_clear(tp
, USING_MSIX
);
10781 tg3_flag_clear(tp
, ENABLE_RSS
);
10782 tg3_flag_clear(tp
, ENABLE_TSS
);
10785 static int tg3_start(struct tg3
*tp
, bool reset_phy
, bool test_irq
,
10788 struct net_device
*dev
= tp
->dev
;
10792 * Setup interrupts first so we know how
10793 * many NAPI resources to allocate
10797 tg3_rss_check_indir_tbl(tp
);
10799 /* The placement of this call is tied
10800 * to the setup and use of Host TX descriptors.
10802 err
= tg3_alloc_consistent(tp
);
10808 tg3_napi_enable(tp
);
10810 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
10811 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
10812 err
= tg3_request_irq(tp
, i
);
10814 for (i
--; i
>= 0; i
--) {
10815 tnapi
= &tp
->napi
[i
];
10816 free_irq(tnapi
->irq_vec
, tnapi
);
10822 tg3_full_lock(tp
, 0);
10824 err
= tg3_init_hw(tp
, reset_phy
);
10826 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10827 tg3_free_rings(tp
);
10830 tg3_full_unlock(tp
);
10835 if (test_irq
&& tg3_flag(tp
, USING_MSI
)) {
10836 err
= tg3_test_msi(tp
);
10839 tg3_full_lock(tp
, 0);
10840 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10841 tg3_free_rings(tp
);
10842 tg3_full_unlock(tp
);
10847 if (!tg3_flag(tp
, 57765_PLUS
) && tg3_flag(tp
, USING_MSI
)) {
10848 u32 val
= tr32(PCIE_TRANSACTION_CFG
);
10850 tw32(PCIE_TRANSACTION_CFG
,
10851 val
| PCIE_TRANS_CFG_1SHOT_MSI
);
10857 tg3_hwmon_open(tp
);
10859 tg3_full_lock(tp
, 0);
10861 tg3_timer_start(tp
);
10862 tg3_flag_set(tp
, INIT_COMPLETE
);
10863 tg3_enable_ints(tp
);
10868 tg3_ptp_resume(tp
);
10871 tg3_full_unlock(tp
);
10873 netif_tx_start_all_queues(dev
);
10876 * Reset loopback feature if it was turned on while the device was down
10877 * make sure that it's installed properly now.
10879 if (dev
->features
& NETIF_F_LOOPBACK
)
10880 tg3_set_loopback(dev
, dev
->features
);
10885 for (i
= tp
->irq_cnt
- 1; i
>= 0; i
--) {
10886 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
10887 free_irq(tnapi
->irq_vec
, tnapi
);
10891 tg3_napi_disable(tp
);
10893 tg3_free_consistent(tp
);
10901 static void tg3_stop(struct tg3
*tp
)
10905 tg3_reset_task_cancel(tp
);
10906 tg3_netif_stop(tp
);
10908 tg3_timer_stop(tp
);
10910 tg3_hwmon_close(tp
);
10914 tg3_full_lock(tp
, 1);
10916 tg3_disable_ints(tp
);
10918 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10919 tg3_free_rings(tp
);
10920 tg3_flag_clear(tp
, INIT_COMPLETE
);
10922 tg3_full_unlock(tp
);
10924 for (i
= tp
->irq_cnt
- 1; i
>= 0; i
--) {
10925 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
10926 free_irq(tnapi
->irq_vec
, tnapi
);
10933 tg3_free_consistent(tp
);
10936 static int tg3_open(struct net_device
*dev
)
10938 struct tg3
*tp
= netdev_priv(dev
);
10941 if (tp
->fw_needed
) {
10942 err
= tg3_request_firmware(tp
);
10943 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
) {
10947 netdev_warn(tp
->dev
, "TSO capability disabled\n");
10948 tg3_flag_clear(tp
, TSO_CAPABLE
);
10949 } else if (!tg3_flag(tp
, TSO_CAPABLE
)) {
10950 netdev_notice(tp
->dev
, "TSO capability restored\n");
10951 tg3_flag_set(tp
, TSO_CAPABLE
);
10955 tg3_carrier_off(tp
);
10957 err
= tg3_power_up(tp
);
10961 tg3_full_lock(tp
, 0);
10963 tg3_disable_ints(tp
);
10964 tg3_flag_clear(tp
, INIT_COMPLETE
);
10966 tg3_full_unlock(tp
);
10968 err
= tg3_start(tp
, true, true, true);
10970 tg3_frob_aux_power(tp
, false);
10971 pci_set_power_state(tp
->pdev
, PCI_D3hot
);
10974 if (tg3_flag(tp
, PTP_CAPABLE
)) {
10975 tp
->ptp_clock
= ptp_clock_register(&tp
->ptp_info
,
10977 if (IS_ERR(tp
->ptp_clock
))
10978 tp
->ptp_clock
= NULL
;
10984 static int tg3_close(struct net_device
*dev
)
10986 struct tg3
*tp
= netdev_priv(dev
);
10992 /* Clear stats across close / open calls */
10993 memset(&tp
->net_stats_prev
, 0, sizeof(tp
->net_stats_prev
));
10994 memset(&tp
->estats_prev
, 0, sizeof(tp
->estats_prev
));
10996 tg3_power_down(tp
);
10998 tg3_carrier_off(tp
);
11003 static inline u64
get_stat64(tg3_stat64_t
*val
)
11005 return ((u64
)val
->high
<< 32) | ((u64
)val
->low
);
11008 static u64
tg3_calc_crc_errors(struct tg3
*tp
)
11010 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
11012 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
11013 (tg3_asic_rev(tp
) == ASIC_REV_5700
||
11014 tg3_asic_rev(tp
) == ASIC_REV_5701
)) {
11017 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &val
)) {
11018 tg3_writephy(tp
, MII_TG3_TEST1
,
11019 val
| MII_TG3_TEST1_CRC_EN
);
11020 tg3_readphy(tp
, MII_TG3_RXR_COUNTERS
, &val
);
11024 tp
->phy_crc_errors
+= val
;
11026 return tp
->phy_crc_errors
;
11029 return get_stat64(&hw_stats
->rx_fcs_errors
);
11032 #define ESTAT_ADD(member) \
11033 estats->member = old_estats->member + \
11034 get_stat64(&hw_stats->member)
11036 static void tg3_get_estats(struct tg3
*tp
, struct tg3_ethtool_stats
*estats
)
11038 struct tg3_ethtool_stats
*old_estats
= &tp
->estats_prev
;
11039 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
11041 ESTAT_ADD(rx_octets
);
11042 ESTAT_ADD(rx_fragments
);
11043 ESTAT_ADD(rx_ucast_packets
);
11044 ESTAT_ADD(rx_mcast_packets
);
11045 ESTAT_ADD(rx_bcast_packets
);
11046 ESTAT_ADD(rx_fcs_errors
);
11047 ESTAT_ADD(rx_align_errors
);
11048 ESTAT_ADD(rx_xon_pause_rcvd
);
11049 ESTAT_ADD(rx_xoff_pause_rcvd
);
11050 ESTAT_ADD(rx_mac_ctrl_rcvd
);
11051 ESTAT_ADD(rx_xoff_entered
);
11052 ESTAT_ADD(rx_frame_too_long_errors
);
11053 ESTAT_ADD(rx_jabbers
);
11054 ESTAT_ADD(rx_undersize_packets
);
11055 ESTAT_ADD(rx_in_length_errors
);
11056 ESTAT_ADD(rx_out_length_errors
);
11057 ESTAT_ADD(rx_64_or_less_octet_packets
);
11058 ESTAT_ADD(rx_65_to_127_octet_packets
);
11059 ESTAT_ADD(rx_128_to_255_octet_packets
);
11060 ESTAT_ADD(rx_256_to_511_octet_packets
);
11061 ESTAT_ADD(rx_512_to_1023_octet_packets
);
11062 ESTAT_ADD(rx_1024_to_1522_octet_packets
);
11063 ESTAT_ADD(rx_1523_to_2047_octet_packets
);
11064 ESTAT_ADD(rx_2048_to_4095_octet_packets
);
11065 ESTAT_ADD(rx_4096_to_8191_octet_packets
);
11066 ESTAT_ADD(rx_8192_to_9022_octet_packets
);
11068 ESTAT_ADD(tx_octets
);
11069 ESTAT_ADD(tx_collisions
);
11070 ESTAT_ADD(tx_xon_sent
);
11071 ESTAT_ADD(tx_xoff_sent
);
11072 ESTAT_ADD(tx_flow_control
);
11073 ESTAT_ADD(tx_mac_errors
);
11074 ESTAT_ADD(tx_single_collisions
);
11075 ESTAT_ADD(tx_mult_collisions
);
11076 ESTAT_ADD(tx_deferred
);
11077 ESTAT_ADD(tx_excessive_collisions
);
11078 ESTAT_ADD(tx_late_collisions
);
11079 ESTAT_ADD(tx_collide_2times
);
11080 ESTAT_ADD(tx_collide_3times
);
11081 ESTAT_ADD(tx_collide_4times
);
11082 ESTAT_ADD(tx_collide_5times
);
11083 ESTAT_ADD(tx_collide_6times
);
11084 ESTAT_ADD(tx_collide_7times
);
11085 ESTAT_ADD(tx_collide_8times
);
11086 ESTAT_ADD(tx_collide_9times
);
11087 ESTAT_ADD(tx_collide_10times
);
11088 ESTAT_ADD(tx_collide_11times
);
11089 ESTAT_ADD(tx_collide_12times
);
11090 ESTAT_ADD(tx_collide_13times
);
11091 ESTAT_ADD(tx_collide_14times
);
11092 ESTAT_ADD(tx_collide_15times
);
11093 ESTAT_ADD(tx_ucast_packets
);
11094 ESTAT_ADD(tx_mcast_packets
);
11095 ESTAT_ADD(tx_bcast_packets
);
11096 ESTAT_ADD(tx_carrier_sense_errors
);
11097 ESTAT_ADD(tx_discards
);
11098 ESTAT_ADD(tx_errors
);
11100 ESTAT_ADD(dma_writeq_full
);
11101 ESTAT_ADD(dma_write_prioq_full
);
11102 ESTAT_ADD(rxbds_empty
);
11103 ESTAT_ADD(rx_discards
);
11104 ESTAT_ADD(rx_errors
);
11105 ESTAT_ADD(rx_threshold_hit
);
11107 ESTAT_ADD(dma_readq_full
);
11108 ESTAT_ADD(dma_read_prioq_full
);
11109 ESTAT_ADD(tx_comp_queue_full
);
11111 ESTAT_ADD(ring_set_send_prod_index
);
11112 ESTAT_ADD(ring_status_update
);
11113 ESTAT_ADD(nic_irqs
);
11114 ESTAT_ADD(nic_avoided_irqs
);
11115 ESTAT_ADD(nic_tx_threshold_hit
);
11117 ESTAT_ADD(mbuf_lwm_thresh_hit
);
11120 static void tg3_get_nstats(struct tg3
*tp
, struct rtnl_link_stats64
*stats
)
11122 struct rtnl_link_stats64
*old_stats
= &tp
->net_stats_prev
;
11123 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
11125 stats
->rx_packets
= old_stats
->rx_packets
+
11126 get_stat64(&hw_stats
->rx_ucast_packets
) +
11127 get_stat64(&hw_stats
->rx_mcast_packets
) +
11128 get_stat64(&hw_stats
->rx_bcast_packets
);
11130 stats
->tx_packets
= old_stats
->tx_packets
+
11131 get_stat64(&hw_stats
->tx_ucast_packets
) +
11132 get_stat64(&hw_stats
->tx_mcast_packets
) +
11133 get_stat64(&hw_stats
->tx_bcast_packets
);
11135 stats
->rx_bytes
= old_stats
->rx_bytes
+
11136 get_stat64(&hw_stats
->rx_octets
);
11137 stats
->tx_bytes
= old_stats
->tx_bytes
+
11138 get_stat64(&hw_stats
->tx_octets
);
11140 stats
->rx_errors
= old_stats
->rx_errors
+
11141 get_stat64(&hw_stats
->rx_errors
);
11142 stats
->tx_errors
= old_stats
->tx_errors
+
11143 get_stat64(&hw_stats
->tx_errors
) +
11144 get_stat64(&hw_stats
->tx_mac_errors
) +
11145 get_stat64(&hw_stats
->tx_carrier_sense_errors
) +
11146 get_stat64(&hw_stats
->tx_discards
);
11148 stats
->multicast
= old_stats
->multicast
+
11149 get_stat64(&hw_stats
->rx_mcast_packets
);
11150 stats
->collisions
= old_stats
->collisions
+
11151 get_stat64(&hw_stats
->tx_collisions
);
11153 stats
->rx_length_errors
= old_stats
->rx_length_errors
+
11154 get_stat64(&hw_stats
->rx_frame_too_long_errors
) +
11155 get_stat64(&hw_stats
->rx_undersize_packets
);
11157 stats
->rx_over_errors
= old_stats
->rx_over_errors
+
11158 get_stat64(&hw_stats
->rxbds_empty
);
11159 stats
->rx_frame_errors
= old_stats
->rx_frame_errors
+
11160 get_stat64(&hw_stats
->rx_align_errors
);
11161 stats
->tx_aborted_errors
= old_stats
->tx_aborted_errors
+
11162 get_stat64(&hw_stats
->tx_discards
);
11163 stats
->tx_carrier_errors
= old_stats
->tx_carrier_errors
+
11164 get_stat64(&hw_stats
->tx_carrier_sense_errors
);
11166 stats
->rx_crc_errors
= old_stats
->rx_crc_errors
+
11167 tg3_calc_crc_errors(tp
);
11169 stats
->rx_missed_errors
= old_stats
->rx_missed_errors
+
11170 get_stat64(&hw_stats
->rx_discards
);
11172 stats
->rx_dropped
= tp
->rx_dropped
;
11173 stats
->tx_dropped
= tp
->tx_dropped
;
11176 static int tg3_get_regs_len(struct net_device
*dev
)
11178 return TG3_REG_BLK_SIZE
;
11181 static void tg3_get_regs(struct net_device
*dev
,
11182 struct ethtool_regs
*regs
, void *_p
)
11184 struct tg3
*tp
= netdev_priv(dev
);
11188 memset(_p
, 0, TG3_REG_BLK_SIZE
);
11190 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
11193 tg3_full_lock(tp
, 0);
11195 tg3_dump_legacy_regs(tp
, (u32
*)_p
);
11197 tg3_full_unlock(tp
);
11200 static int tg3_get_eeprom_len(struct net_device
*dev
)
11202 struct tg3
*tp
= netdev_priv(dev
);
11204 return tp
->nvram_size
;
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}

	return 0;
}
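/* Worked example of the alignment handling above (illustrative values):
 * offset = 5, len = 10 gives b_offset = 1, b_count = 3, so one word is
 * read at NVRAM offset 4 and its upper three bytes are copied; the
 * aligned middle loop then reads one word at offset 8, and the final
 * (len & 3) == 3 bytes come from a last word read at offset 12,
 * for 3 + 4 + 3 = 10 bytes total.
 */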
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
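/* Unaligned writes are widened here to a 4-byte-aligned span: the words
 * straddling the start and end of the caller's range are read back
 * (start/end above) and merged around the new data in a bounce buffer,
 * so tg3_nvram_write_block() only ever sees whole, aligned words.
 * Illustrative case: offset = 6, len = 5 becomes an aligned 8-byte
 * write at offset 4, with bytes 4-5 and 11 preserved from NVRAM.
 */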
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}

	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;

	return 0;
}
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
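/* Bounds rationale, as I read the validation above: tx_pending must
 * exceed MAX_SKB_FRAGS so one maximally fragmented skb can always be
 * queued, and chips carrying the TSO_BUG workaround need roughly three
 * times that headroom, since a TSO frame may be expanded into extra
 * descriptors when the workaround path segments it in the driver.
 */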
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->rxq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_RSS_MAX_NUM_QS)
				info->data = TG3_RSS_MAX_NUM_QS;
		}

		/* The first interrupt vector only
		 * handles link interrupts.
		 */
		info->data -= 1;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}
static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}
static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
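/* Each rss_ind_tbl[] entry maps one RSS hash bucket to an rx queue
 * index.  As the comment above notes, the table may be rewritten while
 * traffic flows: the hardware simply steers subsequent packets with the
 * new mapping, so no quiesce is needed (no descriptor or ring state is
 * involved in the update).
 */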
static void tg3_get_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 deflt_qs = netif_get_num_default_rss_queues();

	channel->max_rx = tp->rxq_max;
	channel->max_tx = tp->txq_max;

	if (netif_running(dev)) {
		channel->rx_count = tp->rxq_cnt;
		channel->tx_count = tp->txq_cnt;
	} else {
		if (tp->rxq_req)
			channel->rx_count = tp->rxq_req;
		else
			channel->rx_count = min(deflt_qs, tp->rxq_max);

		if (tp->txq_req)
			channel->tx_count = tp->txq_req;
		else
			channel->tx_count = min(deflt_qs, tp->txq_max);
	}
}
static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
#define NVRAM_TEST_SIZE			0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE		0x20
#define NVRAM_SELFBOOT_DATA_SIZE	0x1c
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 24) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else
				data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
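/* The selfboot HW-format loop above implements an odd-parity check, as
 * I read it: each data byte and its stored parity bit must have an odd
 * total number of set bits.  Example: data 0x03 (hweight8 == 2, even)
 * is only valid with its parity bit set; data 0x07 (hweight8 == 3, odd)
 * only with the bit clear.  Either invalid pairing takes the goto out
 * path with err == -EIO.
 */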
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (tp->link_up)
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
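/* Mask semantics for the table above, as I read it: read_mask selects
 * bits expected to be read-only (their value must survive both the
 * all-zeros and all-ones probe writes), while write_mask selects
 * read/write bits that must follow whatever was written.  A pair of
 * 0xffffffff/0x00000000 therefore marks a register that must ignore
 * writes entirely, e.g. the *_BLK_NIC_ADDR entries.
 */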
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
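/* The patterns exercise all-zeros, all-ones, and an alternating value
 * (0xaa55a55a) in every word, which catches stuck-at bits and simple
 * data-line faults in the on-chip SRAM windows listed in the tables
 * below.  Each word is written and immediately read back.
 */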
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
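/* Template layout (my decoding of the bytes above): the first two bytes
 * are the IPv4 EtherType (0x0800); then a 20-byte IP header (0x45 =
 * version 4, IHL 5; protocol 0x06 = TCP; saddr 10.0.0.1, daddr
 * 10.0.0.2) and a TCP header of 20 bytes plus 12 option bytes
 * (0x80 >> 4 = data offset 8 words; NOP, NOP, timestamp option),
 * matching the TG3_TSO_*_LEN constants.
 */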
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
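/* Flow summary of the function above: one self-addressed frame is
 * built, DMA-mapped, and queued on the tx ring; the host coalescing
 * block is kicked so the status block updates, then the rx return ring
 * is polled and every payload byte is compared against the (i & 0xff)
 * fill pattern written on the tx side.  For the TSO case the hardware
 * is expected to resegment the oversize frame into num_pkts MSS-sized
 * packets, with val carrying the pattern position across packets.
 */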
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
					TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
					TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
					TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);
}
static int tg3_hwtstamp_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EINVAL;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	if (stmpconf.flags)
		return -EINVAL;

	switch (stmpconf.tx_type) {
	case HWTSTAMP_TX_ON:
		tg3_flag_set(tp, TX_TSTAMP_EN);
		break;
	case HWTSTAMP_TX_OFF:
		tg3_flag_clear(tp, TX_TSTAMP_EN);
		break;
	default:
		return -ERANGE;
	}

	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_ioctl(dev, ifr, cmd);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
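/* Note on the zero checks above, as I read the coalescing semantics:
 * the usecs timer and the max-frames threshold are OR-style triggers,
 * so e.g. rx_coalesce_usecs = 10 with rx_max_coalesced_frames = 0 still
 * interrupts on the timer alone; only both being zero would leave no rx
 * interrupt source at all, hence the -EINVAL.
 */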
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
};
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats) {
		spin_unlock_bh(&tp->lock);
		return &tp->net_stats_prev;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
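/* Wraparound example (illustrative): on a 128 KiB part the signature
 * word at offset 0 reappears once the read address wraps modulo the
 * chip size, so reads at 0x10, 0x20, ... keep differing until cursize
 * reaches 0x20000, where val == magic and the loop exits with the size.
 */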
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
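/* Illustrative arithmetic for the swab16() line above: if the low
 * halfword arrives as 0x0001 after the register-path byteswap described
 * in the comment, swab16() yields 0x0100 (256), so tp->nvram_size
 * becomes 256 * 1024 bytes.
 */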
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
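/* tg3_nvram_init() below is the single dispatch point for all of the
 * per-ASIC probe helpers above; the probing runs with the NVRAM
 * arbitration lock held, presumably so the host and management
 * firmware (ASF/APE) cannot race on the flash interface.
 */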
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
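/* Presumably the table above exists because very early boards shipped
 * without a valid NVRAM signature: when tg3_get_eeprom_hw_cfg() finds
 * no usable PHY id, tg3_phy_probe() falls back to matching the PCI
 * subsystem vendor/device pair against these hard-coded entries.
 */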
static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
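/* OTP read via the APE mailbox registers: program the address (the
 * offset * 8 scaling suggests the OTP is addressed in 8-byte rows),
 * kick a read command, then poll the status register for completion
 * for up to roughly a millisecond before giving up with -EBUSY.
 */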
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
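/* Link defaults: advertise everything the PHY flags allow and leave
 * autonegotiation enabled; serdes parts advertise FIBRE instead of
 * the twisted-pair 10/100 modes.
 */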
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
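/* The 32-bit phy_id used throughout the driver is a repacked form of
 * the two MII PHYSID registers rather than the raw (id1 << 16) | id2
 * value; the same packing is used when the id comes from NVRAM in
 * tg3_get_eeprom_hw_cfg(), so the two sources stay comparable.
 */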
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
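/* The VPD parsing below walks the read-only LRDT section with the
 * kernel's pci_vpd_* helpers; the "1028" manufacturer-id match appears
 * to be a Dell-specific case where the boot code version is published
 * through a vendor keyword instead of through NVRAM.
 */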
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xff000000) != 0)
		return 0;

	return 1;
}
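/* The version-string helpers below mostly append one component each to
 * tp->fw_ver, so the order in which tg3_read_fw_ver() calls them
 * matters; every writer appears to be bounded by TG3_VER_SIZE and the
 * buffer is NUL-terminated once at the end.
 */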
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
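/* Host chipsets known to reorder posted writes to the mailbox BARs;
 * when one is present (and the device is not on PCIe) every mailbox
 * write is flushed with a read-back, see the MBOX_WRITE_REORDER
 * handling in tg3_get_invariants() below.
 */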
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
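/* Newer devices report ASIC_REV_USE_PROD_ID_REG in the legacy chiprev
 * field and publish the real revision in a product-id config register
 * whose location depends on the PCI device id, hence the long
 * device-id chains below.
 */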
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
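/* tg3_get_invariants() is mostly ordering-sensitive: bus type and
 * workaround flags must be settled before the register-access method
 * pointers are chosen, and those in turn before the first MMIO access
 * that relies on them.
 */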
15183 static int tg3_get_invariants(struct tg3
*tp
, const struct pci_device_id
*ent
)
15186 u32 pci_state_reg
, grc_misc_cfg
;
15191 /* Force memory write invalidate off. If we leave it on,
15192 * then on 5700_BX chips we have to enable a workaround.
15193 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15194 * to match the cacheline size. The Broadcom driver have this
15195 * workaround but turns MWI off all the times so never uses
15196 * it. This seems to suggest that the workaround is insufficient.
15198 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
15199 pci_cmd
&= ~PCI_COMMAND_INVALIDATE
;
15200 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
15202 /* Important! -- Make sure register accesses are byteswapped
15203 * correctly. Also, for those chips that require it, make
15204 * sure that indirect register accesses are enabled before
15205 * the first operation.
15207 pci_read_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
15209 tp
->misc_host_ctrl
|= (misc_ctrl_reg
&
15210 MISC_HOST_CTRL_CHIPREV
);
15211 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
15212 tp
->misc_host_ctrl
);
15214 tg3_detect_asic_rev(tp
, misc_ctrl_reg
);
15216 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15217 * we need to disable memory and use config. cycles
15218 * only to access all registers. The 5702/03 chips
15219 * can mistakenly decode the special cycles from the
15220 * ICH chipsets as memory write cycles, causing corruption
15221 * of register and memory space. Only certain ICH bridges
15222 * will drive special cycles with non-zero data during the
15223 * address phase which can fall within the 5703's address
15224 * range. This is not an ICH bug as the PCI spec allows
15225 * non-zero address during special cycles. However, only
15226 * these ICH bridges are known to drive non-zero addresses
15227 * during special cycles.
15229 * Since special cycles do not cross PCI bridges, we only
15230 * enable this workaround if the 5703 is on the secondary
15231 * bus of these ICH bridges.
15233 if ((tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A1
) ||
15234 (tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A2
)) {
15235 static struct tg3_dev_id
{
15239 } ich_chipsets
[] = {
15240 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AA_8
,
15242 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AB_8
,
15244 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_11
,
15246 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_6
,
15250 struct tg3_dev_id
*pci_id
= &ich_chipsets
[0];
15251 struct pci_dev
*bridge
= NULL
;
15253 while (pci_id
->vendor
!= 0) {
15254 bridge
= pci_get_device(pci_id
->vendor
, pci_id
->device
,
15260 if (pci_id
->rev
!= PCI_ANY_ID
) {
15261 if (bridge
->revision
> pci_id
->rev
)
15264 if (bridge
->subordinate
&&
15265 (bridge
->subordinate
->number
==
15266 tp
->pdev
->bus
->number
)) {
15267 tg3_flag_set(tp
, ICH_WORKAROUND
);
15268 pci_dev_put(bridge
);
15274 if (tg3_asic_rev(tp
) == ASIC_REV_5701
) {
15275 static struct tg3_dev_id
{
15278 } bridge_chipsets
[] = {
15279 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_0
},
15280 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_1
},
15283 struct tg3_dev_id
*pci_id
= &bridge_chipsets
[0];
15284 struct pci_dev
*bridge
= NULL
;
15286 while (pci_id
->vendor
!= 0) {
15287 bridge
= pci_get_device(pci_id
->vendor
,
15294 if (bridge
->subordinate
&&
15295 (bridge
->subordinate
->number
<=
15296 tp
->pdev
->bus
->number
) &&
15297 (bridge
->subordinate
->busn_res
.end
>=
15298 tp
->pdev
->bus
->number
)) {
15299 tg3_flag_set(tp
, 5701_DMA_BUG
);
15300 pci_dev_put(bridge
);
15306 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15307 * DMA addresses > 40-bit. This bridge may have other additional
15308 * 57xx devices behind it in some 4-port NIC designs for example.
15309 * Any tg3 device found behind the bridge will also need the 40-bit
15312 if (tg3_flag(tp
, 5780_CLASS
)) {
15313 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
15314 tp
->msi_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_MSI
);
15316 struct pci_dev
*bridge
= NULL
;
15319 bridge
= pci_get_device(PCI_VENDOR_ID_SERVERWORKS
,
15320 PCI_DEVICE_ID_SERVERWORKS_EPB
,
15322 if (bridge
&& bridge
->subordinate
&&
15323 (bridge
->subordinate
->number
<=
15324 tp
->pdev
->bus
->number
) &&
15325 (bridge
->subordinate
->busn_res
.end
>=
15326 tp
->pdev
->bus
->number
)) {
15327 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
15328 pci_dev_put(bridge
);
15334 if (tg3_asic_rev(tp
) == ASIC_REV_5704
||
15335 tg3_asic_rev(tp
) == ASIC_REV_5714
)
15336 tp
->pdev_peer
= tg3_find_peer(tp
);
15338 /* Determine TSO capabilities */
15339 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
)
15340 ; /* Do nothing. HW bug. */
15341 else if (tg3_flag(tp
, 57765_PLUS
))
15342 tg3_flag_set(tp
, HW_TSO_3
);
15343 else if (tg3_flag(tp
, 5755_PLUS
) ||
15344 tg3_asic_rev(tp
) == ASIC_REV_5906
)
15345 tg3_flag_set(tp
, HW_TSO_2
);
15346 else if (tg3_flag(tp
, 5750_PLUS
)) {
15347 tg3_flag_set(tp
, HW_TSO_1
);
15348 tg3_flag_set(tp
, TSO_BUG
);
15349 if (tg3_asic_rev(tp
) == ASIC_REV_5750
&&
15350 tg3_chip_rev_id(tp
) >= CHIPREV_ID_5750_C2
)
15351 tg3_flag_clear(tp
, TSO_BUG
);
15352 } else if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
15353 tg3_asic_rev(tp
) != ASIC_REV_5701
&&
15354 tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) {
15355 tg3_flag_set(tp
, FW_TSO
);
15356 tg3_flag_set(tp
, TSO_BUG
);
15357 if (tg3_asic_rev(tp
) == ASIC_REV_5705
)
15358 tp
->fw_needed
= FIRMWARE_TG3TSO5
;
15360 tp
->fw_needed
= FIRMWARE_TG3TSO
;
15363 /* Selectively allow TSO based on operating conditions */
15364 if (tg3_flag(tp
, HW_TSO_1
) ||
15365 tg3_flag(tp
, HW_TSO_2
) ||
15366 tg3_flag(tp
, HW_TSO_3
) ||
15367 tg3_flag(tp
, FW_TSO
)) {
15368 /* For firmware TSO, assume ASF is disabled.
15369 * We'll disable TSO later if we discover ASF
15370 * is enabled in tg3_get_eeprom_hw_cfg().
15372 tg3_flag_set(tp
, TSO_CAPABLE
);
15374 tg3_flag_clear(tp
, TSO_CAPABLE
);
15375 tg3_flag_clear(tp
, TSO_BUG
);
15376 tp
->fw_needed
= NULL
;
15379 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
)
15380 tp
->fw_needed
= FIRMWARE_TG3
;
15384 if (tg3_flag(tp
, 5750_PLUS
)) {
15385 tg3_flag_set(tp
, SUPPORT_MSI
);
15386 if (tg3_chip_rev(tp
) == CHIPREV_5750_AX
||
15387 tg3_chip_rev(tp
) == CHIPREV_5750_BX
||
15388 (tg3_asic_rev(tp
) == ASIC_REV_5714
&&
15389 tg3_chip_rev_id(tp
) <= CHIPREV_ID_5714_A2
&&
15390 tp
->pdev_peer
== tp
->pdev
))
15391 tg3_flag_clear(tp
, SUPPORT_MSI
);
15393 if (tg3_flag(tp
, 5755_PLUS
) ||
15394 tg3_asic_rev(tp
) == ASIC_REV_5906
) {
15395 tg3_flag_set(tp
, 1SHOT_MSI
);
15398 if (tg3_flag(tp
, 57765_PLUS
)) {
15399 tg3_flag_set(tp
, SUPPORT_MSIX
);
15400 tp
->irq_max
= TG3_IRQ_MAX_VECS
;
15406 if (tp
->irq_max
> 1) {
15407 tp
->rxq_max
= TG3_RSS_MAX_NUM_QS
;
15408 tg3_rss_init_dflt_indir_tbl(tp
, TG3_RSS_MAX_NUM_QS
);
15410 if (tg3_asic_rev(tp
) == ASIC_REV_5719
||
15411 tg3_asic_rev(tp
) == ASIC_REV_5720
)
15412 tp
->txq_max
= tp
->irq_max
- 1;
15415 if (tg3_flag(tp
, 5755_PLUS
) ||
15416 tg3_asic_rev(tp
) == ASIC_REV_5906
)
15417 tg3_flag_set(tp
, SHORT_DMA_BUG
);
15419 if (tg3_asic_rev(tp
) == ASIC_REV_5719
)
15420 tp
->dma_limit
= TG3_TX_BD_DMA_MAX_4K
;
15422 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
15423 tg3_asic_rev(tp
) == ASIC_REV_5719
||
15424 tg3_asic_rev(tp
) == ASIC_REV_5720
||
15425 tg3_asic_rev(tp
) == ASIC_REV_5762
)
15426 tg3_flag_set(tp
, LRG_PROD_RING_CAP
);
15428 if (tg3_flag(tp
, 57765_PLUS
) &&
15429 tg3_chip_rev_id(tp
) != CHIPREV_ID_5719_A0
)
15430 tg3_flag_set(tp
, USE_JUMBO_BDFLAG
);
15432 if (!tg3_flag(tp
, 5705_PLUS
) ||
15433 tg3_flag(tp
, 5780_CLASS
) ||
15434 tg3_flag(tp
, USE_JUMBO_BDFLAG
))
15435 tg3_flag_set(tp
, JUMBO_CAPABLE
);
15437 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
15440 if (pci_is_pcie(tp
->pdev
)) {
15443 tg3_flag_set(tp
, PCI_EXPRESS
);
		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;
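
	/* The accessors above form the chip's register-access vtable; the
	 * errata handling below swaps in indirect or read-back-flushed
	 * variants on a per-chip basis, so the rest of the driver can use
	 * tr32()/tw32() and the mailbox helpers without caring which bus
	 * quirks apply.
	 */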
	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
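
	/* Once the ICH workaround is active, every register and mailbox
	 * access goes through PCI config space, so the MMIO BAR was
	 * unmapped and memory-space decoding (PCI_COMMAND_MEMORY) disabled
	 * above.
	 */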
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}
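
	/* tp->pci_fn identifies which port of a multi-function package this
	 * device is; later code uses it to pick per-port NVRAM and SRAM
	 * offsets (e.g. the MAC address location in
	 * tg3_get_device_address()).
	 */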
	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE +
				    NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
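	/* The bootcode marks a valid mailbox address by placing the
	 * signature 0x484b (ASCII "HK") in the top 16 bits of the high
	 * word; the two low bytes of "hi" then hold the first two octets
	 * of the MAC address.
	 */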
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;
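
	/* Pick a DMA burst-boundary policy per architecture: the "single
	 * cacheline" goal caps bursts so they never cross one cache line,
	 * the "multi cacheline" goal restricts them at a larger multi-line
	 * boundary, and a zero goal leaves the chip defaults untouched.
	 */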
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;
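
	/* The descriptor is copied word-by-word into NIC SRAM through the
	 * PCI config-space memory window: TG3PCI_MEM_WIN_BASE_ADDR selects
	 * the SRAM address and TG3PCI_MEM_WIN_DATA carries the data, after
	 * which the window is pointed back at offset 0.
	 */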
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
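
	/* Loop strategy: fill the buffer with a known 0,1,2,... pattern,
	 * DMA it to the chip and back, and compare.  On a mismatch the
	 * write boundary is clamped to 16 bytes and the test is retried;
	 * a second corruption with the clamp in place is a hard failure.
	 */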
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
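
/* These defaults seed the standard ethtool coalescing interface; once the
 * device is up they can be read back with "ethtool -c <iface>" and tuned
 * with e.g. "ethtool -C <iface> rx-usecs 20 rx-frames 5" (illustrative
 * values) to trade interrupt rate against latency.
 */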
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev))
			tg3_flag_set(tp, ROBOSWITCH);
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
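
	/* dma_mask bounds streaming (per-packet) mappings, while
	 * persist_dma_mask bounds the coherent allocations (rings, status
	 * blocks) that must stay addressable for the life of the device.
	 */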
	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shut it down
	 * cleanly.  The DMA self test will enable WDMAC and we'll see
	 * (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
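
/* Note: the suspend path halts the chip and clears INIT_COMPLETE;
 * tg3_power_down_prepare() then decides, based on the configured
 * wake-on-LAN settings, what state to leave the device in before the PM
 * core drops it to D3.
 */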
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);