]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/net/ethernet/realtek/r8169.c
e7ff886e8047ac3d3a926e8c0384b3ec7568068b
[mirror_ubuntu-bionic-kernel.git] / drivers / net / ethernet / realtek / r8169.c
1 /*
2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
3 *
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
7 *
8 * See MAINTAINERS file for support contact information.
9 */
10
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
21 #include <linux/in.h>
22 #include <linux/ip.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
31
32 #include <asm/io.h>
33 #include <asm/irq.h>
34
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
38
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50 #define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
51
52 #ifdef RTL8169_DEBUG
53 #define assert(expr) \
54 if (!(expr)) { \
55 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
56 #expr,__FILE__,__func__,__LINE__); \
57 }
58 #define dprintk(fmt, args...) \
59 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
60 #else
61 #define assert(expr) do {} while (0)
62 #define dprintk(fmt, args...) do {} while (0)
63 #endif /* RTL8169_DEBUG */
64
65 #define R8169_MSG_DEFAULT \
66 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
67
68 #define TX_SLOTS_AVAIL(tp) \
69 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
70
71 /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
72 #define TX_FRAGS_READY_FOR(tp,nr_frags) \
73 (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
74
75 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
76 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
77 static const int multicast_filter_limit = 32;
78
79 #define MAX_READ_REQUEST_SHIFT 12
80 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
81 #define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
82 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
83
84 #define R8169_REGS_SIZE 256
85 #define R8169_NAPI_WEIGHT 64
86 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
87 #define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
88 #define RX_BUF_SIZE 1536 /* Rx Buffer size */
89 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
90 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
91
92 #define RTL8169_TX_TIMEOUT (6*HZ)
93 #define RTL8169_PHY_TIMEOUT (10*HZ)
94
95 #define RTL_EEPROM_SIG cpu_to_le32(0x8129)
96 #define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
97 #define RTL_EEPROM_SIG_ADDR 0x0000
98
99 /* write/read MMIO register */
100 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
101 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
102 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
103 #define RTL_R8(reg) readb (ioaddr + (reg))
104 #define RTL_R16(reg) readw (ioaddr + (reg))
105 #define RTL_R32(reg) readl (ioaddr + (reg))
106
107 enum mac_version {
108 RTL_GIGA_MAC_VER_01 = 0,
109 RTL_GIGA_MAC_VER_02,
110 RTL_GIGA_MAC_VER_03,
111 RTL_GIGA_MAC_VER_04,
112 RTL_GIGA_MAC_VER_05,
113 RTL_GIGA_MAC_VER_06,
114 RTL_GIGA_MAC_VER_07,
115 RTL_GIGA_MAC_VER_08,
116 RTL_GIGA_MAC_VER_09,
117 RTL_GIGA_MAC_VER_10,
118 RTL_GIGA_MAC_VER_11,
119 RTL_GIGA_MAC_VER_12,
120 RTL_GIGA_MAC_VER_13,
121 RTL_GIGA_MAC_VER_14,
122 RTL_GIGA_MAC_VER_15,
123 RTL_GIGA_MAC_VER_16,
124 RTL_GIGA_MAC_VER_17,
125 RTL_GIGA_MAC_VER_18,
126 RTL_GIGA_MAC_VER_19,
127 RTL_GIGA_MAC_VER_20,
128 RTL_GIGA_MAC_VER_21,
129 RTL_GIGA_MAC_VER_22,
130 RTL_GIGA_MAC_VER_23,
131 RTL_GIGA_MAC_VER_24,
132 RTL_GIGA_MAC_VER_25,
133 RTL_GIGA_MAC_VER_26,
134 RTL_GIGA_MAC_VER_27,
135 RTL_GIGA_MAC_VER_28,
136 RTL_GIGA_MAC_VER_29,
137 RTL_GIGA_MAC_VER_30,
138 RTL_GIGA_MAC_VER_31,
139 RTL_GIGA_MAC_VER_32,
140 RTL_GIGA_MAC_VER_33,
141 RTL_GIGA_MAC_VER_34,
142 RTL_GIGA_MAC_VER_35,
143 RTL_GIGA_MAC_VER_36,
144 RTL_GIGA_MAC_VER_37,
145 RTL_GIGA_MAC_VER_38,
146 RTL_GIGA_MAC_VER_39,
147 RTL_GIGA_MAC_VER_40,
148 RTL_GIGA_MAC_VER_41,
149 RTL_GIGA_MAC_NONE = 0xff,
150 };
151
152 enum rtl_tx_desc_version {
153 RTL_TD_0 = 0,
154 RTL_TD_1 = 1,
155 };
156
157 #define JUMBO_1K ETH_DATA_LEN
158 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
159 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
160 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
161 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
162
163 #define _R(NAME,TD,FW,SZ,B) { \
164 .name = NAME, \
165 .txd_version = TD, \
166 .fw_name = FW, \
167 .jumbo_max = SZ, \
168 .jumbo_tx_csum = B \
169 }
170
171 static const struct {
172 const char *name;
173 enum rtl_tx_desc_version txd_version;
174 const char *fw_name;
175 u16 jumbo_max;
176 bool jumbo_tx_csum;
177 } rtl_chip_infos[] = {
178 /* PCI devices. */
179 [RTL_GIGA_MAC_VER_01] =
180 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
181 [RTL_GIGA_MAC_VER_02] =
182 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
183 [RTL_GIGA_MAC_VER_03] =
184 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
185 [RTL_GIGA_MAC_VER_04] =
186 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
187 [RTL_GIGA_MAC_VER_05] =
188 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
189 [RTL_GIGA_MAC_VER_06] =
190 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
191 /* PCI-E devices. */
192 [RTL_GIGA_MAC_VER_07] =
193 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
194 [RTL_GIGA_MAC_VER_08] =
195 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
196 [RTL_GIGA_MAC_VER_09] =
197 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
198 [RTL_GIGA_MAC_VER_10] =
199 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
200 [RTL_GIGA_MAC_VER_11] =
201 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
202 [RTL_GIGA_MAC_VER_12] =
203 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
204 [RTL_GIGA_MAC_VER_13] =
205 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
206 [RTL_GIGA_MAC_VER_14] =
207 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
208 [RTL_GIGA_MAC_VER_15] =
209 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
210 [RTL_GIGA_MAC_VER_16] =
211 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
212 [RTL_GIGA_MAC_VER_17] =
213 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
214 [RTL_GIGA_MAC_VER_18] =
215 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
216 [RTL_GIGA_MAC_VER_19] =
217 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
218 [RTL_GIGA_MAC_VER_20] =
219 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
220 [RTL_GIGA_MAC_VER_21] =
221 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
222 [RTL_GIGA_MAC_VER_22] =
223 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
224 [RTL_GIGA_MAC_VER_23] =
225 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
226 [RTL_GIGA_MAC_VER_24] =
227 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
228 [RTL_GIGA_MAC_VER_25] =
229 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
230 JUMBO_9K, false),
231 [RTL_GIGA_MAC_VER_26] =
232 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
233 JUMBO_9K, false),
234 [RTL_GIGA_MAC_VER_27] =
235 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
236 [RTL_GIGA_MAC_VER_28] =
237 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
238 [RTL_GIGA_MAC_VER_29] =
239 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
240 JUMBO_1K, true),
241 [RTL_GIGA_MAC_VER_30] =
242 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
243 JUMBO_1K, true),
244 [RTL_GIGA_MAC_VER_31] =
245 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
246 [RTL_GIGA_MAC_VER_32] =
247 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
248 JUMBO_9K, false),
249 [RTL_GIGA_MAC_VER_33] =
250 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
251 JUMBO_9K, false),
252 [RTL_GIGA_MAC_VER_34] =
253 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
254 JUMBO_9K, false),
255 [RTL_GIGA_MAC_VER_35] =
256 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
257 JUMBO_9K, false),
258 [RTL_GIGA_MAC_VER_36] =
259 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
260 JUMBO_9K, false),
261 [RTL_GIGA_MAC_VER_37] =
262 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
263 JUMBO_1K, true),
264 [RTL_GIGA_MAC_VER_38] =
265 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
266 JUMBO_9K, false),
267 [RTL_GIGA_MAC_VER_39] =
268 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
269 JUMBO_1K, true),
270 [RTL_GIGA_MAC_VER_40] =
271 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
272 JUMBO_9K, false),
273 [RTL_GIGA_MAC_VER_41] =
274 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
275 };
276 #undef _R
277
278 enum cfg_version {
279 RTL_CFG_0 = 0x00,
280 RTL_CFG_1,
281 RTL_CFG_2
282 };
283
284 static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
285 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
286 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
287 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
288 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
289 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
290 { PCI_VENDOR_ID_DLINK, 0x4300,
291 PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
292 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
293 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
294 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
295 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
296 { PCI_VENDOR_ID_LINKSYS, 0x1032,
297 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
298 { 0x0001, 0x8168,
299 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
300 {0,},
301 };
302
303 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
304
305 static int rx_buf_sz = 16383;
306 static int use_dac;
307 static struct {
308 u32 msg_enable;
309 } debug = { -1 };
310
311 enum rtl_registers {
312 MAC0 = 0, /* Ethernet hardware address. */
313 MAC4 = 4,
314 MAR0 = 8, /* Multicast filter. */
315 CounterAddrLow = 0x10,
316 CounterAddrHigh = 0x14,
317 TxDescStartAddrLow = 0x20,
318 TxDescStartAddrHigh = 0x24,
319 TxHDescStartAddrLow = 0x28,
320 TxHDescStartAddrHigh = 0x2c,
321 FLASH = 0x30,
322 ERSR = 0x36,
323 ChipCmd = 0x37,
324 TxPoll = 0x38,
325 IntrMask = 0x3c,
326 IntrStatus = 0x3e,
327
328 TxConfig = 0x40,
329 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
330 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
331
332 RxConfig = 0x44,
333 #define RX128_INT_EN (1 << 15) /* 8111c and later */
334 #define RX_MULTI_EN (1 << 14) /* 8111c only */
335 #define RXCFG_FIFO_SHIFT 13
336 /* No threshold before first PCI xfer */
337 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
338 #define RXCFG_DMA_SHIFT 8
339 /* Unlimited maximum PCI burst. */
340 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
341
342 RxMissed = 0x4c,
343 Cfg9346 = 0x50,
344 Config0 = 0x51,
345 Config1 = 0x52,
346 Config2 = 0x53,
347 #define PME_SIGNAL (1 << 5) /* 8168c and later */
348
349 Config3 = 0x54,
350 Config4 = 0x55,
351 Config5 = 0x56,
352 MultiIntr = 0x5c,
353 PHYAR = 0x60,
354 PHYstatus = 0x6c,
355 RxMaxSize = 0xda,
356 CPlusCmd = 0xe0,
357 IntrMitigate = 0xe2,
358 RxDescAddrLow = 0xe4,
359 RxDescAddrHigh = 0xe8,
360 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
361
362 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
363
364 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
365
366 #define TxPacketMax (8064 >> 7)
367 #define EarlySize 0x27
368
369 FuncEvent = 0xf0,
370 FuncEventMask = 0xf4,
371 FuncPresetState = 0xf8,
372 FuncForceEvent = 0xfc,
373 };
374
375 enum rtl8110_registers {
376 TBICSR = 0x64,
377 TBI_ANAR = 0x68,
378 TBI_LPAR = 0x6a,
379 };
380
381 enum rtl8168_8101_registers {
382 CSIDR = 0x64,
383 CSIAR = 0x68,
384 #define CSIAR_FLAG 0x80000000
385 #define CSIAR_WRITE_CMD 0x80000000
386 #define CSIAR_BYTE_ENABLE 0x0f
387 #define CSIAR_BYTE_ENABLE_SHIFT 12
388 #define CSIAR_ADDR_MASK 0x0fff
389 #define CSIAR_FUNC_CARD 0x00000000
390 #define CSIAR_FUNC_SDIO 0x00010000
391 #define CSIAR_FUNC_NIC 0x00020000
392 PMCH = 0x6f,
393 EPHYAR = 0x80,
394 #define EPHYAR_FLAG 0x80000000
395 #define EPHYAR_WRITE_CMD 0x80000000
396 #define EPHYAR_REG_MASK 0x1f
397 #define EPHYAR_REG_SHIFT 16
398 #define EPHYAR_DATA_MASK 0xffff
399 DLLPR = 0xd0,
400 #define PFM_EN (1 << 6)
401 DBG_REG = 0xd1,
402 #define FIX_NAK_1 (1 << 4)
403 #define FIX_NAK_2 (1 << 3)
404 TWSI = 0xd2,
405 MCU = 0xd3,
406 #define NOW_IS_OOB (1 << 7)
407 #define TX_EMPTY (1 << 5)
408 #define RX_EMPTY (1 << 4)
409 #define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
410 #define EN_NDP (1 << 3)
411 #define EN_OOB_RESET (1 << 2)
412 #define LINK_LIST_RDY (1 << 1)
413 EFUSEAR = 0xdc,
414 #define EFUSEAR_FLAG 0x80000000
415 #define EFUSEAR_WRITE_CMD 0x80000000
416 #define EFUSEAR_READ_CMD 0x00000000
417 #define EFUSEAR_REG_MASK 0x03ff
418 #define EFUSEAR_REG_SHIFT 8
419 #define EFUSEAR_DATA_MASK 0xff
420 };
421
422 enum rtl8168_registers {
423 LED_FREQ = 0x1a,
424 EEE_LED = 0x1b,
425 ERIDR = 0x70,
426 ERIAR = 0x74,
427 #define ERIAR_FLAG 0x80000000
428 #define ERIAR_WRITE_CMD 0x80000000
429 #define ERIAR_READ_CMD 0x00000000
430 #define ERIAR_ADDR_BYTE_ALIGN 4
431 #define ERIAR_TYPE_SHIFT 16
432 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
433 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
434 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
435 #define ERIAR_MASK_SHIFT 12
436 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
437 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
438 #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
439 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
440 EPHY_RXER_NUM = 0x7c,
441 OCPDR = 0xb0, /* OCP GPHY access */
442 #define OCPDR_WRITE_CMD 0x80000000
443 #define OCPDR_READ_CMD 0x00000000
444 #define OCPDR_REG_MASK 0x7f
445 #define OCPDR_GPHY_REG_SHIFT 16
446 #define OCPDR_DATA_MASK 0xffff
447 OCPAR = 0xb4,
448 #define OCPAR_FLAG 0x80000000
449 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
450 #define OCPAR_GPHY_READ_CMD 0x0000f060
451 GPHY_OCP = 0xb8,
452 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
453 MISC = 0xf0, /* 8168e only. */
454 #define TXPLA_RST (1 << 29)
455 #define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
456 #define PWM_EN (1 << 22)
457 #define RXDV_GATED_EN (1 << 19)
458 #define EARLY_TALLY_EN (1 << 16)
459 };
460
461 enum rtl_register_content {
462 /* InterruptStatusBits */
463 SYSErr = 0x8000,
464 PCSTimeout = 0x4000,
465 SWInt = 0x0100,
466 TxDescUnavail = 0x0080,
467 RxFIFOOver = 0x0040,
468 LinkChg = 0x0020,
469 RxOverflow = 0x0010,
470 TxErr = 0x0008,
471 TxOK = 0x0004,
472 RxErr = 0x0002,
473 RxOK = 0x0001,
474
475 /* RxStatusDesc */
476 RxBOVF = (1 << 24),
477 RxFOVF = (1 << 23),
478 RxRWT = (1 << 22),
479 RxRES = (1 << 21),
480 RxRUNT = (1 << 20),
481 RxCRC = (1 << 19),
482
483 /* ChipCmdBits */
484 StopReq = 0x80,
485 CmdReset = 0x10,
486 CmdRxEnb = 0x08,
487 CmdTxEnb = 0x04,
488 RxBufEmpty = 0x01,
489
490 /* TXPoll register p.5 */
491 HPQ = 0x80, /* Poll cmd on the high prio queue */
492 NPQ = 0x40, /* Poll cmd on the low prio queue */
493 FSWInt = 0x01, /* Forced software interrupt */
494
495 /* Cfg9346Bits */
496 Cfg9346_Lock = 0x00,
497 Cfg9346_Unlock = 0xc0,
498
499 /* rx_mode_bits */
500 AcceptErr = 0x20,
501 AcceptRunt = 0x10,
502 AcceptBroadcast = 0x08,
503 AcceptMulticast = 0x04,
504 AcceptMyPhys = 0x02,
505 AcceptAllPhys = 0x01,
506 #define RX_CONFIG_ACCEPT_MASK 0x3f
507
508 /* TxConfigBits */
509 TxInterFrameGapShift = 24,
510 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
511
512 /* Config1 register p.24 */
513 LEDS1 = (1 << 7),
514 LEDS0 = (1 << 6),
515 Speed_down = (1 << 4),
516 MEMMAP = (1 << 3),
517 IOMAP = (1 << 2),
518 VPD = (1 << 1),
519 PMEnable = (1 << 0), /* Power Management Enable */
520
521 /* Config2 register p. 25 */
522 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
523 PCI_Clock_66MHz = 0x01,
524 PCI_Clock_33MHz = 0x00,
525
526 /* Config3 register p.25 */
527 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
528 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
529 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
530 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
531
532 /* Config4 register */
533 Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
534
535 /* Config5 register p.27 */
536 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
537 MWF = (1 << 5), /* Accept Multicast wakeup frame */
538 UWF = (1 << 4), /* Accept Unicast wakeup frame */
539 Spi_en = (1 << 3),
540 LanWake = (1 << 1), /* LanWake enable/disable */
541 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
542
543 /* TBICSR p.28 */
544 TBIReset = 0x80000000,
545 TBILoopback = 0x40000000,
546 TBINwEnable = 0x20000000,
547 TBINwRestart = 0x10000000,
548 TBILinkOk = 0x02000000,
549 TBINwComplete = 0x01000000,
550
551 /* CPlusCmd p.31 */
552 EnableBist = (1 << 15), // 8168 8101
553 Mac_dbgo_oe = (1 << 14), // 8168 8101
554 Normal_mode = (1 << 13), // unused
555 Force_half_dup = (1 << 12), // 8168 8101
556 Force_rxflow_en = (1 << 11), // 8168 8101
557 Force_txflow_en = (1 << 10), // 8168 8101
558 Cxpl_dbg_sel = (1 << 9), // 8168 8101
559 ASF = (1 << 8), // 8168 8101
560 PktCntrDisable = (1 << 7), // 8168 8101
561 Mac_dbgo_sel = 0x001c, // 8168
562 RxVlan = (1 << 6),
563 RxChkSum = (1 << 5),
564 PCIDAC = (1 << 4),
565 PCIMulRW = (1 << 3),
566 INTT_0 = 0x0000, // 8168
567 INTT_1 = 0x0001, // 8168
568 INTT_2 = 0x0002, // 8168
569 INTT_3 = 0x0003, // 8168
570
571 /* rtl8169_PHYstatus */
572 TBI_Enable = 0x80,
573 TxFlowCtrl = 0x40,
574 RxFlowCtrl = 0x20,
575 _1000bpsF = 0x10,
576 _100bps = 0x08,
577 _10bps = 0x04,
578 LinkStatus = 0x02,
579 FullDup = 0x01,
580
581 /* _TBICSRBit */
582 TBILinkOK = 0x02000000,
583
584 /* DumpCounterCommand */
585 CounterDump = 0x8,
586 };
587
588 enum rtl_desc_bit {
589 /* First doubleword. */
590 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
591 RingEnd = (1 << 30), /* End of descriptor ring */
592 FirstFrag = (1 << 29), /* First segment of a packet */
593 LastFrag = (1 << 28), /* Final segment of a packet */
594 };
595
596 /* Generic case. */
597 enum rtl_tx_desc_bit {
598 /* First doubleword. */
599 TD_LSO = (1 << 27), /* Large Send Offload */
600 #define TD_MSS_MAX 0x07ffu /* MSS value */
601
602 /* Second doubleword. */
603 TxVlanTag = (1 << 17), /* Add VLAN tag */
604 };
605
606 /* 8169, 8168b and 810x except 8102e. */
607 enum rtl_tx_desc_bit_0 {
608 /* First doubleword. */
609 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
610 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
611 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
612 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
613 };
614
615 /* 8102e, 8168c and beyond. */
616 enum rtl_tx_desc_bit_1 {
617 /* Second doubleword. */
618 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
619 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
620 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
621 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
622 };
623
/*
 * Per-txd_version descriptor parameters: which checksum-request flags
 * to set, where the MSS field sits, and which opts word (opts1 for
 * RTL_TD_0, opts2 for RTL_TD_1) carries them.  Indexed by
 * enum rtl_tx_desc_version (tp->txd_version).
 */
static const struct rtl_tx_desc_info {
	struct {
		u32 udp;
		u32 tcp;
	} checksum;
	u16 mss_shift;
	u16 opts_offset;
} tx_desc_info [] = {
	[RTL_TD_0] = {
		.checksum = {
			.udp	= TD0_IP_CS | TD0_UDP_CS,
			.tcp	= TD0_IP_CS | TD0_TCP_CS
		},
		.mss_shift = TD0_MSS_SHIFT,
		.opts_offset = 0
	},
	[RTL_TD_1] = {
		.checksum = {
			.udp	= TD1_IP_CS | TD1_UDP_CS,
			.tcp	= TD1_IP_CS | TD1_TCP_CS
		},
		.mss_shift = TD1_MSS_SHIFT,
		.opts_offset = 1
	}
};
649
650 enum rtl_rx_desc_bit {
651 /* Rx private */
652 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
653 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
654
655 #define RxProtoUDP (PID1)
656 #define RxProtoTCP (PID0)
657 #define RxProtoIP (PID1 | PID0)
658 #define RxProtoMask RxProtoIP
659
660 IPFail = (1 << 16), /* IP checksum failed */
661 UDPFail = (1 << 15), /* UDP/IP checksum failed */
662 TCPFail = (1 << 14), /* TCP/IP checksum failed */
663 RxVlanTag = (1 << 16), /* VLAN tag available */
664 };
665
666 #define RsvdMask 0x3fffc000
667
/* Hardware Tx descriptor; all fields little-endian, layout fixed by HW. */
struct TxDesc {
	__le32 opts1;	/* DescOwn/RingEnd/FirstFrag/LastFrag + TD_0 offload bits */
	__le32 opts2;	/* VLAN tag; TD_1 offload bits (see tx_desc_info) */
	__le64 addr;	/* DMA address of the fragment */
};
673
/* Hardware Rx descriptor; all fields little-endian, layout fixed by HW. */
struct RxDesc {
	__le32 opts1;	/* DescOwn/RingEnd + status bits (enum rtl_rx_desc_bit) */
	__le32 opts2;	/* received VLAN tag (RxVlanTag) */
	__le64 addr;	/* DMA address of the receive buffer */
};
679
/* Software bookkeeping for one Tx slot, parallel to the TxDesc ring. */
struct ring_info {
	struct sk_buff *skb;
	u32 len;
	u8 __pad[sizeof(void *) - sizeof(u32)];	/* pad entry to 2 pointers */
};
685
/* Optional capabilities recorded in tp->features. */
enum features {
	RTL_FEATURE_WOL		= (1 << 0),
	RTL_FEATURE_MSI		= (1 << 1),
	RTL_FEATURE_GMII	= (1 << 2),
};
691
/*
 * Hardware statistics block (little-endian).
 * NOTE(review): field order/sizes appear to mirror the chip's counter
 * dump format (CounterAddrLow/High DMA target) — do not reorder.
 */
struct rtl8169_counters {
	__le64 tx_packets;
	__le64 rx_packets;
	__le64 tx_errors;
	__le32 rx_errors;
	__le16 rx_missed;
	__le16 align_errors;
	__le32 tx_one_collision;
	__le32 tx_multi_collision;
	__le64 rx_unicast;
	__le64 rx_broadcast;
	__le32 rx_multicast;
	__le16 tx_aborted;
	__le16 tx_underun;
};
707
/* Bit numbers for tp->wk.flags, requesting deferred (workqueue) tasks. */
enum rtl_flag {
	RTL_FLAG_TASK_ENABLED,
	RTL_FLAG_TASK_SLOW_PENDING,
	RTL_FLAG_TASK_RESET_PENDING,
	RTL_FLAG_TASK_PHY_PENDING,
	RTL_FLAG_MAX		/* number of flag bits, sizes the bitmap */
};
715
/* 64-bit packet/byte counters guarded by a u64_stats_sync sequence. */
struct rtl8169_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};
721
/* Per-adapter driver state, hung off the net_device. */
struct rtl8169_private {
	void __iomem *mmio_addr;	/* memory map physical address */
	struct pci_dev *pci_dev;
	struct net_device *dev;
	struct napi_struct napi;
	u32 msg_enable;			/* netif_msg_* verbosity mask */
	u16 txd_version;		/* RTL_TD_*, indexes tx_desc_info */
	u16 mac_version;		/* RTL_GIGA_MAC_VER_*, indexes rtl_chip_infos */
	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
	u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
	u32 dirty_rx;
	u32 dirty_tx;
	struct rtl8169_stats rx_stats;
	struct rtl8169_stats tx_stats;
	struct TxDesc *TxDescArray;	/* 256-aligned Tx descriptor ring */
	struct RxDesc *RxDescArray;	/* 256-aligned Rx descriptor ring */
	dma_addr_t TxPhyAddr;		/* DMA address of TxDescArray */
	dma_addr_t RxPhyAddr;		/* DMA address of RxDescArray */
	void *Rx_databuff[NUM_RX_DESC];	/* Rx data buffers */
	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */
	struct timer_list timer;
	u16 cp_cmd;

	u16 event_slow;

	/* Chip-specific MDIO accessors, selected by mac_version. */
	struct mdio_ops {
		void (*write)(struct rtl8169_private *, int, int);
		int (*read)(struct rtl8169_private *, int);
	} mdio_ops;

	/* Chip-specific PLL power up/down hooks. */
	struct pll_power_ops {
		void (*down)(struct rtl8169_private *);
		void (*up)(struct rtl8169_private *);
	} pll_power_ops;

	/* Chip-specific jumbo-frame enable/disable hooks. */
	struct jumbo_ops {
		void (*enable)(struct rtl8169_private *);
		void (*disable)(struct rtl8169_private *);
	} jumbo_ops;

	/* Chip-specific CSI register accessors. */
	struct csi_ops {
		void (*write)(struct rtl8169_private *, int, int);
		u32 (*read)(struct rtl8169_private *, int);
	} csi_ops;

	int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
	int (*get_settings)(struct net_device *, struct ethtool_cmd *);
	void (*phy_reset_enable)(struct rtl8169_private *tp);
	void (*hw_start)(struct net_device *);
	unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
	unsigned int (*link_ok)(void __iomem *);
	int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);

	/* Deferred work, serialized via rtl_lock_work()/rtl_unlock_work(). */
	struct {
		DECLARE_BITMAP(flags, RTL_FLAG_MAX);
		struct mutex mutex;
		struct work_struct work;
	} wk;

	unsigned features;		/* RTL_FEATURE_* bits */

	struct mii_if_info mii;
	struct rtl8169_counters counters;
	u32 saved_wolopts;
	u32 opts1_mask;

	/* PHY firmware blob, if one was loaded for this chip. */
	struct rtl_fw {
		const struct firmware *fw;

#define RTL_VER_SIZE 32

		char version[RTL_VER_SIZE];

		struct rtl_fw_phy_action {
			__le32 *code;
			size_t size;
		} phy_action;
	} *rtl_fw;
#define RTL_FIRMWARE_UNKNOWN	ERR_PTR(-EAGAIN)

	u32 ocp_base;	/* current PHY OCP page base (see r8168g_mdio_write) */
};
804
805 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
806 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
807 module_param(use_dac, int, 0);
808 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
809 module_param_named(debug, debug.msg_enable, int, 0);
810 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
811 MODULE_LICENSE("GPL");
812 MODULE_VERSION(RTL8169_VERSION);
813 MODULE_FIRMWARE(FIRMWARE_8168D_1);
814 MODULE_FIRMWARE(FIRMWARE_8168D_2);
815 MODULE_FIRMWARE(FIRMWARE_8168E_1);
816 MODULE_FIRMWARE(FIRMWARE_8168E_2);
817 MODULE_FIRMWARE(FIRMWARE_8168E_3);
818 MODULE_FIRMWARE(FIRMWARE_8105E_1);
819 MODULE_FIRMWARE(FIRMWARE_8168F_1);
820 MODULE_FIRMWARE(FIRMWARE_8168F_2);
821 MODULE_FIRMWARE(FIRMWARE_8402_1);
822 MODULE_FIRMWARE(FIRMWARE_8411_1);
823 MODULE_FIRMWARE(FIRMWARE_8106E_1);
824 MODULE_FIRMWARE(FIRMWARE_8168G_1);
825
/* Serialize against the deferred work in tp->wk. */
static void rtl_lock_work(struct rtl8169_private *tp)
{
	mutex_lock(&tp->wk.mutex);
}
830
/* Counterpart of rtl_lock_work(). */
static void rtl_unlock_work(struct rtl8169_private *tp)
{
	mutex_unlock(&tp->wk.mutex);
}
835
/*
 * Replace the PCIe Max_Read_Request_Size field of the Device Control
 * register with @force (already-shifted PCI_EXP_DEVCTL_READRQ bits).
 */
static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_READRQ, force);
}
841
/* A named hardware condition polled by rtl_loop_wait(). */
struct rtl_cond {
	bool (*check)(struct rtl8169_private *);
	const char *msg;	/* condition name, printed on timeout */
};
846
/* udelay() adapter matching the delay-callback type of rtl_loop_wait(). */
static void rtl_udelay(unsigned int d)
{
	udelay(d);
}
851
852 static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
853 void (*delay)(unsigned int), unsigned int d, int n,
854 bool high)
855 {
856 int i;
857
858 for (i = 0; i < n; i++) {
859 delay(d);
860 if (c->check(tp) == high)
861 return true;
862 }
863 netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
864 c->msg, !high, n, d);
865 return false;
866 }
867
/* Busy-wait (udelay) for @c to become true; @d us per try, @n tries. */
static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
}
874
/* Busy-wait (udelay) for @c to become false; @d us per try, @n tries. */
static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
}
881
/* Sleep-wait (msleep) for @c to become true; @d ms per try, @n tries. */
static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, true);
}
888
/* Sleep-wait (msleep) for @c to become false; @d ms per try, @n tries. */
static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, false);
}
895
/*
 * Define a named rtl_cond: forward-declares name##_check(), emits a
 * const struct rtl_cond binding it to the stringized name (used in the
 * rtl_loop_wait timeout message), then opens the check function header
 * so the macro user supplies the body.
 */
#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check = name ## _check,			\
	.msg = #name					\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)
905
/*
 * OCPAR busy/done flag.  Polarity depends on the operation: ocp_read()
 * waits for it to rise (data ready), ocp_write() for it to fall.
 */
DECLARE_RTL_COND(rtl_ocpar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(OCPAR) & OCPAR_FLAG;
}
912
913 static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
914 {
915 void __iomem *ioaddr = tp->mmio_addr;
916
917 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
918
919 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
920 RTL_R32(OCPDR) : ~0;
921 }
922
923 static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
924 {
925 void __iomem *ioaddr = tp->mmio_addr;
926
927 RTL_W32(OCPDR, data);
928 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
929
930 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
931 }
932
/* ERIAR busy flag; rtl8168_oob_notify() waits for it to clear. */
DECLARE_RTL_COND(rtl_eriar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(ERIAR) & ERIAR_FLAG;
}
939
/*
 * Deliver an out-of-band command byte to the management firmware
 * (used by the driver start/stop handshake below): latch @cmd through
 * ERIDR/ERIAR, wait for the ERI write to complete, then ring the
 * doorbell via OCP register 0x30.  Bails out silently if the ERI
 * write never completes.  Write order is hardware-mandated.
 */
static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(ERIDR, cmd);
	RTL_W32(ERIAR, 0x800010e8);
	msleep(2);

	if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
		return;

	ocp_write(tp, 0x1, 0x30, 0x00000001);
}
953
954 #define OOB_CMD_RESET 0x00
955 #define OOB_CMD_DRIVER_START 0x05
956 #define OOB_CMD_DRIVER_STOP 0x06
957
958 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
959 {
960 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
961 }
962
963 DECLARE_RTL_COND(rtl_ocp_read_cond)
964 {
965 u16 reg;
966
967 reg = rtl8168_get_ocp_reg(tp);
968
969 return ocp_read(tp, 0x0f, reg) & 0x00000800;
970 }
971
/*
 * Tell the OOB firmware the driver is taking over, then wait (up to
 * 10 x 10 ms) for its ack bit (rtl_ocp_read_cond) to rise.
 */
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);

	rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
}
978
/*
 * Tell the OOB firmware the driver is releasing the device, then wait
 * (up to 10 x 10 ms) for its ack bit to drop.
 */
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);

	rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
}
985
986 static int r8168dp_check_dash(struct rtl8169_private *tp)
987 {
988 u16 reg = rtl8168_get_ocp_reg(tp);
989
990 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
991 }
992
993 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
994 {
995 if (reg & 0xffff0001) {
996 netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
997 return true;
998 }
999 return false;
1000 }
1001
/*
 * GPHY_OCP busy/done flag.  Polarity depends on the operation:
 * r8168_phy_ocp_read() waits for it to rise, the write path for it
 * to fall.
 */
DECLARE_RTL_COND(rtl_ocp_gphy_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
}
1008
/*
 * Write @data to PHY OCP register @reg (8168g-style access).  @reg
 * must pass rtl_ocp_reg_failure() (16-bit, even); invalid addresses
 * are logged and dropped.  A completion timeout is only logged.
 */
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);

	rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}
1020
/*
 * Read PHY OCP register @reg (8168g-style access).  Returns the low
 * 16 data bits of GPHY_OCP, 0 for an invalid address, or 0xffff on
 * timeout (~0 truncated to the u16 return type).
 */
static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(GPHY_OCP, reg << 15);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
		(RTL_R32(GPHY_OCP) & 0xffff) : ~0;
}
1033
/*
 * Read-modify-write a PHY OCP register: set the bits in @p, then
 * clear the bits in @m (clear wins on overlap).
 */
static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
{
	int tmp = r8168_phy_ocp_read(tp, reg);

	tmp |= p;
	tmp &= ~m;
	r8168_phy_ocp_write(tp, reg, tmp);
}
1041
/*
 * Write @data to MAC OCP register @reg.  Same (flag | reg<<15 | data)
 * encoding as the PHY OCP path but issued through OCPDR, and with no
 * completion polling.  Invalid addresses are logged and dropped.
 */
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
}
1051
/*
 * Read MAC OCP register @reg.  Returns 0 for an invalid address;
 * otherwise the u16 return keeps only the low 16 data bits of OCPDR.
 */
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(OCPDR, reg << 15);

	return RTL_R32(OCPDR);
}
1063
1064 #define OCP_STD_PHY_BASE 0xa400
1065
1066 static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
1067 {
1068 if (reg == 0x1f) {
1069 tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
1070 return;
1071 }
1072
1073 if (tp->ocp_base != OCP_STD_PHY_BASE)
1074 reg -= 0x10;
1075
1076 r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
1077 }
1078
/*
 * 8168G-style MDIO read; counterpart of r8168g_mdio_write().  Uses the
 * OCP base selected by the last write to page register 0x1f.
 */
static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
{
	if (tp->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
}
1086
/* Poll condition: PHYAR bit 31 set means a PHYAR cycle is in flight. */
DECLARE_RTL_COND(rtl_phyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(PHYAR) & 0x80000000;
}
1093
/*
 * Classic 8169 MDIO write through the PHYAR register: bit 31 selects a
 * write cycle, the register number sits in bits 20..16 and the data in
 * the low 16 bits.  Waits for completion, then honours the mandatory
 * inter-command delay.
 */
static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));

	rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
	/*
	 * According to hardware specs a 20us delay is required after write
	 * complete indication, but before sending next command.
	 */
	udelay(20);
}
1107
/*
 * Classic 8169 MDIO read through PHYAR (bit 31 clear selects a read).
 * Returns the 16-bit register value, or ~0 on poll timeout.  The 20us
 * trailing delay is applied in both cases.
 */
static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int value;

	RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);

	value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
		RTL_R32(PHYAR) & 0xffff : ~0;

	/*
	 * According to hardware specs a 20us delay is required after read
	 * complete indication, but before sending next command.
	 */
	udelay(20);

	return value;
}
1126
/*
 * Start an 8168DP (v1) indirect GPHY access: latch command + register
 * number into OCPDR, kick the cycle via OCPAR, and wait for the
 * controller to accept it.  The write to EPHY_RXER_NUM is part of the
 * documented access sequence for this chip.
 */
static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
	RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
1137
1138 static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
1139 {
1140 r8168dp_1_mdio_access(tp, reg,
1141 OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
1142 }
1143
/*
 * 8168DP (v1) MDIO read: issue the read command, give the PHY 1ms to
 * respond, then fetch the result with a GPHY read cycle.
 * Returns the 16-bit value or ~0 on poll timeout.
 */
static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);

	mdelay(1);
	RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
		RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
}
1157
1158 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
1159
1160 static void r8168dp_2_mdio_start(void __iomem *ioaddr)
1161 {
1162 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
1163 }
1164
1165 static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1166 {
1167 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
1168 }
1169
/*
 * 8168DP (v2) MDIO write: the classic PHYAR access must be bracketed by
 * taking and releasing host MDIO ownership.
 */
static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168dp_2_mdio_start(ioaddr);

	r8169_mdio_write(tp, reg, value);

	r8168dp_2_mdio_stop(ioaddr);
}
1180
/*
 * 8168DP (v2) MDIO read; same ownership bracketing as the write path.
 * Returns what r8169_mdio_read() returns (value or ~0 on timeout).
 */
static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int value;

	r8168dp_2_mdio_start(ioaddr);

	value = r8169_mdio_read(tp, reg);

	r8168dp_2_mdio_stop(ioaddr);

	return value;
}
1194
/* Write a PHY register via the chip-specific MDIO backend. */
static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
{
	tp->mdio_ops.write(tp, location, val);
}
1199
/* Read a PHY register via the chip-specific MDIO backend. */
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
	return tp->mdio_ops.read(tp, location);
}
1204
/* OR @value into PHY register @reg_addr (read-modify-write). */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, cur | value);
}
1209
/*
 * Read-modify-write a PHY register: set the bits in @p, then clear the
 * bits in @m (the clear mask wins where the two overlap).
 */
static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	rtl_writephy(tp, reg_addr, (rtl_readphy(tp, reg_addr) | p) & ~m);
}
1217
/* mii_if_info .mdio_write hook; @phy_id is ignored (single PHY). */
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_writephy(tp, location, val);
}
1225
/* mii_if_info .mdio_read hook; @phy_id is ignored (single PHY). */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return rtl_readphy(tp, location);
}
1232
/* Poll condition: EPHYAR_FLAG set means an EPHY cycle is in flight. */
DECLARE_RTL_COND(rtl_ephyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EPHYAR) & EPHYAR_FLAG;
}
1239
/*
 * Write @value to EPHY register @reg_addr through EPHYAR, wait for the
 * cycle to complete, then apply the mandatory 10us settle delay.
 */
static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);

	udelay(10);
}
1251
/*
 * Read EPHY register @reg_addr through EPHYAR.
 * Returns the data bits, or ~0 (0xffff after truncation) on timeout.
 */
static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
		RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
1261
/*
 * Write @val to ERI register @addr.  @mask is the ERIAR byte-enable
 * mask (ERIAR_MASK_*) and @type the target unit (e.g. ERIAR_EXGMAC).
 * @addr must be dword-aligned and @mask non-zero, enforced by BUG_ON.
 */
static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
			  u32 val, int type)
{
	void __iomem *ioaddr = tp->mmio_addr;

	BUG_ON((addr & 3) || (mask == 0));
	RTL_W32(ERIDR, val);
	RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);

	rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
1273
/*
 * Read the full dword at ERI register @addr (all four byte lanes
 * enabled).  Returns the value or ~0 on poll timeout.
 */
static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);

	return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
		RTL_R32(ERIDR) : ~0;
}
1283
1284 static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1285 u32 m, int type)
1286 {
1287 u32 val;
1288
1289 val = rtl_eri_read(tp, addr, type);
1290 rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
1291 }
1292
/* One entry of a batched EXGMAC (ERI) register write. */
struct exgmac_reg {
	u16 addr;	/* ERI register address */
	u16 mask;	/* ERIAR byte-enable mask (ERIAR_MASK_*) */
	u32 val;	/* value to write */
};
1298
1299 static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1300 const struct exgmac_reg *r, int len)
1301 {
1302 while (len-- > 0) {
1303 rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1304 r++;
1305 }
1306 }
1307
/* Poll condition: EFUSEAR_FLAG set means the efuse read has finished. */
DECLARE_RTL_COND(rtl_efusear_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
}
1314
/*
 * Read one byte from the 8168D efuse at @reg_addr.
 * Returns the byte, or ~0 (0xff after truncation to u8) on timeout.
 */
static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
		RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
1324
/* Return the currently pending interrupt event bits. */
static u16 rtl_get_events(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R16(IntrStatus);
}
1331
/*
 * Acknowledge (clear) the interrupt events in @bits by writing them
 * back to IntrStatus.  mmiowb() orders the MMIO write before any
 * subsequent unlock on another CPU.
 */
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrStatus, bits);
	mmiowb();
}
1339
/* Mask all chip interrupt sources. */
static void rtl_irq_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, 0);
	mmiowb();
}
1347
/* Unmask exactly the interrupt sources given in @bits. */
static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, bits);
}
1354
1355 #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1356 #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1357 #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1358
/* Unmask the NAPI events plus the chip-specific slow-path events. */
static void rtl_irq_enable_all(struct rtl8169_private *tp)
{
	rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
}
1363
/*
 * Mask all interrupts and acknowledge anything pending.  The trailing
 * ChipCmd read flushes the posted MMIO writes to the device.
 */
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_irq_disable(tp);
	rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
	RTL_R8(ChipCmd);
}
1372
/* Non-zero while a TBI (fiber) reset is still in progress. */
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TBICSR) & TBIReset;
}
1379
/* Non-zero while the copper PHY reset (BMCR_RESET) has not self-cleared. */
static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
{
	return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
}
1384
/* Non-zero when the TBI (fiber) link is up. */
static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
{
	return RTL_R32(TBICSR) & TBILinkOk;
}
1389
/* Non-zero when the copper (MII) link is up. */
static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
{
	return RTL_R8(PHYstatus) & LinkStatus;
}
1394
/* Kick off a TBI (fiber) reset; completion is polled elsewhere. */
static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
}
1401
1402 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1403 {
1404 unsigned int val;
1405
1406 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1407 rtl_writephy(tp, MII_BMCR, val & 0xffff);
1408 }
1409
/*
 * Chip-specific ERI tuning applied on every link change.  The magic
 * register/value pairs come from RealTek and select per-speed settings;
 * the 34/38 family additionally resets the packet filter by pulsing
 * bit 0 of ERI register 0xdc.
 */
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct net_device *dev = tp->dev;

	if (!netif_running(dev))
		return;

	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else if (RTL_R8(PHYstatus) & _100bps) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			/* 10 Mbps or no link. */
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
		/* Reset packet filter */
		rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
			     ERIAR_EXGMAC);
		rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
			     ERIAR_EXGMAC);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
		if (RTL_R8(PHYstatus) & _10bps) {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
				      ERIAR_EXGMAC);
		}
	}
}
1466
/*
 * Propagate the current PHY link state to the net stack.  When @pm is
 * true, link-up cancels any scheduled runtime suspend and link-down
 * schedules one 5s out.
 */
static void __rtl8169_check_link_status(struct net_device *dev,
					struct rtl8169_private *tp,
					void __iomem *ioaddr, bool pm)
{
	if (tp->link_ok(ioaddr)) {
		rtl_link_chg_patch(tp);
		/* This is to cancel a scheduled suspend if there's one. */
		if (pm)
			pm_request_resume(&tp->pci_dev->dev);
		netif_carrier_on(dev);
		if (net_ratelimit())
			netif_info(tp, ifup, dev, "link up\n");
	} else {
		netif_carrier_off(dev);
		netif_info(tp, ifdown, dev, "link down\n");
		if (pm)
			pm_schedule_suspend(&tp->pci_dev->dev, 5000);
	}
}
1486
/* Link-state check without any runtime-PM interaction. */
static void rtl8169_check_link_status(struct net_device *dev,
				      struct rtl8169_private *tp,
				      void __iomem *ioaddr)
{
	__rtl8169_check_link_status(dev, tp, ioaddr, false);
}
1493
1494 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1495
/*
 * Translate the chip's Config1/3/5 Wake-on-LAN bits into WAKE_* flags.
 * Returns 0 immediately if PME is disabled (no wakeup possible).
 * Caller must hold the driver work lock.
 */
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u8 options;
	u32 wolopts = 0;

	options = RTL_R8(Config1);
	if (!(options & PMEnable))
		return 0;

	options = RTL_R8(Config3);
	if (options & LinkUp)
		wolopts |= WAKE_PHY;
	if (options & MagicPacket)
		wolopts |= WAKE_MAGIC;

	options = RTL_R8(Config5);
	if (options & UWF)
		wolopts |= WAKE_UCAST;
	if (options & BWF)
		wolopts |= WAKE_BCAST;
	if (options & MWF)
		wolopts |= WAKE_MCAST;

	return wolopts;
}
1522
/* ethtool .get_wol: report supported and currently enabled WoL modes. */
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);

	wol->supported = WAKE_ANY;
	wol->wolopts = __rtl8169_get_wol(tp);

	rtl_unlock_work(tp);
}
1534
/*
 * Program the chip's Wake-on-LAN configuration from @wolopts.
 * Each WAKE_* flag maps to a bit in Config3/Config5 via the cfg table;
 * the PME enable bit lives in Config1 on old chips (<= VER_17) and in
 * Config2 (PME_SIGNAL) on newer ones.  Config registers are writable
 * only between Cfg9346 unlock/lock.  Caller holds the work lock.
 */
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int i;
	static const struct {
		u32 opt;
		u16 reg;
		u8 mask;
	} cfg[] = {
		{ WAKE_PHY,   Config3, LinkUp },
		{ WAKE_MAGIC, Config3, MagicPacket },
		{ WAKE_UCAST, Config5, UWF },
		{ WAKE_BCAST, Config5, BWF },
		{ WAKE_MCAST, Config5, MWF },
		{ WAKE_ANY,   Config5, LanWake }
	};
	u8 options;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
		options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
		if (wolopts & cfg[i].opt)
			options |= cfg[i].mask;
		RTL_W8(cfg[i].reg, options);
	}

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
		options = RTL_R8(Config1) & ~PMEnable;
		if (wolopts)
			options |= PMEnable;
		RTL_W8(Config1, options);
		break;
	default:
		options = RTL_R8(Config2) & ~PME_SIGNAL;
		if (wolopts)
			options |= PME_SIGNAL;
		RTL_W8(Config2, options);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);
}
1579
/*
 * ethtool .set_wol: program the hardware, track WoL in tp->features,
 * and tell the PM core whether the device may wake the system.
 * Always succeeds.
 */
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);

	if (wol->wolopts)
		tp->features |= RTL_FEATURE_WOL;
	else
		tp->features &= ~RTL_FEATURE_WOL;
	__rtl8169_set_wol(tp, wol->wolopts);

	rtl_unlock_work(tp);

	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);

	return 0;
}
1598
/* Firmware file name for this chip, or NULL if none is needed. */
static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
{
	return rtl_chip_infos[tp->mac_version].fw_name;
}
1603
/*
 * ethtool .get_drvinfo: driver name/version, PCI bus info, and the
 * loaded PHY firmware version (tp->rtl_fw may be NULL or an ERR_PTR
 * when no firmware was loaded).
 */
static void rtl8169_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
	if (!IS_ERR_OR_NULL(rtl_fw))
		strlcpy(info->fw_version, rtl_fw->version,
			sizeof(info->fw_version));
}
1618
/* ethtool .get_regs_len: size of the register dump in bytes. */
static int rtl8169_get_regs_len(struct net_device *dev)
{
	return R8169_REGS_SIZE;
}
1623
1624 static int rtl8169_set_speed_tbi(struct net_device *dev,
1625 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1626 {
1627 struct rtl8169_private *tp = netdev_priv(dev);
1628 void __iomem *ioaddr = tp->mmio_addr;
1629 int ret = 0;
1630 u32 reg;
1631
1632 reg = RTL_R32(TBICSR);
1633 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1634 (duplex == DUPLEX_FULL)) {
1635 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1636 } else if (autoneg == AUTONEG_ENABLE)
1637 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1638 else {
1639 netif_warn(tp, link, dev,
1640 "incorrect speed setting refused in TBI mode\n");
1641 ret = -EOPNOTSUPP;
1642 }
1643
1644 return ret;
1645 }
1646
/*
 * Configure the copper PHY.  With autoneg enabled, translate the
 * requested advertisement mask into MII_ADVERTISE / MII_CTRL1000 bits
 * and restart negotiation; with autoneg disabled, force 10 or 100 Mbps
 * (gigabit cannot be forced).  Returns 0 on success, -EINVAL for an
 * unsupported forced speed or a gigabit request on a 10/100-only PHY.
 */
static int rtl8169_set_speed_xmii(struct net_device *dev,
				  u8 autoneg, u16 speed, u8 duplex, u32 adv)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int giga_ctrl, bmcr;
	int rc = -EINVAL;

	/* Select PHY page 0 before touching standard registers. */
	rtl_writephy(tp, 0x1f, 0x0000);

	if (autoneg == AUTONEG_ENABLE) {
		int auto_nego;

		auto_nego = rtl_readphy(tp, MII_ADVERTISE);
		auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
				ADVERTISE_100HALF | ADVERTISE_100FULL);

		if (adv & ADVERTISED_10baseT_Half)
			auto_nego |= ADVERTISE_10HALF;
		if (adv & ADVERTISED_10baseT_Full)
			auto_nego |= ADVERTISE_10FULL;
		if (adv & ADVERTISED_100baseT_Half)
			auto_nego |= ADVERTISE_100HALF;
		if (adv & ADVERTISED_100baseT_Full)
			auto_nego |= ADVERTISE_100FULL;

		/* Always advertise both flow-control directions. */
		auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
		giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);

		/* The 8100e/8101e/8102e do Fast Ethernet only. */
		if (tp->mii.supports_gmii) {
			if (adv & ADVERTISED_1000baseT_Half)
				giga_ctrl |= ADVERTISE_1000HALF;
			if (adv & ADVERTISED_1000baseT_Full)
				giga_ctrl |= ADVERTISE_1000FULL;
		} else if (adv & (ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full)) {
			netif_info(tp, link, dev,
				   "PHY does not support 1000Mbps\n");
			goto out;
		}

		bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

		rtl_writephy(tp, MII_ADVERTISE, auto_nego);
		rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
	} else {
		giga_ctrl = 0;

		if (speed == SPEED_10)
			bmcr = 0;
		else if (speed == SPEED_100)
			bmcr = BMCR_SPEED100;
		else
			goto out;

		if (duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	rtl_writephy(tp, MII_BMCR, bmcr);

	/* Vendor workaround: extra PHY writes for forced-100 on 8110S/SB. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
			rtl_writephy(tp, 0x17, 0x2138);
			rtl_writephy(tp, 0x0e, 0x0260);
		} else {
			rtl_writephy(tp, 0x17, 0x2108);
			rtl_writephy(tp, 0x0e, 0x0000);
		}
	}

	rc = 0;
out:
	return rc;
}
1725
/*
 * Dispatch a speed change to the TBI or XMII backend.  When gigabit
 * autonegotiation is requested on a running interface, re-arm the PHY
 * watchdog timer to monitor the negotiation result.
 */
static int rtl8169_set_speed(struct net_device *dev,
			     u8 autoneg, u16 speed, u8 duplex, u32 advertising)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
	if (ret < 0)
		goto out;

	if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
	    (advertising & ADVERTISED_1000baseT_Full)) {
		mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
	}
out:
	return ret;
}
1743
/*
 * ethtool .set_settings: stop the PHY watchdog timer first so it cannot
 * race with the reconfiguration, then apply under the work lock.
 */
static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	del_timer_sync(&tp->timer);

	rtl_lock_work(tp);
	ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
				cmd->duplex, cmd->advertising);
	rtl_unlock_work(tp);

	return ret;
}
1758
1759 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1760 netdev_features_t features)
1761 {
1762 struct rtl8169_private *tp = netdev_priv(dev);
1763
1764 if (dev->mtu > TD_MSS_MAX)
1765 features &= ~NETIF_F_ALL_TSO;
1766
1767 if (dev->mtu > JUMBO_1K &&
1768 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1769 features &= ~NETIF_F_IP_CSUM;
1770
1771 return features;
1772 }
1773
1774 static void __rtl8169_set_features(struct net_device *dev,
1775 netdev_features_t features)
1776 {
1777 struct rtl8169_private *tp = netdev_priv(dev);
1778 netdev_features_t changed = features ^ dev->features;
1779 void __iomem *ioaddr = tp->mmio_addr;
1780
1781 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1782 return;
1783
1784 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1785 if (features & NETIF_F_RXCSUM)
1786 tp->cp_cmd |= RxChkSum;
1787 else
1788 tp->cp_cmd &= ~RxChkSum;
1789
1790 if (dev->features & NETIF_F_HW_VLAN_RX)
1791 tp->cp_cmd |= RxVlan;
1792 else
1793 tp->cp_cmd &= ~RxVlan;
1794
1795 RTL_W16(CPlusCmd, tp->cp_cmd);
1796 RTL_R16(CPlusCmd);
1797 }
1798 if (changed & NETIF_F_RXALL) {
1799 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1800 if (features & NETIF_F_RXALL)
1801 tmp |= (AcceptErr | AcceptRunt);
1802 RTL_W32(RxConfig, tmp);
1803 }
1804 }
1805
/* ndo_set_features: apply under the work lock.  Always succeeds. */
static int rtl8169_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);
	__rtl8169_set_features(dev, features);
	rtl_unlock_work(tp);

	return 0;
}
1817
1818
1819 static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1820 struct sk_buff *skb)
1821 {
1822 return (vlan_tx_tag_present(skb)) ?
1823 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1824 }
1825
/*
 * Extract a hardware-stripped VLAN tag from the RX descriptor into the
 * skb, then clear opts2 so the descriptor can be recycled.  The tag is
 * byte-swapped because the chip stores it in network order.
 */
static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	if (opts2 & RxVlanTag)
		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));

	desc->opts2 = 0;
}
1835
/*
 * ethtool get_settings for fiber (TBI) links: always 1000/full over an
 * internal fibre transceiver; only the autoneg state is read back.
 */
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 status;

	cmd->supported =
		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->transceiver = XCVR_INTERNAL;

	status = RTL_R32(TBICSR);
	cmd->advertising = (status & TBINwEnable) ?  ADVERTISED_Autoneg : 0;
	cmd->autoneg = !!(status & TBINwEnable);

	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL; /* Always set */

	return 0;
}
1856
/* ethtool get_settings for copper links, via the generic MII helper. */
static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return mii_ethtool_gset(&tp->mii, cmd);
}
1863
/* ethtool .get_settings: dispatch to the TBI/XMII backend under lock. */
static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int rc;

	rtl_lock_work(tp);
	rc = tp->get_settings(dev, cmd);
	rtl_unlock_work(tp);

	return rc;
}
1875
/*
 * ethtool .get_regs: copy the MMIO register window into @p, clamped to
 * R8169_REGS_SIZE.  Taken under the work lock to get a stable snapshot.
 */
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (regs->len > R8169_REGS_SIZE)
		regs->len = R8169_REGS_SIZE;

	rtl_lock_work(tp);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	rtl_unlock_work(tp);
}
1888
/* ethtool .get_msglevel: current netif message-enable mask. */
static u32 rtl8169_get_msglevel(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}
1895
/* ethtool .set_msglevel: set the netif message-enable mask. */
static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}
1902
/*
 * ethtool statistics names.  The order MUST match the data[] indices
 * filled in by rtl8169_get_ethtool_stats().
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1918
1919 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1920 {
1921 switch (sset) {
1922 case ETH_SS_STATS:
1923 return ARRAY_SIZE(rtl8169_gstrings);
1924 default:
1925 return -EOPNOTSUPP;
1926 }
1927 }
1928
/* Poll condition: CounterDump set means the tally dump is in progress. */
DECLARE_RTL_COND(rtl_counters_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CounterAddrLow) & CounterDump;
}
1935
/*
 * Ask the chip to DMA its hardware tally counters into a temporary
 * coherent buffer and merge the result into tp->counters.  Sequence:
 * program the 64-bit DMA address into CounterAddrHigh/Low, set the
 * CounterDump bit, poll until the chip clears it, then copy.  Silently
 * keeps the old snapshot on allocation failure or dump timeout.
 */
static void rtl8169_update_counters(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	struct rtl8169_counters *counters;
	dma_addr_t paddr;
	u32 cmd;

	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled.
	 */
	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
		return;

	counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
	if (!counters)
		return;

	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
	cmd = (u64)paddr & DMA_BIT_MASK(32);
	RTL_W32(CounterAddrLow, cmd);
	RTL_W32(CounterAddrLow, cmd | CounterDump);

	if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
		memcpy(&tp->counters, counters, sizeof(*counters));

	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

	dma_free_coherent(d, sizeof(*counters), counters, paddr);
}
1969
/*
 * ethtool .get_ethtool_stats: refresh the hardware tally snapshot and
 * export it.  data[] index order must match rtl8169_gstrings[].
 * Note: "tx_underun" is the (misspelled) field name in the hardware
 * counters struct, exported as "tx_underrun".
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	ASSERT_RTNL();

	rtl8169_update_counters(dev);

	data[0] = le64_to_cpu(tp->counters.tx_packets);
	data[1] = le64_to_cpu(tp->counters.rx_packets);
	data[2] = le64_to_cpu(tp->counters.tx_errors);
	data[3] = le32_to_cpu(tp->counters.rx_errors);
	data[4] = le16_to_cpu(tp->counters.rx_missed);
	data[5] = le16_to_cpu(tp->counters.align_errors);
	data[6] = le32_to_cpu(tp->counters.tx_one_collision);
	data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
	data[8] = le64_to_cpu(tp->counters.rx_unicast);
	data[9] = le64_to_cpu(tp->counters.rx_broadcast);
	data[10] = le32_to_cpu(tp->counters.rx_multicast);
	data[11] = le16_to_cpu(tp->counters.tx_aborted);
	data[12] = le16_to_cpu(tp->counters.tx_underun);
}
1993
1994 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1995 {
1996 switch(stringset) {
1997 case ETH_SS_STATS:
1998 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1999 break;
2000 }
2001 }
2002
/* ethtool operations exposed by this driver. */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_sset_count		= rtl8169_get_sset_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_ts_info		= ethtool_op_get_ts_info,
};
2019
/*
 * Identify the chip variant from the TxConfig hardware version bits.
 * The table is scanned in order, so more specific masks must precede
 * their catch-all family entries; the final all-zero entry always
 * matches and yields RTL_GIGA_MAC_NONE, in which case we fall back to
 * @default_version for the PCI-ID family.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
				    struct net_device *dev, u8 default_version)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/*
	 * The driver currently handles the 8168Bf and the 8168Be identically
	 * but they can be identified more specifically through the test below
	 * if needed:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
	 *
	 * Same thing for the 8101Eb and the 8101Ec:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
	 */
	static const struct rtl_mac_info {
		u32 mask;	/* bits of TxConfig to compare */
		u32 val;	/* expected value under the mask */
		int mac_version;
	} mac_info[] = {
		/* 8168G family. */
		{ 0x7cf00000, 0x4c100000,	RTL_GIGA_MAC_VER_41 },
		{ 0x7cf00000, 0x4c000000,	RTL_GIGA_MAC_VER_40 },

		/* 8168F family. */
		{ 0x7c800000, 0x48800000,	RTL_GIGA_MAC_VER_38 },
		{ 0x7cf00000, 0x48100000,	RTL_GIGA_MAC_VER_36 },
		{ 0x7cf00000, 0x48000000,	RTL_GIGA_MAC_VER_35 },

		/* 8168E family. */
		{ 0x7c800000, 0x2c800000,	RTL_GIGA_MAC_VER_34 },
		{ 0x7cf00000, 0x2c200000,	RTL_GIGA_MAC_VER_33 },
		{ 0x7cf00000, 0x2c100000,	RTL_GIGA_MAC_VER_32 },
		{ 0x7c800000, 0x2c000000,	RTL_GIGA_MAC_VER_33 },

		/* 8168D family. */
		{ 0x7cf00000, 0x28300000,	RTL_GIGA_MAC_VER_26 },
		{ 0x7cf00000, 0x28100000,	RTL_GIGA_MAC_VER_25 },
		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_26 },

		/* 8168DP family. */
		{ 0x7cf00000, 0x28800000,	RTL_GIGA_MAC_VER_27 },
		{ 0x7cf00000, 0x28a00000,	RTL_GIGA_MAC_VER_28 },
		{ 0x7cf00000, 0x28b00000,	RTL_GIGA_MAC_VER_31 },

		/* 8168C family. */
		{ 0x7cf00000, 0x3cb00000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c900000,	RTL_GIGA_MAC_VER_23 },
		{ 0x7cf00000, 0x3c800000,	RTL_GIGA_MAC_VER_18 },
		{ 0x7c800000, 0x3c800000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c000000,	RTL_GIGA_MAC_VER_19 },
		{ 0x7cf00000, 0x3c200000,	RTL_GIGA_MAC_VER_20 },
		{ 0x7cf00000, 0x3c300000,	RTL_GIGA_MAC_VER_21 },
		{ 0x7cf00000, 0x3c400000,	RTL_GIGA_MAC_VER_22 },
		{ 0x7c800000, 0x3c000000,	RTL_GIGA_MAC_VER_22 },

		/* 8168B family. */
		{ 0x7cf00000, 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x7cf00000, 0x38500000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x38000000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },

		/* 8101 family. */
		{ 0x7cf00000, 0x44900000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44800000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44000000,	RTL_GIGA_MAC_VER_37 },
		{ 0x7cf00000, 0x40b00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40a00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40900000,	RTL_GIGA_MAC_VER_29 },
		{ 0x7c800000, 0x40800000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x34a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x24a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x34900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x24900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x34800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x24800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x7cf00000, 0x34300000,	RTL_GIGA_MAC_VER_10 },
		{ 0x7cf00000, 0x34200000,	RTL_GIGA_MAC_VER_16 },
		{ 0x7c800000, 0x34800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x24800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x34000000,	RTL_GIGA_MAC_VER_16 },
		/* FIXME: where did these entries come from ? -- FR */
		{ 0xfc800000, 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0xfc800000, 0x30800000,	RTL_GIGA_MAC_VER_14 },

		/* 8110 family. */
		{ 0xfc800000, 0x98000000,	RTL_GIGA_MAC_VER_06 },
		{ 0xfc800000, 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0xfc800000, 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0xfc800000, 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0xfc800000, 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0xfc800000, 0x00000000,	RTL_GIGA_MAC_VER_01 },

		/* Catch-all */
		{ 0x00000000, 0x00000000,	RTL_GIGA_MAC_NONE   }
	};
	const struct rtl_mac_info *p = mac_info;
	u32 reg;

	reg = RTL_R32(TxConfig);
	while ((reg & p->mask) != p->val)
		p++;
	tp->mac_version = p->mac_version;

	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
		netif_notice(tp, probe, dev,
			     "unknown MAC, using family default\n");
		tp->mac_version = default_version;
	}
}
2131
/* Debug helper: print the detected MAC version (RTL8169_DEBUG only). */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
2136
/* One entry of a batched PHY register write. */
struct phy_reg {
	u16 reg;	/* PHY register number */
	u16 val;	/* value to write */
};
2141
2142 static void rtl_writephy_batch(struct rtl8169_private *tp,
2143 const struct phy_reg *regs, int len)
2144 {
2145 while (len-- > 0) {
2146 rtl_writephy(tp, regs->reg, regs->val);
2147 regs++;
2148 }
2149 }
2150
2151 #define PHY_READ 0x00000000
2152 #define PHY_DATA_OR 0x10000000
2153 #define PHY_DATA_AND 0x20000000
2154 #define PHY_BJMPN 0x30000000
2155 #define PHY_READ_EFUSE 0x40000000
2156 #define PHY_READ_MAC_BYTE 0x50000000
2157 #define PHY_WRITE_MAC_BYTE 0x60000000
2158 #define PHY_CLEAR_READCOUNT 0x70000000
2159 #define PHY_WRITE 0x80000000
2160 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2161 #define PHY_COMP_EQ_SKIPN 0xa0000000
2162 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2163 #define PHY_WRITE_PREVIOUS 0xc0000000
2164 #define PHY_SKIPN 0xd0000000
2165 #define PHY_DELAY_MS 0xe0000000
2166 #define PHY_WRITE_ERI_WORD 0xf0000000
2167
/* Header of the "versioned" firmware image format (magic == 0). */
struct fw_info {
	u32	magic;			/* 0 selects this header format */
	char	version[RTL_VER_SIZE];	/* human-readable firmware version */
	__le32	fw_start;		/* byte offset of the opcode stream */
	__le32	fw_len;			/* number of 32-bit opcodes */
	u8	chksum;			/* whole-file byte sum must be 0 */
} __packed;
2175
/* Size of one firmware PHY action word (element type of pa->code). */
#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
2177
/*
 * Validate the loaded firmware image and locate its PHY action table.
 *
 * Two layouts are accepted:
 *  - new style (magic == 0): struct fw_info header, 8-bit sum over the
 *    whole image must be zero, action table at fw_start/fw_len;
 *  - old style: a raw array of action words, version string taken from
 *    the firmware file name.
 *
 * On success pa->code/pa->size point into fw->data and true is returned;
 * false means the image is malformed.
 */
static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	const struct firmware *fw = rtl_fw->fw;
	struct fw_info *fw_info = (struct fw_info *)fw->data;
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	char *version = rtl_fw->version;
	bool rc = false;

	if (fw->size < FW_OPCODE_SIZE)
		goto out;

	if (!fw_info->magic) {
		size_t i, size, start;
		u8 checksum = 0;

		if (fw->size < sizeof(*fw_info))
			goto out;

		/* 8-bit sum over the entire image must come out to zero. */
		for (i = 0; i < fw->size; i++)
			checksum += fw->data[i];
		if (checksum != 0)
			goto out;

		start = le32_to_cpu(fw_info->fw_start);
		if (start > fw->size)
			goto out;

		size = le32_to_cpu(fw_info->fw_len);
		/* fw_len counts opcodes; they must fit after 'start'. */
		if (size > (fw->size - start) / FW_OPCODE_SIZE)
			goto out;

		memcpy(version, fw_info->version, RTL_VER_SIZE);

		pa->code = (__le32 *)(fw->data + start);
		pa->size = size;
	} else {
		if (fw->size % FW_OPCODE_SIZE)
			goto out;

		strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);

		pa->code = (__le32 *)fw->data;
		pa->size = fw->size / FW_OPCODE_SIZE;
	}
	version[RTL_VER_SIZE - 1] = 0;

	rc = true;
out:
	return rc;
}
2228
2229 static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2230 struct rtl_fw_phy_action *pa)
2231 {
2232 bool rc = false;
2233 size_t index;
2234
2235 for (index = 0; index < pa->size; index++) {
2236 u32 action = le32_to_cpu(pa->code[index]);
2237 u32 regno = (action & 0x0fff0000) >> 16;
2238
2239 switch(action & 0xf0000000) {
2240 case PHY_READ:
2241 case PHY_DATA_OR:
2242 case PHY_DATA_AND:
2243 case PHY_READ_EFUSE:
2244 case PHY_CLEAR_READCOUNT:
2245 case PHY_WRITE:
2246 case PHY_WRITE_PREVIOUS:
2247 case PHY_DELAY_MS:
2248 break;
2249
2250 case PHY_BJMPN:
2251 if (regno > index) {
2252 netif_err(tp, ifup, tp->dev,
2253 "Out of range of firmware\n");
2254 goto out;
2255 }
2256 break;
2257 case PHY_READCOUNT_EQ_SKIP:
2258 if (index + 2 >= pa->size) {
2259 netif_err(tp, ifup, tp->dev,
2260 "Out of range of firmware\n");
2261 goto out;
2262 }
2263 break;
2264 case PHY_COMP_EQ_SKIPN:
2265 case PHY_COMP_NEQ_SKIPN:
2266 case PHY_SKIPN:
2267 if (index + 1 + regno >= pa->size) {
2268 netif_err(tp, ifup, tp->dev,
2269 "Out of range of firmware\n");
2270 goto out;
2271 }
2272 break;
2273
2274 case PHY_READ_MAC_BYTE:
2275 case PHY_WRITE_MAC_BYTE:
2276 case PHY_WRITE_ERI_WORD:
2277 default:
2278 netif_err(tp, ifup, tp->dev,
2279 "Invalid action 0x%08x\n", action);
2280 goto out;
2281 }
2282 }
2283 rc = true;
2284 out:
2285 return rc;
2286 }
2287
2288 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2289 {
2290 struct net_device *dev = tp->dev;
2291 int rc = -EINVAL;
2292
2293 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2294 netif_err(tp, ifup, dev, "invalid firwmare\n");
2295 goto out;
2296 }
2297
2298 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
2299 rc = 0;
2300 out:
2301 return rc;
2302 }
2303
/*
 * Interpret the firmware PHY action bytecode previously validated by
 * rtl_fw_data_ok(). Interpreter state: 'predata' holds the last value
 * read (PHY_READ / PHY_READ_EFUSE, further modified by PHY_DATA_OR /
 * PHY_DATA_AND), 'count' counts reads since the last PHY_CLEAR_READCOUNT.
 * An all-zero action word terminates the program early.
 */
static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	u32 predata, count;
	size_t index;

	predata = count = 0;

	for (index = 0; index < pa->size; ) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 data = action & 0x0000ffff;
		u32 regno = (action & 0x0fff0000) >> 16;

		if (!action)
			break;

		switch(action & 0xf0000000) {
		case PHY_READ:
			predata = rtl_readphy(tp, regno);
			count++;
			index++;
			break;
		case PHY_DATA_OR:
			predata |= data;
			index++;
			break;
		case PHY_DATA_AND:
			predata &= data;
			index++;
			break;
		case PHY_BJMPN:
			/* Jump backwards by regno opcodes. */
			index -= regno;
			break;
		case PHY_READ_EFUSE:
			predata = rtl8168d_efuse_read(tp, regno);
			index++;
			break;
		case PHY_CLEAR_READCOUNT:
			count = 0;
			index++;
			break;
		case PHY_WRITE:
			rtl_writephy(tp, regno, data);
			index++;
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* Skip the next opcode when the read count matches. */
			index += (count == data) ? 2 : 1;
			break;
		case PHY_COMP_EQ_SKIPN:
			if (predata == data)
				index += regno;
			index++;
			break;
		case PHY_COMP_NEQ_SKIPN:
			if (predata != data)
				index += regno;
			index++;
			break;
		case PHY_WRITE_PREVIOUS:
			rtl_writephy(tp, regno, predata);
			index++;
			break;
		case PHY_SKIPN:
			/* Unconditionally skip regno opcodes. */
			index += regno + 1;
			break;
		case PHY_DELAY_MS:
			mdelay(data);
			index++;
			break;

		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			/* Already rejected by rtl_check_firmware(). */
			BUG();
		}
	}
}
2382
2383 static void rtl_release_firmware(struct rtl8169_private *tp)
2384 {
2385 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2386 release_firmware(tp->rtl_fw->fw);
2387 kfree(tp->rtl_fw);
2388 }
2389 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
2390 }
2391
2392 static void rtl_apply_firmware(struct rtl8169_private *tp)
2393 {
2394 struct rtl_fw *rtl_fw = tp->rtl_fw;
2395
2396 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2397 if (!IS_ERR_OR_NULL(rtl_fw))
2398 rtl_phy_write_fw(tp, rtl_fw);
2399 }
2400
2401 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2402 {
2403 if (rtl_readphy(tp, reg) != val)
2404 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2405 else
2406 rtl_apply_firmware(tp);
2407 }
2408
/*
 * PHY init for the 8169s. The values are an opaque, order-sensitive
 * sequence supplied by Realtek; do not reorder.
 */
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x006e },
		{ 0x08, 0x0708 },
		{ 0x15, 0x4000 },
		{ 0x18, 0x65c7 },

		{ 0x1f, 0x0001 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x0000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf60 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x0077 },
		{ 0x04, 0x7800 },
		{ 0x04, 0x7000 },

		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf0f9 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xa000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x00bb },
		{ 0x04, 0xb800 },
		{ 0x04, 0xb000 },

		{ 0x03, 0xdf41 },
		{ 0x02, 0xdc60 },
		{ 0x01, 0x6340 },
		{ 0x00, 0x007d },
		{ 0x04, 0xd800 },
		{ 0x04, 0xd000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x100a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },

		{ 0x1f, 0x0000 },
		{ 0x0b, 0x0000 },
		{ 0x00, 0x9200 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2475
/* PHY init for the 8169sb: a single vendor-magic register write. */
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x01, 0x90d0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2486
2487 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2488 {
2489 struct pci_dev *pdev = tp->pci_dev;
2490
2491 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2492 (pdev->subsystem_device != 0xe000))
2493 return;
2494
2495 rtl_writephy(tp, 0x1f, 0x0001);
2496 rtl_writephy(tp, 0x10, 0xf01b);
2497 rtl_writephy(tp, 0x1f, 0x0000);
2498 }
2499
/*
 * PHY init for the 8169scd, followed by a Gigabyte board quirk.
 * Values are vendor magic; sequence order matters.
 */
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x14, 0xfb54 },
		{ 0x18, 0xf5c7 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8169scd_hw_phy_config_quirk(tp);
}
2546
/* PHY init for the 8169sce; vendor-magic, order-sensitive sequence. */
static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0x8480 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x18, 0x67c7 },
		{ 0x04, 0x2000 },
		{ 0x03, 0x002f },
		{ 0x02, 0x4360 },
		{ 0x01, 0x0109 },
		{ 0x00, 0x3022 },
		{ 0x04, 0x2800 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2599
/*
 * PHY init for the 8168bb. The bit-0 patch of reg 0x16 must happen on
 * page 0x0001 before the batch runs.
 */
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0001);
	rtl_patchphy(tp, 0x16, 1 << 0);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2612
/* PHY init for the 8168be/bf: single vendor-magic write on page 1. */
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2623
/* PHY init for the first 8168cp variant; vendor-magic writes. */
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0000 },
		{ 0x1d, 0x0f00 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x1ec8 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2636
/*
 * PHY init for the second 8168cp variant: set bit 5 of regs 0x14/0x0d
 * on page 0, then apply the vendor-magic batch.
 */
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2651
/* PHY init for the 8168c rev 1; batch first, then bit patches. */
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1f, 0x0002 },
		{ 0x00, 0x88d4 },
		{ 0x01, 0x82b1 },
		{ 0x03, 0x7002 },
		{ 0x08, 0x9e30 },
		{ 0x09, 0x01f0 },
		{ 0x0a, 0x5500 },
		{ 0x0c, 0x00c8 },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xc096 },
		{ 0x16, 0x000a },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x09, 0x2000 },
		{ 0x09, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2680
/* PHY init for the 8168c rev 2; batch first, then bit patches. */
static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x0761 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2708
/* PHY init for the 8168c rev 3 (also reused for rev 4). */
static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x5461 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2730
/* The 8168c rev 4 uses the same PHY setup as rev 3. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
2735
/*
 * PHY init for the first 8168d variant. All register values are
 * Realtek-provided magic; the efuse read at offset 0x01 selects
 * between two tuning tables.
 */
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold from 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/*
	 * Rx Error Issue
	 * Fine Tune Switching regulator parameter
	 */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
	rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);

	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },
			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);

		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			/* Step the low byte of reg 0x0d up to 0x6c. */
			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x6662 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x6662 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* RSET couple improve */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0d, 0x0300);
	rtl_patchphy(tp, 0x0f, 0x0010);

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Load the firmware patch only when the PHY is in the expected state. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2846
/*
 * PHY init for the second 8168d variant. Shares most of the magic
 * sequence with rtl8168d_1_hw_phy_config() but differs in the efuse
 * fallback table and the final tuning writes.
 */
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold from 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },

			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			/* Step the low byte of reg 0x0d up to 0x6c. */
			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x2642 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x2642 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	/* Switching regulator Slew rate */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0f, 0x0017);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Load the firmware patch only when the PHY is in the expected state. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2948
/* PHY init for the third 8168d variant; pure vendor-magic batch. */
static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x10, 0x0008 },
		{ 0x0d, 0x006c },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0xa4d8 },
		{ 0x09, 0x281c },
		{ 0x07, 0x2883 },
		{ 0x0a, 0x6b35 },
		{ 0x1d, 0x3da4 },
		{ 0x1c, 0xeffd },
		{ 0x14, 0x7f52 },
		{ 0x18, 0x7fc6 },
		{ 0x08, 0x0601 },
		{ 0x06, 0x4063 },
		{ 0x10, 0xf074 },
		{ 0x1f, 0x0003 },
		{ 0x13, 0x0789 },
		{ 0x12, 0xf4bd },
		{ 0x1a, 0x04fd },
		{ 0x14, 0x84b0 },
		{ 0x1f, 0x0000 },
		{ 0x00, 0x9200 },

		{ 0x1f, 0x0005 },
		{ 0x01, 0x0340 },
		{ 0x1f, 0x0001 },
		{ 0x04, 0x4000 },
		{ 0x03, 0x1d21 },
		{ 0x02, 0x0c32 },
		{ 0x01, 0x0200 },
		{ 0x00, 0x5554 },
		{ 0x04, 0x4800 },
		{ 0x04, 0x4000 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0023 },
		{ 0x16, 0x0000 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3009
/* PHY init for the fourth 8168d variant: batch plus one bit patch. */
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002d },
		{ 0x18, 0x0040 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	rtl_patchphy(tp, 0x0d, 1 << 5);
}
3025
/*
 * PHY init for the first 8168e variant: firmware first, then a batch
 * of vendor-magic writes, then individual tuning steps. The section
 * comments describe Realtek's stated purpose for each group.
 */
static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b80 },
		{ 0x06, 0xc896 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0001 },
		{ 0x0b, 0x6c20 },
		{ 0x07, 0x2872 },
		{ 0x1c, 0xefff },
		{ 0x1f, 0x0003 },
		{ 0x14, 0x6420 },
		{ 0x1f, 0x0000 },

		/* Update PFM & 10M TX idle timer */
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002f },
		{ 0x15, 0x1919 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* DCO enable for 10M IDLE Power */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0023);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* For impedance matching */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE setting (registers 0x0d/0x0e access MMD address/data). */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
	rtl_writephy(tp, 0x1f, 0x0006);
	rtl_writephy(tp, 0x00, 0x5a00);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);
}
3098
/*
 * PHY init for the second 8168e variant: firmware first, then the
 * vendor-magic batch, then per-feature tuning. Section comments are
 * Realtek's stated purpose for each group.
 */
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0004 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0002 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Green Setting */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b5b },
		{ 0x06, 0x9222 },
		{ 0x05, 0x8b6d },
		{ 0x06, 0x8000 },
		{ 0x05, 0x8b76 },
		{ 0x06, 0x8000 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE setting */
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3182
/*
 * Tuning steps shared by all 8168f-class configs (called from
 * rtl8168f_1/2 and rtl8411 setup).
 */
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3204
/*
 * PHY init for the first 8168f variant: firmware, vendor-magic batch,
 * shared 8168f tuning, then a 2-pair detection tweak.
 */
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00fb },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3254
/* PHY init for the second 8168f variant: firmware, then shared tuning. */
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);
}
3261
/*
 * PHY init for the 8411. Unlike rtl8168f_1_hw_phy_config(), the
 * phy_reg_init batch here is applied AFTER the shared 8168f tuning and
 * the 2-pair tweak — presumably intentional per Realtek's sequence;
 * NOTE(review): confirm against vendor reference before reordering.
 */
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00aa },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};


	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Modify green table for giga */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b54);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8b5d);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8a7c);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a7f);
	rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
	rtl_writephy(tp, 0x05, 0x8a82);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a88);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* uc same-seed solution */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* eee setting */
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3358
/*
 * PHY init for the first 8168g variant: upload a MAC OCP patch for
 * GPHY reset, run the firmware, then tune via PHY OCP registers.
 */
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
{
	/* MAC OCP patch blob (vendor-supplied opaque words). */
	static const u16 mac_ocp_patch[] = {
		0xe008, 0xe01b, 0xe01d, 0xe01f,
		0xe021, 0xe023, 0xe025, 0xe027,
		0x49d2, 0xf10d, 0x766c, 0x49e2,
		0xf00a, 0x1ec0, 0x8ee1, 0xc60a,

		0x77c0, 0x4870, 0x9fc0, 0x1ea0,
		0xc707, 0x8ee1, 0x9d6c, 0xc603,
		0xbe00, 0xb416, 0x0076, 0xe86c,
		0xc602, 0xbe00, 0x0000, 0xc602,

		0xbe00, 0x0000, 0xc602, 0xbe00,
		0x0000, 0xc602, 0xbe00, 0x0000,
		0xc602, 0xbe00, 0x0000, 0xc602,
		0xbe00, 0x0000, 0xc602, 0xbe00,

		0x0000, 0x0000, 0x0000, 0x0000
	};
	u32 i;

	/* Patch code for GPHY reset */
	for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
		r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
	r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
	r8168_mac_ocp_write(tp, 0xfc28, 0x0075);

	rtl_apply_firmware(tp);

	if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
	else
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);

	/* NOTE(review): the if branch touches 0xc41a but the else branch
	 * touches 0xbcc4 — looks asymmetric; confirm against vendor code
	 * before changing. */
	if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
		rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
	else
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);

	rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
	rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);

	r8168_phy_ocp_write(tp, 0xa436, 0x8012);
	rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);

	rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
}
3407
/* PHY init for the 8102e: bit patches first, then the magic batch. */
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0003 },
		{ 0x08, 0x441d },
		{ 0x01, 0x9100 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x11, 1 << 12);
	rtl_patchphy(tp, 0x19, 1 << 13);
	rtl_patchphy(tp, 0x10, 1 << 15);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3424
/* PHY init for RTL8105E (RTL_GIGA_MAC_VER_29/30): disable ALDPS, load the
 * firmware patch, then apply fixed per-page register writes. */
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0005 },
		{ 0x1a, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0004 },
		{ 0x1c, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x15, 0x7701 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);	/* let the PHY leave the low-power state */

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3450
/* PHY init for RTL8402 (RTL_GIGA_MAC_VER_37): disable ALDPS, load firmware,
 * then program the EEE-related registers. */
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
	/* Disable ALDPS before setting firmware */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(20);

	rtl_apply_firmware(tp);

	/* EEE setting */
	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x10, 0x401f);
	rtl_writephy(tp, 0x19, 0x7030);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3467
/* PHY init for RTL8106E (RTL_GIGA_MAC_VER_39): disable ALDPS, load firmware,
 * then set up EEE via ERI and page-4 PHY registers. */
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0004 },
		{ 0x10, 0xc07f },
		{ 0x19, 0x7030 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
}
3489
/* Dispatch to the chip-specific PHY initialisation routine based on the
 * detected MAC version.  VER_01, VER_31 and VER_41 need no PHY config. */
static void rtl_hw_phy_config(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_print_mac_version(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
		break;
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
		rtl8169s_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_04:
		rtl8169sb_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_05:
		rtl8169scd_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_06:
		rtl8169sce_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
		rtl8102e_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_11:
		rtl8168bb_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_12:
		rtl8168bef_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_17:
		rtl8168bef_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_18:
		rtl8168cp_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_19:
		rtl8168c_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_20:
		rtl8168c_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_21:
		rtl8168c_3_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_22:
		rtl8168c_4_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
		rtl8168cp_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_25:
		rtl8168d_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_26:
		rtl8168d_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_27:
		rtl8168d_3_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_28:
		rtl8168d_4_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
		rtl8105e_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_31:
		/* None. */
		break;
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl8168e_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl8168e_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_35:
		rtl8168f_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_36:
		rtl8168f_2_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl8402_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl8411_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl8106e_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_40:
		rtl8168g_1_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_41:
	default:
		break;
	}
}
3599
/* Periodic PHY watchdog: while the link is down, keep resetting the PHY
 * until it comes up.  Re-arms tp->timer with either the normal period or
 * a short back-off while a reset is still in flight. */
static void rtl_phy_work(struct rtl8169_private *tp)
{
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);

	if (tp->phy_reset_pending(tp)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	/* Link is up: stop polling (timer is not re-armed). */
	if (tp->link_ok(ioaddr))
		return;

	netif_warn(tp, link, tp->dev, "PHY reset until link up\n");

	tp->phy_reset_enable(tp);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
}
3627
3628 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3629 {
3630 if (!test_and_set_bit(flag, tp->wk.flags))
3631 schedule_work(&tp->wk.work);
3632 }
3633
3634 static void rtl8169_phy_timer(unsigned long __opaque)
3635 {
3636 struct net_device *dev = (struct net_device *)__opaque;
3637 struct rtl8169_private *tp = netdev_priv(dev);
3638
3639 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
3640 }
3641
/* Tear down everything rtl_init_one set up for this board, in reverse
 * order of acquisition: unmap MMIO, release BARs, clear MWI, disable the
 * PCI device, free the netdev. */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
3651
/* Poll condition: true while a PHY reset is still in progress. */
DECLARE_RTL_COND(rtl_phy_reset_cond)
{
	return tp->phy_reset_pending(tp);
}
3656
/* Trigger a PHY reset and wait (1 ms steps, up to 100 tries) for it to
 * complete. */
static void rtl8169_phy_reset(struct net_device *dev,
			      struct rtl8169_private *tp)
{
	tp->phy_reset_enable(tp);
	rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
}
3663
3664 static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3665 {
3666 void __iomem *ioaddr = tp->mmio_addr;
3667
3668 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3669 (RTL_R8(PHYstatus) & TBI_Enable);
3670 }
3671
/* Full PHY bring-up: chip-specific config, legacy 8169 quirks, reset, then
 * restart autonegotiation advertising everything the PHY supports. */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_phy_config(dev);

	/* Quirks below apply only to the early 8169 family (<= VER_06). */
	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
	}

	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_phy_reset(dev, tp);

	/* Advertise 10/100 always; gigabit only if the PHY has GMII. */
	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
			  ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			  (tp->mii.supports_gmii ?
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full : 0));

	if (rtl_tbi_enabled(tp))
		netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
3707
/* Program the unicast MAC address into the MAC0/MAC4 registers (and, on
 * 8168E-VL, mirror it into the EXGMAC block).  The register readbacks after
 * each write flush the posted writes. */
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 high;
	u32 low;

	/* Pack the 6-byte address little-endian into two registers. */
	low  = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
	high = addr[4] | (addr[5] << 8);

	rtl_lock_work(tp);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W32(MAC4, high);
	RTL_R32(MAC4);

	RTL_W32(MAC0, low);
	RTL_R32(MAC0);

	if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
		const struct exgmac_reg e[] = {
			{ .addr = 0xe0, ERIAR_MASK_1111, .val = low },
			{ .addr = 0xe4, ERIAR_MASK_1111, .val = high },
			{ .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
			{ .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
							   low  >> 16 },
		};

		rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	rtl_unlock_work(tp);
}
3743
3744 static int rtl_set_mac_address(struct net_device *dev, void *p)
3745 {
3746 struct rtl8169_private *tp = netdev_priv(dev);
3747 struct sockaddr *addr = p;
3748
3749 if (!is_valid_ether_addr(addr->sa_data))
3750 return -EADDRNOTAVAIL;
3751
3752 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3753
3754 rtl_rar_set(tp, dev->dev_addr);
3755
3756 return 0;
3757 }
3758
3759 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3760 {
3761 struct rtl8169_private *tp = netdev_priv(dev);
3762 struct mii_ioctl_data *data = if_mii(ifr);
3763
3764 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
3765 }
3766
3767 static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3768 struct mii_ioctl_data *data, int cmd)
3769 {
3770 switch (cmd) {
3771 case SIOCGMIIPHY:
3772 data->phy_id = 32; /* Internal PHY */
3773 return 0;
3774
3775 case SIOCGMIIREG:
3776 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
3777 return 0;
3778
3779 case SIOCSMIIREG:
3780 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
3781 return 0;
3782 }
3783 return -EOPNOTSUPP;
3784 }
3785
/* MII ioctls make no sense on a TBI (fiber) interface: always reject. */
static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
{
	return -EOPNOTSUPP;
}
3790
3791 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3792 {
3793 if (tp->features & RTL_FEATURE_MSI) {
3794 pci_disable_msi(pdev);
3795 tp->features &= ~RTL_FEATURE_MSI;
3796 }
3797 }
3798
/* Select the MDIO accessors for this chip generation; 8168DP and 8168G
 * families use indirect access schemes instead of the plain r8169 one. */
static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
{
	struct mdio_ops *ops = &tp->mdio_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
		ops->write = r8168dp_1_mdio_write;
		ops->read = r8168dp_1_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		ops->write = r8168dp_2_mdio_write;
		ops->read = r8168dp_2_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		ops->write = r8168g_mdio_write;
		ops->read = r8168g_mdio_read;
		break;
	default:
		ops->write = r8169_mdio_write;
		ops->read = r8169_mdio_read;
		break;
	}
}
3824
/* On chips that need it, keep the receiver accepting broadcast/multicast/
 * unicast while suspended so that Wake-on-LAN frames are still seen. */
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_39:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		RTL_W32(RxConfig, RTL_R32(RxConfig) |
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
		break;
	default:
		break;
	}
}
3847
/* If Wake-on-LAN is armed, put the PHY into a WoL-capable low-power state
 * instead of fully powering it down.  Returns true when WoL handling was
 * applied (caller then skips the full power-down path). */
static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
{
	if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
		return false;

	/* Clear BMCR (speed/duplex forced off, no power-down bit). */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, 0x0000);

	rtl_wol_suspend_quirk(tp);

	return true;
}
3860
/* Power the 810x PHY down via the BMCR power-down bit. */
static void r810x_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3866
/* Power the 810x PHY back up and re-enable autonegotiation. */
static void r810x_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3872
/* PLL/PHY power-down for the 810x family.  WoL, when armed, takes priority
 * over a full power-down.  Chips not listed in the switch also gate the
 * PLL via the PMCH register bit 7. */
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_wol_pll_power_down(tp))
		return;

	r810x_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
3895
/* PLL/PHY power-up for the 810x family: mirror of r810x_pll_power_down. */
static void r810x_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r810x_phy_power_up(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}
}
3915
/* Power the 8168 PHY up.  The 8168B..DP generations additionally need PHY
 * register 0x0e cleared before autonegotiation is re-enabled. */
static void r8168_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0000);
		break;
	default:
		break;
	}
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3942
/* Power the 8168 PHY down.  8168E (VER_32/33) keeps ANENABLE set together
 * with PDOWN; the 8168B..DP generations first write 0x0200 to register
 * 0x0e and then deliberately fall through into the generic PDOWN write. */
static void r8168_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0200);
		/* fall through - these chips also need the PDOWN write */
	default:
		rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
		break;
	}
}
3973
/* PLL/PHY power-down for the 8168 family.  Skipped entirely when a DASH
 * management agent (8168DP) or ASF (8168CP) is active, since those need
 * the NIC alive; WoL, when armed, replaces the full power-down. */
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31) &&
	    r8168dp_check_dash(tp)) {
		return;
	}

	if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_24) &&
	    (RTL_R16(CPlusCmd) & ASF)) {
		return;
	}

	/* 8168E quirk: EPHY 0x19 must be set before powering down. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_33)
		rtl_ephy_write(tp, 0x19, 0xff64);

	if (rtl_wol_pll_power_down(tp))
		return;

	r8168_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
4012
/* PLL/PHY power-up for the 8168 family: re-enable the PLL (PMCH bit 7) on
 * chips that gate it, then power the PHY back up. */
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}

	r8168_phy_power_up(tp);
}
4031
/* Invoke an optional chip-specific callback; a NULL op means "no action
 * needed for this chip". */
static void rtl_generic_op(struct rtl8169_private *tp,
			   void (*op)(struct rtl8169_private *))
{
	if (!op)
		return;

	op(tp);
}
4038
/* Dispatch to the chip-specific PLL power-down handler (may be NULL). */
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.down);
}
4043
/* Dispatch to the chip-specific PLL power-up handler (may be NULL). */
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.up);
}
4048
/* Bind the PLL power-management callbacks: 810x family, 8168 family, or
 * none (old 8169 chips have no PLL power control). */
static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
{
	struct pll_power_ops *ops = &tp->pll_power_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_39:
		ops->down	= r810x_pll_power_down;
		ops->up		= r810x_pll_power_up;
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		ops->down	= r8168_pll_power_down;
		ops->up		= r8168_pll_power_up;
		break;

	default:
		ops->down	= NULL;
		ops->up		= NULL;
		break;
	}
}
4100
/* Program the chip-generation-specific base RxConfig value (FIFO threshold
 * / interrupt batching / multi-fragment RX) on top of the DMA burst size. */
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_34:
		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
		break;
	default:
		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
		break;
	}
}
4137
4138 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
4139 {
4140 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
4141 }
4142
/* Enable jumbo frame support via the chip-specific callback; config
 * registers are only writable between Cfg9346 unlock/lock. */
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.enable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
4151
/* Disable jumbo frame support via the chip-specific callback, guarded by
 * the Cfg9346 unlock/lock sequence. */
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.disable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
4160
/* 8168C jumbo enable: set both jumbo bits and shrink the PCIe max read
 * request size (0x2 => 512 bytes). */
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
4169
/* 8168C jumbo disable: clear both jumbo bits and restore the larger PCIe
 * max read request size (0x5 => 4096 bytes). */
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
4178
/* 8168DP jumbo enable: a single Config3 bit suffices. */
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
}
4185
/* 8168DP jumbo disable: clear the single Config3 jumbo bit. */
static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
}
4192
/* 8168E jumbo enable: raise the TX packet size limit, set the jumbo bits
 * and shrink the PCIe max read request size. */
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x3f);
	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | 0x01);
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
4202
/* 8168E jumbo disable: restore the standard TX size limit, clear the jumbo
 * bits and restore the larger PCIe max read request size. */
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x0c);
	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
4212
/* 8168B rev 0 jumbo enable: only needs the PCIe read-request/no-snoop
 * tuning; no config register bits. */
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4218
/* 8168B rev 0 jumbo disable: restore the larger PCIe read request size. */
static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4224
/* 8168B rev 1 jumbo enable: rev 0 sequence plus Config4 bit 0. */
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_enable(tp);

	RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
}
4233
/* 8168B rev 1 jumbo disable: rev 0 sequence plus clearing Config4 bit 0. */
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_disable(tp);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
4242
/* Bind the jumbo-frame enable/disable callbacks appropriate for this chip
 * generation; NULL ops mean jumbo frames need no action or are unsupported. */
static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
{
	struct jumbo_ops *ops = &tp->jumbo_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		ops->disable	= r8168b_0_hw_jumbo_disable;
		ops->enable	= r8168b_0_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		ops->disable	= r8168b_1_hw_jumbo_disable;
		ops->enable	= r8168b_1_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
		ops->disable	= r8168c_hw_jumbo_disable;
		ops->enable	= r8168c_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
		ops->disable	= r8168dp_hw_jumbo_disable;
		ops->enable	= r8168dp_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
		ops->disable	= r8168e_hw_jumbo_disable;
		ops->enable	= r8168e_hw_jumbo_enable;
		break;

	/*
	 * No action needed for jumbo frames with 8169.
	 * No jumbo for 810x at all.
	 */
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	default:
		ops->disable	= NULL;
		ops->enable	= NULL;
		break;
	}
}
4294
/* Poll condition: true while the chip's self-clearing reset bit is set. */
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(ChipCmd) & CmdReset;
}
4301
/* Issue a chip soft reset and wait (100 us steps, up to 100 tries) for the
 * reset bit to self-clear. */
static void rtl_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(ChipCmd, CmdReset);

	rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
4310
/* Load and validate the firmware patch for this chip, storing the result
 * in tp->rtl_fw.  Failure is non-fatal: a warning is logged and tp->rtl_fw
 * is left NULL so the driver runs without a patch.  Uses goto-chain
 * cleanup; each error label unwinds exactly the resources acquired so far. */
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw;
	const char *name;
	int rc = -ENOMEM;

	/* No firmware name registered for this chip: nothing to load. */
	name = rtl_lookup_firmware_name(tp);
	if (!name)
		goto out_no_firmware;

	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
	if (!rtl_fw)
		goto err_warn;

	rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
	if (rc < 0)
		goto err_free;

	rc = rtl_check_firmware(tp, rtl_fw);
	if (rc < 0)
		goto err_release_firmware;

	tp->rtl_fw = rtl_fw;
out:
	return;

err_release_firmware:
	release_firmware(rtl_fw->fw);
err_free:
	kfree(rtl_fw);
err_warn:
	netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
		   name, rc);
out_no_firmware:
	tp->rtl_fw = NULL;
	goto out;
}
4348
/* Load the firmware once: tp->rtl_fw holds an ERR_PTR sentinel until the
 * first (and only) load attempt has been made. */
static void rtl_request_firmware(struct rtl8169_private *tp)
{
	if (IS_ERR(tp->rtl_fw))
		rtl_request_uncached_firmware(tp);
}
4354
/* Stop the receiver accepting any packets by clearing all accept bits in
 * RxConfig (other RxConfig settings are preserved). */
static void rtl_rx_close(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
4361
/* Poll condition: true while the normal-priority TX queue still has work
 * pending (NPQ poll bit set). */
DECLARE_RTL_COND(rtl_npq_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(TxPoll) & NPQ;
}
4368
/* Poll condition: true once the TX FIFO reports empty. */
DECLARE_RTL_COND(rtl_txcfg_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TxConfig) & TXCFG_EMPTY;
}
4375
/* Quiesce and reset the chip: mask interrupts, close the receiver, drain
 * the transmitter using the generation-appropriate method, then soft-reset. */
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(tp);

	rtl_rx_close(tp);

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		/* 8168DP: wait for the NPQ poll bit to clear. */
		rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_37 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_40 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_41 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_38) {
		/* Newer chips: request stop and wait for TX FIFO empty. */
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
	} else {
		/* Older chips: request stop and allow a fixed settle time. */
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		udelay(100);
	}

	rtl_hw_reset(tp);
}
4405
/* Program the base TxConfig value: DMA burst size and inter-frame gap. */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));
}
4414
/* Run the chip-generation-specific hardware start routine, then unmask
 * interrupts. */
static void rtl_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->hw_start(dev);

	rtl_irq_enable_all(tp);
}
4423
/* Load the DMA addresses of the TX/RX descriptor rings into the chip.
 * The high half of each 64-bit address must be written first (see below). */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
					 void __iomem *ioaddr)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
4437
/* Read CPlusCmd and write the same value back (the write-back latches the
 * register on some chips); returns the value read. */
static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
{
	u16 cmd;

	cmd = RTL_R16(CPlusCmd);
	RTL_W16(CPlusCmd, cmd);
	return cmd;
}
4446
/* Set the RX size filter just above the buffer size, effectively disabling
 * it (a tight limit has been seen to hurt). */
static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, rx_buf_sz + 1);
}
4452
4453 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4454 {
4455 static const struct rtl_cfg2_info {
4456 u32 mac_version;
4457 u32 clk;
4458 u32 val;
4459 } cfg2_info [] = {
4460 { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4461 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4462 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4463 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
4464 };
4465 const struct rtl_cfg2_info *p = cfg2_info;
4466 unsigned int i;
4467 u32 clk;
4468
4469 clk = RTL_R8(Config2) & PCI_Clock_66MHz;
4470 for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
4471 if ((p->mac_version == mac_version) && (p->clk == clk)) {
4472 RTL_W32(0x7c, p->val);
4473 break;
4474 }
4475 }
4476 }
4477
4478 static void rtl_set_rx_mode(struct net_device *dev)
4479 {
4480 struct rtl8169_private *tp = netdev_priv(dev);
4481 void __iomem *ioaddr = tp->mmio_addr;
4482 u32 mc_filter[2]; /* Multicast hash filter */
4483 int rx_mode;
4484 u32 tmp = 0;
4485
4486 if (dev->flags & IFF_PROMISC) {
4487 /* Unconditionally log net taps. */
4488 netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
4489 rx_mode =
4490 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
4491 AcceptAllPhys;
4492 mc_filter[1] = mc_filter[0] = 0xffffffff;
4493 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
4494 (dev->flags & IFF_ALLMULTI)) {
4495 /* Too many to filter perfectly -- accept all multicasts. */
4496 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4497 mc_filter[1] = mc_filter[0] = 0xffffffff;
4498 } else {
4499 struct netdev_hw_addr *ha;
4500
4501 rx_mode = AcceptBroadcast | AcceptMyPhys;
4502 mc_filter[1] = mc_filter[0] = 0;
4503 netdev_for_each_mc_addr(ha, dev) {
4504 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
4505 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
4506 rx_mode |= AcceptMulticast;
4507 }
4508 }
4509
4510 if (dev->features & NETIF_F_RXALL)
4511 rx_mode |= (AcceptErr | AcceptRunt);
4512
4513 tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
4514
4515 if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
4516 u32 data = mc_filter[0];
4517
4518 mc_filter[0] = swab32(mc_filter[1]);
4519 mc_filter[1] = swab32(data);
4520 }
4521
4522 RTL_W32(MAR0 + 4, mc_filter[1]);
4523 RTL_W32(MAR0 + 0, mc_filter[0]);
4524
4525 RTL_W32(RxConfig, tmp);
4526 }
4527
/* Hardware start for the original 8169 family (VER_01..06): the oldest
 * chips (VER_01..04) must have TX/RX enabled and TxConfig programmed
 * before the descriptor setup, the rest after. */
static void rtl_hw_start_8169(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_init_rxcfg(tp);

	RTL_W8(EarlyTxThres, NoEarlyTx);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		dprintk("Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl8169_set_magic_reg(ioaddr, tp->mac_version);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_02 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_03 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_04) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
4599
4600 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4601 {
4602 if (tp->csi_ops.write)
4603 tp->csi_ops.write(tp, addr, value);
4604 }
4605
4606 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4607 {
4608 return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
4609 }
4610
4611 static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
4612 {
4613 u32 csi;
4614
4615 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4616 rtl_csi_write(tp, 0x070c, csi | bits);
4617 }
4618
/* Enable CSI access, variant 1 (top byte 0x17 - chip-specific magic). */
static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x17000000);
}
4623
/* Enable CSI access, variant 2 (top byte 0x27 - chip-specific magic). */
static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x27000000);
}
4628
/* Poll condition: CSIAR_FLAG signals completion of a CSI transaction. */
DECLARE_RTL_COND(rtl_csiar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CSIAR) & CSIAR_FLAG;
}
4635
/*
 * CSI write for standard 8168/8169 parts: load the data register first,
 * then kick off the transaction through CSIAR and wait for the busy flag
 * to drop. The CSIDR-before-CSIAR ordering is the hardware protocol.
 */
static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
4646
/*
 * CSI read for standard 8168/8169 parts: start the transaction, poll for
 * completion, then fetch the data register. Returns all-ones on timeout.
 */
static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
4657
/*
 * CSI write for RTL8402-class parts. Identical to r8169_csi_write() except
 * the transaction is additionally qualified with CSIAR_FUNC_NIC to target
 * the NIC function of the device.
 */
static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
		CSIAR_FUNC_NIC);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
4669
/*
 * CSI read for RTL8402-class parts, qualified with CSIAR_FUNC_NIC.
 * Returns all-ones on timeout, matching r8169_csi_read().
 */
static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
4680
4681 static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
4682 {
4683 struct csi_ops *ops = &tp->csi_ops;
4684
4685 switch (tp->mac_version) {
4686 case RTL_GIGA_MAC_VER_01:
4687 case RTL_GIGA_MAC_VER_02:
4688 case RTL_GIGA_MAC_VER_03:
4689 case RTL_GIGA_MAC_VER_04:
4690 case RTL_GIGA_MAC_VER_05:
4691 case RTL_GIGA_MAC_VER_06:
4692 case RTL_GIGA_MAC_VER_10:
4693 case RTL_GIGA_MAC_VER_11:
4694 case RTL_GIGA_MAC_VER_12:
4695 case RTL_GIGA_MAC_VER_13:
4696 case RTL_GIGA_MAC_VER_14:
4697 case RTL_GIGA_MAC_VER_15:
4698 case RTL_GIGA_MAC_VER_16:
4699 case RTL_GIGA_MAC_VER_17:
4700 ops->write = NULL;
4701 ops->read = NULL;
4702 break;
4703
4704 case RTL_GIGA_MAC_VER_37:
4705 case RTL_GIGA_MAC_VER_38:
4706 ops->write = r8402_csi_write;
4707 ops->read = r8402_csi_read;
4708 break;
4709
4710 default:
4711 ops->write = r8169_csi_write;
4712 ops->read = r8169_csi_read;
4713 break;
4714 }
4715 }
4716
/*
 * One EPHY fixup entry, applied by rtl_ephy_init() as:
 *   write(offset, (read(offset) & ~mask) | bits)
 * i.e. @mask selects bits to clear, @bits selects bits to set.
 */
struct ephy_info {
	unsigned int offset;	/* EPHY register number */
	u16 mask;		/* bits cleared before setting @bits */
	u16 bits;		/* bits OR-ed into the register */
};
4722
4723 static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4724 int len)
4725 {
4726 u16 w;
4727
4728 while (len-- > 0) {
4729 w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4730 rtl_ephy_write(tp, e->offset, w);
4731 e++;
4732 }
4733 }
4734
/* Clear the PCIe CLKREQ# enable bit in the Link Control register. */
static void rtl_disable_clock_request(struct pci_dev *pdev)
{
	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_CLKREQ_EN);
}
4740
/* Set the PCIe CLKREQ# enable bit in the Link Control register. */
static void rtl_enable_clock_request(struct pci_dev *pdev)
{
	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
				 PCI_EXP_LNKCTL_CLKREQ_EN);
}
4746
/*
 * CPlusCmd bits that the 8168 bring-up routines clear unconditionally
 * (debug/test and forced-duplex/flow-control overrides).
 */
#define R8168_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
4757
/*
 * Chip-specific bring-up for 8168B rev. b: disable beacon, clear the
 * CPlusCmd quirk bits and tune the PCIe max read request / no-snoop.
 */
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);

	rtl_tx_performance_tweak(pdev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4770
/*
 * Bring-up for 8168B rev. e/f: the 8168bb sequence plus the max Tx packet
 * size and a Config4 bit-0 clear (undocumented).
 */
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_start_8168bb(tp);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
4781
/*
 * Common tail of the 8168C/CP bring-up paths: link speed-down power
 * saving, beacon off, PCIe read request tuning, CLKREQ# off and the
 * CPlusCmd quirk-bit clear.
 */
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_disable_clock_request(pdev);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4797
/*
 * 8168CP variant 1: CSI enable, a table of vendor EPHY fixups, then the
 * common 8168cp tail. EPHY values are undocumented vendor magic.
 */
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168cp[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0042 },
		{ 0x06, 0x0080,	0x0000 },
		{ 0x07, 0,	0x2000 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));

	__rtl_hw_start_8168cp(tp);
}
4814
/*
 * 8168CP variant 2: like the common tail but without Speed_down and
 * without disabling CLKREQ#.
 */
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4828
/*
 * 8168CP variant 3: variant 2 plus a DBG_REG poke (vendor magic) and the
 * max Tx packet size setting.
 */
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Magic. */
	RTL_W8(DBG_REG, 0x20);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4847
/*
 * 8168C variant 1: NAK workaround bits in DBG_REG plus EPHY fixups,
 * followed by the common 8168cp tail.
 */
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168c_1[] = {
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0002 },
		{ 0x06, 0x0080,	0x0000 }
	};

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

	rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));

	__rtl_hw_start_8168cp(tp);
}
4865
/* 8168C variant 2: its own EPHY fixups plus the common 8168cp tail. */
static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168c_2[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x03, 0x0400,	0x0220 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));

	__rtl_hw_start_8168cp(tp);
}
4879
/* 8168C variant 3 is configured identically to variant 2. */
static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8168c_2(tp);
}
4884
/* 8168C variant 4: no EPHY fixups needed, just the common tail. */
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
	rtl_csi_access_enable_2(tp);

	__rtl_hw_start_8168cp(tp);
}
4891
/*
 * 8168D bring-up: CSI enable, CLKREQ# off, max Tx size, PCIe read
 * request tuning and the CPlusCmd quirk-bit clear.
 */
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_disable_clock_request(pdev);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4907
/*
 * 8168DP bring-up: like 8168d but with CSI access pattern 1 and no
 * CPlusCmd quirk clearing.
 */
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);
}
4921
4922 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4923 {
4924 void __iomem *ioaddr = tp->mmio_addr;
4925 struct pci_dev *pdev = tp->pci_dev;
4926 static const struct ephy_info e_info_8168d_4[] = {
4927 { 0x0b, ~0, 0x48 },
4928 { 0x19, 0x20, 0x50 },
4929 { 0x0c, ~0, 0x20 }
4930 };
4931 int i;
4932
4933 rtl_csi_access_enable_1(tp);
4934
4935 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4936
4937 RTL_W8(MaxTxPacketSize, TxPacketMax);
4938
4939 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4940 const struct ephy_info *e = e_info_8168d_4 + i;
4941 u16 w;
4942
4943 w = rtl_ephy_read(tp, e->offset);
4944 rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
4945 }
4946
4947 rtl_enable_clock_request(pdev);
4948 }
4949
/*
 * 8168E variant 1 bring-up: a large block of vendor EPHY fixups, PCIe
 * tuning, CLKREQ# off, a Tx FIFO pointer reset pulse and SPI disable.
 */
static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_1[] = {
		{ 0x00, 0x0200,	0x0100 },
		{ 0x00, 0x0000,	0x0004 },
		{ 0x06, 0x0002,	0x0001 },
		{ 0x06, 0x0000,	0x0030 },
		{ 0x07, 0x0000,	0x2000 },
		{ 0x00, 0x0000,	0x0020 },
		{ 0x03, 0x5800,	0x2000 },
		{ 0x03, 0x0000,	0x0001 },
		{ 0x01, 0x0800,	0x1000 },
		{ 0x07, 0x0000,	0x4000 },
		{ 0x1e, 0x0000,	0x2000 },
		{ 0x19, 0xffff,	0xfe6c },
		{ 0x0a, 0x0000,	0x0040 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);

	/* Reset tx FIFO pointer */
	RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
	RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);

	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
4986
/*
 * 8168E variant 2 bring-up: EPHY fixups, a series of ERI (extended
 * register interface) configuration writes, early-Tx size, auto-FIFO
 * Tx config, EEE LED frequency adjust and power-management bits.
 * The ERI values are undocumented vendor magic; do not reorder.
 */
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_2[] = {
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_csi_access_enable_1(tp);

	rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5025
/*
 * Common 8168F-family base config, shared by rtl_hw_start_8168f_1()
 * and rtl_hw_start_8411(): ERI programming, early-Tx size, auto-FIFO
 * and power-management bits. ERI values are vendor magic; keep order.
 */
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5056
/*
 * 8168F variant 1: base 8168f config plus its own EPHY fixups, one more
 * ERI tweak and the EEE LED frequency adjustment.
 */
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x08, 0x0001,	0x0002 },
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
}
5076
5077 static void rtl_hw_start_8411(struct rtl8169_private *tp)
5078 {
5079 static const struct ephy_info e_info_8168f_1[] = {
5080 { 0x06, 0x00c0, 0x0020 },
5081 { 0x0f, 0xffff, 0x5200 },
5082 { 0x1e, 0x0000, 0x4000 },
5083 { 0x19, 0x0000, 0x0224 }
5084 };
5085
5086 rtl_hw_start_8168f(tp);
5087
5088 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5089
5090 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
5091 }
5092
/*
 * 8168G variant 1 bring-up: ERI programming, Tx/Rx enable, RXDV gate
 * release, early-Tx size and EEE LED adjust. ERI values are vendor
 * magic; keep the ordering as-is.
 */
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
}
5122
/*
 * Generic 8168 bring-up: common register setup (Rx size, C+ command,
 * interrupt mitigation, descriptor rings, Tx config), then dispatch to
 * the chip-revision-specific routine before finally enabling Tx/Rx.
 */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x5151);

	/* Work around for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->event_slow |= RxFIFOOver | PCSTimeout;
		tp->event_slow &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_mode(dev);

	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));

	/* PCI commit (see the IntrMask read comment in rtl_hw_start_8169). */
	RTL_R8(IntrMask);

	/* Revision-specific configuration. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		rtl_hw_start_8168bb(tp);
		break;

	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		rtl_hw_start_8168bef(tp);
		break;

	case RTL_GIGA_MAC_VER_18:
		rtl_hw_start_8168cp_1(tp);
		break;

	case RTL_GIGA_MAC_VER_19:
		rtl_hw_start_8168c_1(tp);
		break;

	case RTL_GIGA_MAC_VER_20:
		rtl_hw_start_8168c_2(tp);
		break;

	case RTL_GIGA_MAC_VER_21:
		rtl_hw_start_8168c_3(tp);
		break;

	case RTL_GIGA_MAC_VER_22:
		rtl_hw_start_8168c_4(tp);
		break;

	case RTL_GIGA_MAC_VER_23:
		rtl_hw_start_8168cp_2(tp);
		break;

	case RTL_GIGA_MAC_VER_24:
		rtl_hw_start_8168cp_3(tp);
		break;

	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
		rtl_hw_start_8168d(tp);
		break;

	case RTL_GIGA_MAC_VER_28:
		rtl_hw_start_8168d_4(tp);
		break;

	case RTL_GIGA_MAC_VER_31:
		rtl_hw_start_8168dp(tp);
		break;

	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_hw_start_8168e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl_hw_start_8168e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
		rtl_hw_start_8168f_1(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl_hw_start_8411(tp);
		break;

	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_hw_start_8168g_1(tp);
		break;

	default:
		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
			dev->name, tp->mac_version);
		break;
	}

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* No early-rx interrupts. */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
5241
/*
 * CPlusCmd bits cleared unconditionally during 810x bring-up
 * (same set as R8168_CPCMD_QUIRK_MASK, kept separate per family).
 */
#define R810X_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
5252
/*
 * 8102E variant 1 bring-up: NAK workaround, PCIe tuning, LED/power
 * Config1 setup and a block of vendor EPHY fixups. If both LED select
 * bits end up set, LEDS0 is dropped (hardware constraint, presumably -
 * not documented here).
 */
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01,	0, 0x6e65 },
		{ 0x02,	0, 0x091f },
		{ 0x03,	0, 0xc2f9 },
		{ 0x06,	0, 0xafb5 },
		{ 0x07,	0, 0x0e00 },
		{ 0x19,	0, 0xec80 },
		{ 0x01,	0, 0x2e65 },
		{ 0x01,	0, 0x6e65 }
	};
	u8 cfg1;

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, FIX_NAK_1);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	cfg1 = RTL_R8(Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(Config1, cfg1 & ~LEDS0);

	rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
5285
/* 8102E variant 2: minimal config - PCIe tuning, Config1/3 setup. */
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
}
5298
/* 8102E variant 3: variant 2 plus one EPHY write (vendor magic value). */
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8102e_2(tp);

	rtl_ephy_write(tp, 0x03, 0xc2f9);
}
5305
/*
 * 8105E variant 1 bring-up: ASPM workaround, early tally counter off,
 * MCU/OOB setup and a table of vendor EPHY fixups.
 */
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8105e_1[] = {
		{ 0x07,	0, 0x4000 },
		{ 0x19,	0, 0x0200 },
		{ 0x19,	0, 0x0020 },
		{ 0x1e,	0, 0x2000 },
		{ 0x03,	0, 0x0001 },
		{ 0x19,	0, 0x0100 },
		{ 0x19,	0, 0x0004 },
		{ 0x0a,	0, 0x0020 }
	};

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	/* Disable Early Tally Counter */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);

	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);

	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
5331
/* 8105E variant 2: variant 1 plus bit 15 set in EPHY register 0x1e. */
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
	rtl_hw_start_8105e_1(tp);
	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
5337
/*
 * RTL8402 bring-up: ASPM workaround, auto-FIFO Tx config, EPHY fixups
 * and a block of ERI writes (vendor magic values; keep the ordering).
 */
static void rtl_hw_start_8402(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8402[] = {
		{ 0x19,	0xffff, 0xff64 },
		{ 0x1e,	0, 0x4000 }
	};

	rtl_csi_access_enable_2(tp);

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));

	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
}
5366
/*
 * RTL8106 bring-up: ASPM workaround, LAN-disable/early-tally bits in
 * MISC, MCU OOB setup and PFM off.
 */
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
5378
/*
 * Generic 810x bring-up: chip-specific configuration first, then the
 * common register setup (Rx size, C+ command quirk clear, descriptor
 * rings, Tx/Rx enable, rx mode).
 */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
		tp->event_slow &= ~RxFIFOOver;

	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_16)
		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
					 PCI_EXP_DEVCTL_NOSNOOP_EN);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	/* Versions not listed need no extra configuration (no default). */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
		rtl_hw_start_8102e_1(tp);
		break;

	case RTL_GIGA_MAC_VER_08:
		rtl_hw_start_8102e_3(tp);
		break;

	case RTL_GIGA_MAC_VER_09:
		rtl_hw_start_8102e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_29:
		rtl_hw_start_8105e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_30:
		rtl_hw_start_8105e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl_hw_start_8402(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl_hw_start_8106(tp);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_set_rx_tx_config_registers(tp);

	/* PCI commit. */
	RTL_R8(IntrMask);

	rtl_set_rx_mode(dev);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
5446
5447 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5448 {
5449 struct rtl8169_private *tp = netdev_priv(dev);
5450
5451 if (new_mtu < ETH_ZLEN ||
5452 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
5453 return -EINVAL;
5454
5455 if (new_mtu > ETH_DATA_LEN)
5456 rtl_hw_jumbo_enable(tp);
5457 else
5458 rtl_hw_jumbo_disable(tp);
5459
5460 dev->mtu = new_mtu;
5461 netdev_update_features(dev);
5462
5463 return 0;
5464 }
5465
/*
 * Poison an RX descriptor so the NIC will never use it: point it at a
 * recognizable bogus address and clear DescOwn (and reserved bits).
 */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
5471
/*
 * Release one RX buffer: unmap its DMA mapping (before freeing the
 * memory), free the buffer, clear the slot pointer and poison the
 * descriptor so the NIC cannot reuse it.
 */
static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
				     void **data_buff, struct RxDesc *desc)
{
	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
			 DMA_FROM_DEVICE);

	kfree(*data_buff);
	*data_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
5482
5483 static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
5484 {
5485 u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
5486
5487 desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
5488 }
5489
/*
 * Attach a DMA buffer to an RX descriptor. The wmb() guarantees the
 * address is visible before DescOwn is set in rtl8169_mark_to_asic() -
 * otherwise the NIC could see an owned descriptor with a stale address.
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
5497
/* Round a buffer pointer up to the next 16-byte boundary. */
static inline void *rtl8169_align(void *data)
{
	return (void *)ALIGN((long)data, 16);
}
5502
/*
 * Allocate and DMA-map one RX buffer on the device's NUMA node and
 * attach it to @desc. If the first allocation isn't 16-byte aligned,
 * it is retried with 15 bytes of slack so rtl8169_align() can fix it.
 *
 * Returns the raw buffer pointer, or NULL on allocation/mapping failure.
 * NOTE(review): despite the declared return type, this returns the raw
 * kmalloc buffer, not a struct sk_buff - callers store it in
 * Rx_databuff as void *; the skb is built later on receive.
 */
static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
					     struct RxDesc *desc)
{
	void *data;
	dma_addr_t mapping;
	struct device *d = &tp->pci_dev->dev;
	struct net_device *dev = tp->dev;
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;

	data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
	if (!data)
		return NULL;

	if (rtl8169_align(data) != data) {
		kfree(data);
		/* Over-allocate so the aligned start still fits rx_buf_sz. */
		data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
		if (!data)
			return NULL;
	}

	mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
		goto err_out;
	}

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
	return data;

err_out:
	kfree(data);
	return NULL;
}
5538
5539 static void rtl8169_rx_clear(struct rtl8169_private *tp)
5540 {
5541 unsigned int i;
5542
5543 for (i = 0; i < NUM_RX_DESC; i++) {
5544 if (tp->Rx_databuff[i]) {
5545 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5546 tp->RxDescArray + i);
5547 }
5548 }
5549 }
5550
/* Flag @desc as the final entry of the RX ring (NIC wraps after it). */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
5555
5556 static int rtl8169_rx_fill(struct rtl8169_private *tp)
5557 {
5558 unsigned int i;
5559
5560 for (i = 0; i < NUM_RX_DESC; i++) {
5561 void *data;
5562
5563 if (tp->Rx_databuff[i])
5564 continue;
5565
5566 data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
5567 if (!data) {
5568 rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
5569 goto err_out;
5570 }
5571 tp->Rx_databuff[i] = data;
5572 }
5573
5574 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
5575 return 0;
5576
5577 err_out:
5578 rtl8169_rx_clear(tp);
5579 return -ENOMEM;
5580 }
5581
/*
 * Initialize both rings for a fresh start: reset the ring indexes,
 * zero the TX bookkeeping and RX buffer table, then fill the RX ring.
 * Returns 0 or -ENOMEM from rtl8169_rx_fill().
 */
static int rtl8169_init_ring(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));

	return rtl8169_rx_fill(tp);
}
5593
/*
 * Unmap one TX buffer and scrub its descriptor and bookkeeping entry.
 * The length is cached before being zeroed since the unmap needs it.
 * Note: tx_skb->skb is NOT cleared here - callers handle the skb.
 */
static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);

	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
5606
5607 static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
5608 unsigned int n)
5609 {
5610 unsigned int i;
5611
5612 for (i = 0; i < n; i++) {
5613 unsigned int entry = (start + i) % NUM_TX_DESC;
5614 struct ring_info *tx_skb = tp->tx_skb + entry;
5615 unsigned int len = tx_skb->len;
5616
5617 if (len) {
5618 struct sk_buff *skb = tx_skb->skb;
5619
5620 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5621 tp->TxDescArray + entry);
5622 if (skb) {
5623 tp->dev->stats.tx_dropped++;
5624 dev_kfree_skb(skb);
5625 tx_skb->skb = NULL;
5626 }
5627 }
5628 }
5629 }
5630
/* Drop every pending TX entry and reset the TX ring indexes. */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
	tp->cur_tx = tp->dirty_tx = 0;
}
5636
/*
 * Full device reset from process context (e.g. after a Tx timeout):
 * quiesce NAPI and the queue, wait out in-flight softirq work, reset
 * the hardware, recycle the rings, then restart everything. The
 * quiesce-before-reset ordering is essential; do not reorder.
 */
static void rtl_reset_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	int i;

	napi_disable(&tp->napi);
	netif_stop_queue(dev);
	synchronize_sched();

	rtl8169_hw_reset(tp);

	/* Give all RX buffers back to the NIC (data is discarded). */
	for (i = 0; i < NUM_RX_DESC; i++)
		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);

	rtl8169_tx_clear(tp);
	rtl8169_init_ring_indexes(tp);

	napi_enable(&tp->napi);
	rtl_hw_start(dev);
	netif_wake_queue(dev);
	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
}
5659
/* ndo_tx_timeout handler: defer a full reset to the driver workqueue. */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5666
/*
 * Map and queue the paged fragments of @skb into the TX ring, starting
 * one entry after tp->cur_tx (the head fragment occupies cur_tx itself).
 * The skb pointer and the LastFrag flag go on the final fragment only.
 *
 * Returns the number of fragments queued, or -EIO if a DMA mapping
 * failed - in which case every fragment mapped so far is rolled back.
 * Note: DescOwn is NOT set here; the caller flips ownership once the
 * whole packet is in place.
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 *opts)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc * uninitialized_var(txd);
	struct device *d = &tp->pci_dev->dev;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		const skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = skb_frag_size(frag);
		addr = skb_frag_address(frag);
		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(d, mapping))) {
			if (net_ratelimit())
				netif_err(tp, drv, tp->dev,
					  "Failed to map TX fragments DMA!\n");
			goto err_out;
		}

		/* Anti gcc 2.95.3 bugware (sic) */
		status = opts[0] | len |
			(RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->opts2 = cpu_to_le32(opts[1]);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;

err_out:
	/* Roll back only the fragments this call mapped. */
	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
	return -EIO;
}
5717
/*
 * Fill the TSO / hardware-checksum bits of the opts[] array for @skb.
 * The descriptor layout differs per txd_version, so the per-chip
 * tx_desc_info table supplies both which opts word to use
 * (info->opts_offset) and the MSS bit position (info->mss_shift).
 * Non-TCP/UDP CHECKSUM_PARTIAL packets only trigger a one-shot warning;
 * they are sent without hardware checksum.
 */
static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
				    struct sk_buff *skb, u32 *opts)
{
	const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
	u32 mss = skb_shinfo(skb)->gso_size;
	int offset = info->opts_offset;

	if (mss) {
		opts[0] |= TD_LSO;
		opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			opts[offset] |= info->checksum.tcp;
		else if (ip->protocol == IPPROTO_UDP)
			opts[offset] |= info->checksum.udp;
		else
			WARN_ON_ONCE(1);
	}
}
5739
/*
 * ndo_start_xmit hook.  Maps the linear head here and the paged
 * fragments via rtl8169_xmit_frags().  DescOwn is written to the first
 * descriptor last, after a wmb(), so the NIC never sees a partially
 * initialized frame.  Returns NETDEV_TX_OK (including on drop) or
 * NETDEV_TX_BUSY only when the ring is unexpectedly full.
 */
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts[2];
	int frags;

	/* The queue should have been stopped before the ring filled up. */
	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_stop_0;
	}

	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop_0;

	len = skb_headlen(skb);
	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
		goto err_dma_0;
	}

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);

	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
	opts[0] = DescOwn;

	rtl8169_tso_csum(tp, skb, opts);

	frags = rtl8169_xmit_frags(tp, skb, opts);
	if (frags < 0)
		goto err_dma_1;
	else if (frags)
		opts[0] |= FirstFrag;
	else {
		opts[0] |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	txd->opts2 = cpu_to_le32(opts[1]);

	skb_tx_timestamp(skb);

	/* All descriptor fields must be visible before DescOwn is set. */
	wmb();

	/* Anti gcc 2.95.3 bugware (sic) */
	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	tp->cur_tx += frags + 1;

	wmb();

	/* Kick the NIC's normal-priority Tx queue. */
	RTL_W8(TxPoll, NPQ);

	mmiowb();

	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
		 * not miss a ring update when it notices a stopped queue.
		 */
		smp_wmb();
		netif_stop_queue(dev);
		/* Sync with rtl_tx:
		 * - publish queue status and cur_tx ring index (write barrier)
		 * - refresh dirty_tx ring index (read barrier).
		 * May the current thread have a pessimistic view of the ring
		 * status and forget to wake up queue, a racing rtl_tx thread
		 * can't.
		 */
		smp_mb();
		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

err_dma_1:
	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;

err_stop_0:
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	return NETDEV_TX_BUSY;
}
5837
/*
 * Handle a SYSErr (PCI bus error) event: log the PCI command/status
 * registers, clear the sticky error bits, optionally back off from
 * 64-bit DMA addressing, then schedule a full reset.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
		  pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* PCI_STATUS error bits are write-1-to-clear. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
		void __iomem *ioaddr = tp->mmio_addr;

		netif_info(tp, intr, dev, "disabling PCI DAC\n");
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5884
/*
 * Tx completion (NAPI context): walk descriptors from dirty_tx to
 * cur_tx, unmap and free every buffer the NIC has released (DescOwn
 * clear), update stats, then wake the queue if it was flow-stopped.
 */
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
	unsigned int dirty_tx, tx_left;

	dirty_tx = tp->dirty_tx;
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 status;

		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
				     tp->TxDescArray + entry);
		/* The skb is only attached to the LastFrag descriptor. */
		if (status & LastFrag) {
			u64_stats_update_begin(&tp->tx_stats.syncp);
			tp->tx_stats.packets++;
			tp->tx_stats.bytes += tx_skb->skb->len;
			u64_stats_update_end(&tp->tx_stats.syncp);
			dev_kfree_skb(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Sync with rtl8169_start_xmit:
		 * - publish dirty_tx ring index (write barrier)
		 * - refresh cur_tx ring index and queue status (read barrier)
		 * May the current thread miss the stopped queue condition,
		 * a racing xmit thread can only have a right view of the
		 * ring status.
		 */
		smp_mb();
		if (netif_queue_stopped(dev) &&
		    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		if (tp->cur_tx != dirty_tx) {
			void __iomem *ioaddr = tp->mmio_addr;

			RTL_W8(TxPoll, NPQ);
		}
	}
}
5944
5945 static inline int rtl8169_fragmented_frame(u32 status)
5946 {
5947 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
5948 }
5949
5950 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
5951 {
5952 u32 status = opts1 & RxProtoMask;
5953
5954 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
5955 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
5956 skb->ip_summed = CHECKSUM_UNNECESSARY;
5957 else
5958 skb_checksum_none_assert(skb);
5959 }
5960
/*
 * Copy a received frame out of the (still DMA-mapped) Rx buffer into a
 * freshly allocated skb.  The buffer is synced to the CPU before the
 * copy and handed back to the device afterwards, so the descriptor can
 * be immediately reused.  Returns NULL on allocation failure (caller
 * drops the packet).
 */
static struct sk_buff *rtl8169_try_rx_copy(void *data,
					   struct rtl8169_private *tp,
					   int pkt_size,
					   dma_addr_t addr)
{
	struct sk_buff *skb;
	struct device *d = &tp->pci_dev->dev;

	data = rtl8169_align(data);
	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
	prefetch(data);
	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (skb)
		memcpy(skb->data, data, pkt_size);
	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);

	return skb;
}
5979
/*
 * Rx processing (NAPI context): consume up to @budget completed Rx
 * descriptors starting at cur_rx.  Error frames update the error
 * counters (and may be delivered anyway under NETIF_F_RXALL); good
 * frames are copied into a new skb and fed to GRO.  Every processed
 * descriptor is handed back to the NIC via rtl8169_mark_to_asic().
 * Returns the number of descriptors consumed.
 */
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
	unsigned int cur_rx, rx_left;
	unsigned int count;

	cur_rx = tp->cur_rx;
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = min(rx_left, budget);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		/* Read opts1 after the NIC's DMA write is visible. */
		rmb();
		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;

		if (status & DescOwn)
			break;
		if (unlikely(status & RxRES)) {
			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
				   status);
			dev->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				dev->stats.rx_length_errors++;
			if (status & RxCRC)
				dev->stats.rx_crc_errors++;
			if (status & RxFOVF) {
				/* FIFO overflow: recover with a full reset. */
				rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
				dev->stats.rx_fifo_errors++;
			}
			if ((status & (RxRUNT | RxCRC)) &&
			    !(status & (RxRWT | RxFOVF)) &&
			    (dev->features & NETIF_F_RXALL))
				goto process_pkt;

			rtl8169_mark_to_asic(desc, rx_buf_sz);
		} else {
			struct sk_buff *skb;
			dma_addr_t addr;
			int pkt_size;

process_pkt:
			addr = le64_to_cpu(desc->addr);
			/* Strip the 4-byte FCS unless the user wants it. */
			if (likely(!(dev->features & NETIF_F_RXFCS)))
				pkt_size = (status & 0x00003fff) - 4;
			else
				pkt_size = status & 0x00003fff;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				dev->stats.rx_dropped++;
				dev->stats.rx_length_errors++;
				rtl8169_mark_to_asic(desc, rx_buf_sz);
				continue;
			}

			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
						  tp, pkt_size, addr);
			rtl8169_mark_to_asic(desc, rx_buf_sz);
			if (!skb) {
				dev->stats.rx_dropped++;
				continue;
			}

			rtl8169_rx_csum(skb, status);
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			rtl8169_rx_vlan_tag(desc, skb);

			napi_gro_receive(&tp->napi, skb);

			u64_stats_update_begin(&tp->rx_stats.syncp);
			tp->rx_stats.packets++;
			tp->rx_stats.bytes += pkt_size;
			u64_stats_update_end(&tp->rx_stats.syncp);
		}

		/* Work around for AMD plateform. */
		if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
		    (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
			desc->opts2 = 0;
			cur_rx++;
		}
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	/* Buffers are recycled in place, so dirty_rx tracks cur_rx. */
	tp->dirty_rx += count;

	return count;
}
6078
/*
 * Hard interrupt handler.  0xffff means the chip is gone (hot-unplug or
 * PCI master abort), so it is ignored.  For any event of interest the
 * handler only masks further interrupts and defers all work to NAPI.
 */
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rtl8169_private *tp = netdev_priv(dev);
	int handled = 0;
	u16 status;

	status = rtl_get_events(tp);
	if (status && status != 0xffff) {
		status &= RTL_EVENT_NAPI | tp->event_slow;
		if (status) {
			handled = 1;

			rtl_irq_disable(tp);
			napi_schedule(&tp->napi);
		}
	}
	return IRQ_RETVAL(handled);
}
6098
/*
 * Workqueue context.
 */
/*
 * Handle the "slow" interrupt events (Rx FIFO overflow, PCI error,
 * link change) that rtl8169_poll() deferred here, then re-enable the
 * full interrupt mask.
 */
static void rtl_slow_event_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	u16 status;

	status = rtl_get_events(tp) & tp->event_slow;
	rtl_ack_events(tp, status);

	if (unlikely(status & RxFIFOOver)) {
		switch (tp->mac_version) {
		/* Work around for rx fifo overflow */
		case RTL_GIGA_MAC_VER_11:
			netif_stop_queue(dev);
			/* XXX - Hack alert. See rtl_task(). */
			set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
			/* fall through */
		default:
			break;
		}
	}

	if (unlikely(status & SYSErr))
		rtl8169_pcierr_interrupt(dev);

	if (status & LinkChg)
		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);

	rtl_irq_enable_all(tp);
}
6130
/*
 * Single workqueue entry point: run each pending deferred action
 * (slow events, reset, PHY work) whose flag bit is set, in table
 * order.  Bails out early when the device is down or tasks have been
 * disabled (close/suspend paths clear RTL_FLAG_TASK_ENABLED).
 */
static void rtl_task(struct work_struct *work)
{
	static const struct {
		int bitnr;
		void (*action)(struct rtl8169_private *);
	} rtl_work[] = {
		/* XXX - keep rtl_slow_event_work() as first element. */
		{ RTL_FLAG_TASK_SLOW_PENDING,	rtl_slow_event_work },
		{ RTL_FLAG_TASK_RESET_PENDING,	rtl_reset_work },
		{ RTL_FLAG_TASK_PHY_PENDING,	rtl_phy_work }
	};
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, wk.work);
	struct net_device *dev = tp->dev;
	int i;

	rtl_lock_work(tp);

	if (!netif_running(dev) ||
	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
		goto out_unlock;

	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
		bool pending;

		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
		if (pending)
			rtl_work[i].action(tp);
	}

out_unlock:
	rtl_unlock_work(tp);
}
6164
/*
 * NAPI poll handler: ack the fast events, run Rx and Tx completion,
 * and punt any slow event to the workqueue (keeping it masked until
 * rtl_slow_event_work() re-enables it).  Interrupts are only unmasked
 * after napi_complete(), once the budget was not exhausted.
 */
static int rtl8169_poll(struct napi_struct *napi, int budget)
{
	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
	struct net_device *dev = tp->dev;
	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
	int work_done= 0;
	u16 status;

	status = rtl_get_events(tp);
	rtl_ack_events(tp, status & ~tp->event_slow);

	if (status & RTL_EVENT_NAPI_RX)
		work_done = rtl_rx(dev, tp, (u32) budget);

	if (status & RTL_EVENT_NAPI_TX)
		rtl_tx(dev, tp);

	if (status & tp->event_slow) {
		/* Keep slow events masked until the workqueue handled them. */
		enable_mask &= ~tp->event_slow;

		rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
	}

	if (work_done < budget) {
		napi_complete(napi);

		rtl_irq_enable(tp, enable_mask);
		mmiowb();
	}

	return work_done;
}
6197
/*
 * Fold the hardware's RxMissed counter into dev->stats and clear it.
 * Only the old 8169 chips (<= MAC_VER_06) expose this register; it is
 * a 24-bit counter, hence the 0xffffff mask.
 */
static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
		return;

	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
	RTL_W32(RxMissed, 0);
}
6208
/*
 * Bring the datapath down: stop the timer, NAPI and the Tx queue,
 * reset the chip, harvest the final Rx-missed count, then free both
 * rings' buffers and power the PLL down.  Caller holds the work mutex
 * and has already cleared RTL_FLAG_TASK_ENABLED (see rtl8169_close()).
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	del_timer_sync(&tp->timer);

	napi_disable(&tp->napi);
	netif_stop_queue(dev);

	rtl8169_hw_reset(tp);
	/*
	 * At this point device interrupts can not be enabled in any function,
	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
	 * and napi is disabled (rtl8169_poll).
	 */
	rtl8169_rx_missed(dev, ioaddr);

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);

	rtl_pll_power_down(tp);
}
6236
/*
 * ndo_stop hook: snapshot the hardware counters, tear the datapath
 * down under the work mutex, free the IRQ and both descriptor rings.
 * Runtime PM is pinned for the duration.  Always returns 0.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	pm_runtime_get_sync(&pdev->dev);

	/* Update counters before going down */
	rtl8169_update_counters(dev);

	rtl_lock_work(tp);
	/* Prevent rtl_task() from doing anything from here on. */
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	rtl8169_down(dev);
	rtl_unlock_work(tp);

	free_irq(pdev->irq, dev);

	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
6266
6267 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by netconsole and friends with IRQs off. */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(tp->pci_dev->irq, dev);
}
6274 #endif
6275
/*
 * ndo_open hook: allocate both descriptor rings, populate the Rx ring,
 * load firmware, grab the IRQ and start the hardware.  Failures unwind
 * through the usual goto-cleanup chain.  Returns 0 or a negative errno.
 */
static int rtl_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * Rx and Tx descriptors needs 256 bytes alignment.
	 * dma_alloc_coherent provides more.
	 */
	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->wk.work, rtl_task);

	smp_mb();

	rtl_request_firmware(tp);

	retval = request_irq(pdev->irq, rtl8169_interrupt,
			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_fw_2;

	rtl_lock_work(tp);

	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	napi_enable(&tp->napi);

	rtl8169_init_phy(dev, tp);

	__rtl8169_set_features(dev, dev->features);

	rtl_pll_power_up(tp);

	rtl_hw_start(dev);

	netif_start_queue(dev);

	rtl_unlock_work(tp);

	tp->saved_wolopts = 0;
	pm_runtime_put_noidle(&pdev->dev);

	rtl8169_check_link_status(dev, tp, ioaddr);
out:
	return retval;

err_release_fw_2:
	rtl_release_firmware(tp);
	rtl8169_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
err_free_tx_0:
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
	goto out;
}
6355
/*
 * ndo_get_stats64 hook.  The Rx/Tx packet and byte counters are
 * maintained in softirq context behind u64_stats seqcounts, hence the
 * fetch/retry loops; the remaining fields come straight from
 * dev->stats.  Also folds in the hardware Rx-missed counter while the
 * interface is up.
 */
static struct rtnl_link_stats64 *
rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int start;

	if (netif_running(dev))
		rtl8169_rx_missed(dev, ioaddr);

	do {
		start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
		stats->rx_packets = tp->rx_stats.packets;
		stats->rx_bytes	= tp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));


	do {
		start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
		stats->tx_packets = tp->tx_stats.packets;
		stats->tx_bytes	= tp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));

	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_crc_errors	= dev->stats.rx_crc_errors;
	stats->rx_fifo_errors	= dev->stats.rx_fifo_errors;
	stats->rx_missed_errors = dev->stats.rx_missed_errors;

	return stats;
}
6389
/*
 * Common suspend path (system sleep, runtime suspend and shutdown):
 * detach the interface, stop NAPI, disable deferred tasks and power
 * the PLL down.  No-op when the interface is already down.
 */
static void rtl8169_net_suspend(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	rtl_lock_work(tp);
	napi_disable(&tp->napi);
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	rtl_pll_power_down(tp);
}
6407
6408 #ifdef CONFIG_PM
6409
/* System-sleep suspend callback (also serves freeze/poweroff). */
static int rtl8169_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);

	rtl8169_net_suspend(dev);

	return 0;
}
6419
/*
 * Common resume tail: reattach the interface, power the PLL up,
 * re-enable NAPI and deferred tasks, then schedule a reset to bring
 * the rings and hardware back to a known state.
 */
static void __rtl8169_resume(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	netif_device_attach(dev);

	rtl_pll_power_up(tp);

	rtl_lock_work(tp);
	napi_enable(&tp->napi);
	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
6435
/* System-sleep resume callback: re-init the PHY, then the datapath. */
static int rtl8169_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_phy(dev, tp);

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
6449
/*
 * Runtime-PM suspend: save the current WoL configuration, arm the chip
 * to wake on any event, then go through the common suspend path.
 * A NULL TxDescArray means the interface is closed — nothing to do.
 */
static int rtl8169_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	tp->saved_wolopts = __rtl8169_get_wol(tp);
	__rtl8169_set_wol(tp, WAKE_ANY);
	rtl_unlock_work(tp);

	rtl8169_net_suspend(dev);

	return 0;
}
6468
/*
 * Runtime-PM resume: restore the WoL configuration saved at runtime
 * suspend, re-init the PHY and bring the datapath back up.
 */
static int rtl8169_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	__rtl8169_set_wol(tp, tp->saved_wolopts);
	tp->saved_wolopts = 0;
	rtl_unlock_work(tp);

	rtl8169_init_phy(dev, tp);

	__rtl8169_resume(dev);

	return 0;
}
6489
6490 static int rtl8169_runtime_idle(struct device *device)
6491 {
6492 struct pci_dev *pdev = to_pci_dev(device);
6493 struct net_device *dev = pci_get_drvdata(pdev);
6494 struct rtl8169_private *tp = netdev_priv(dev);
6495
6496 return tp->TxDescArray ? -EBUSY : 0;
6497 }
6498
/* Power-management callbacks: system sleep, hibernation and runtime PM. */
static const struct dev_pm_ops rtl8169_pm_ops = {
	.suspend		= rtl8169_suspend,
	.resume			= rtl8169_resume,
	.freeze			= rtl8169_suspend,
	.thaw			= rtl8169_resume,
	.poweroff		= rtl8169_suspend,
	.restore		= rtl8169_resume,
	.runtime_suspend	= rtl8169_runtime_suspend,
	.runtime_resume		= rtl8169_runtime_resume,
	.runtime_idle		= rtl8169_runtime_idle,
};
6510
6511 #define RTL8169_PM_OPS (&rtl8169_pm_ops)
6512
6513 #else /* !CONFIG_PM */
6514
6515 #define RTL8169_PM_OPS NULL
6516
6517 #endif /* !CONFIG_PM */
6518
/*
 * Keep the receiver running across shutdown on 8168b-class chips
 * (MAC_VER_11/12/17), where Wake-on-LAN does not work with Rx disabled:
 * drop bus mastering, re-enable only CmdRxEnb, and read ChipCmd back to
 * post the write.
 */
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* WoL fails with 8168b when the receiver is disabled. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		pci_clear_master(tp->pci_dev);

		RTL_W8(ChipCmd, CmdRxEnb);
		/* PCI commit */
		RTL_R8(ChipCmd);
		break;
	default:
		break;
	}
}
6538
/*
 * PCI shutdown hook: quiesce the device, restore the permanent MAC
 * address (so WoL matches the factory address), and on real power-off
 * apply the WoL quirks and drop the device into D3hot with wake
 * enabled when WoL is armed.
 */
static void rtl_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = &pdev->dev;

	pm_runtime_get_sync(d);

	rtl8169_net_suspend(dev);

	/* Restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl8169_hw_reset(tp);

	if (system_state == SYSTEM_POWER_OFF) {
		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
			rtl_wol_suspend_quirk(tp);
			rtl_wol_shutdown_quirk(tp);
		}

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	pm_runtime_put_noidle(d);
}
6566
/*
 * PCI remove hook: stop the 8168dp-style DASH firmware where present
 * (MAC_VER_27/28/31), flush deferred work, unregister the netdevice,
 * drop firmware/MSI/MMIO resources and restore the permanent MAC.
 */
static void __devexit rtl_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl8168_driver_stop(tp);
	}

	/* No new work can be scheduled once the netdev is unregistered,
	 * but flush anything already queued first.
	 */
	cancel_work_sync(&tp->wk.work);

	netif_napi_del(&tp->napi);

	unregister_netdev(dev);

	rtl_release_firmware(tp);

	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl_disable_msi(pdev, tp);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
	pci_set_drvdata(pdev, NULL);
}
6596
/* net_device callback table; see the individual handlers for details. */
static const struct net_device_ops rtl_netdev_ops = {
	.ndo_open		= rtl_open,
	.ndo_stop		= rtl8169_close,
	.ndo_get_stats64	= rtl8169_get_stats64,
	.ndo_start_xmit		= rtl8169_start_xmit,
	.ndo_tx_timeout		= rtl8169_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= rtl8169_change_mtu,
	.ndo_fix_features	= rtl8169_fix_features,
	.ndo_set_features	= rtl8169_set_features,
	.ndo_set_mac_address	= rtl_set_mac_address,
	.ndo_do_ioctl		= rtl8169_ioctl,
	.ndo_set_rx_mode	= rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rtl8169_netpoll,
#endif

};
6615
/*
 * Per-family configuration, indexed by the RTL_CFG_* value stored in
 * the PCI id table's driver_data: hw_start routine, PCI BAR to map,
 * Rx buffer alignment, the "slow" interrupt event mask, feature flags,
 * and the MAC version to fall back to when detection fails.
 */
static const struct rtl_cfg_info {
	void (*hw_start)(struct net_device *);
	unsigned int region;
	unsigned int align;
	u16 event_slow;
	unsigned features;
	u8 default_ver;
} rtl_cfg_infos [] = {
	[RTL_CFG_0] = {
		.hw_start	= rtl_hw_start_8169,
		.region		= 1,
		.align		= 0,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
		.features	= RTL_FEATURE_GMII,
		.default_ver	= RTL_GIGA_MAC_VER_01,
	},
	[RTL_CFG_1] = {
		.hw_start	= rtl_hw_start_8168,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow,
		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_11,
	},
	[RTL_CFG_2] = {
		.hw_start	= rtl_hw_start_8101,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
				  PCSTimeout,
		.features	= RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_13,
	}
};
6650
/* Cfg9346_Unlock assumed. */
/*
 * Try to enable MSI when the config supports it.  Returns
 * RTL_FEATURE_MSI on success, 0 otherwise (INTx fallback).  On the old
 * 8169 family (<= MAC_VER_06) the MSIEnable bit in Config2 is kept in
 * sync with the outcome.
 */
static unsigned rtl_try_msi(struct rtl8169_private *tp,
			    const struct rtl_cfg_info *cfg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned msi = 0;
	u8 cfg2;

	cfg2 = RTL_R8(Config2) & ~MSIEnable;
	if (cfg->features & RTL_FEATURE_MSI) {
		if (pci_enable_msi(tp->pci_dev)) {
			netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
		} else {
			cfg2 |= MSIEnable;
			msi = RTL_FEATURE_MSI;
		}
	}
	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		RTL_W8(Config2, cfg2);
	return msi;
}
6672
/* Poll condition: MCU reports its link list ready. */
DECLARE_RTL_COND(rtl_link_list_ready_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(MCU) & LINK_LIST_RDY;
}
6679
/* Poll condition: both Rx and Tx FIFOs drained (all RXTX_EMPTY bits set). */
DECLARE_RTL_COND(rtl_rxtx_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
6686
/*
 * One-time probe initialization for the 8168g family: gate RXDV, wait
 * for the Tx config and FIFOs to drain, take the MAC out of OOB mode
 * and cycle OCP register 0xe8de (bit 14 cleared, then bit 15 set),
 * waiting for the MCU link list each time.  Each wait silently bails
 * out on timeout.
 *
 * NOTE(review): the exact semantics of 0xe8de bits 14/15 are not
 * documented here — presumably an OOB/firmware handshake; confirm
 * against the Realtek reference code.
 */
static void __devinit rtl_hw_init_8168g(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 data;

	tp->ocp_base = OCP_STD_PHY_BASE;

	RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
		return;

	if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
		return;

	RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
	msleep(1);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	data = r8168_mac_ocp_read(tp, 0xe8de);
	data &= ~(1 << 14);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;

	data = r8168_mac_ocp_read(tp, 0xe8de);
	data |= (1 << 15);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;
}
6720
/*
 * Dispatch chip-specific one-time probe initialization; currently only
 * the 8168g family (MAC_VER_40/41) needs any.
 */
static void __devinit rtl_hw_initialize(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_hw_init_8168g(tp);
		break;

	default:
		break;
	}
}
6733
6734 static int __devinit
6735 rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6736 {
6737 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
6738 const unsigned int region = cfg->region;
6739 struct rtl8169_private *tp;
6740 struct mii_if_info *mii;
6741 struct net_device *dev;
6742 void __iomem *ioaddr;
6743 int chipset, i;
6744 int rc;
6745
6746 if (netif_msg_drv(&debug)) {
6747 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
6748 MODULENAME, RTL8169_VERSION);
6749 }
6750
6751 dev = alloc_etherdev(sizeof (*tp));
6752 if (!dev) {
6753 rc = -ENOMEM;
6754 goto out;
6755 }
6756
6757 SET_NETDEV_DEV(dev, &pdev->dev);
6758 dev->netdev_ops = &rtl_netdev_ops;
6759 tp = netdev_priv(dev);
6760 tp->dev = dev;
6761 tp->pci_dev = pdev;
6762 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
6763
6764 mii = &tp->mii;
6765 mii->dev = dev;
6766 mii->mdio_read = rtl_mdio_read;
6767 mii->mdio_write = rtl_mdio_write;
6768 mii->phy_id_mask = 0x1f;
6769 mii->reg_num_mask = 0x1f;
6770 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
6771
6772 /* disable ASPM completely as that cause random device stop working
6773 * problems as well as full system hangs for some PCIe devices users */
6774 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6775 PCIE_LINK_STATE_CLKPM);
6776
6777 /* enable device (incl. PCI PM wakeup and hotplug setup) */
6778 rc = pci_enable_device(pdev);
6779 if (rc < 0) {
6780 netif_err(tp, probe, dev, "enable failure\n");
6781 goto err_out_free_dev_1;
6782 }
6783
6784 if (pci_set_mwi(pdev) < 0)
6785 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
6786
6787 /* make sure PCI base addr 1 is MMIO */
6788 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
6789 netif_err(tp, probe, dev,
6790 "region #%d not an MMIO resource, aborting\n",
6791 region);
6792 rc = -ENODEV;
6793 goto err_out_mwi_2;
6794 }
6795
6796 /* check for weird/broken PCI region reporting */
6797 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6798 netif_err(tp, probe, dev,
6799 "Invalid PCI region size(s), aborting\n");
6800 rc = -ENODEV;
6801 goto err_out_mwi_2;
6802 }
6803
6804 rc = pci_request_regions(pdev, MODULENAME);
6805 if (rc < 0) {
6806 netif_err(tp, probe, dev, "could not request regions\n");
6807 goto err_out_mwi_2;
6808 }
6809
6810 tp->cp_cmd = RxChkSum;
6811
6812 if ((sizeof(dma_addr_t) > 4) &&
6813 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
6814 tp->cp_cmd |= PCIDAC;
6815 dev->features |= NETIF_F_HIGHDMA;
6816 } else {
6817 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6818 if (rc < 0) {
6819 netif_err(tp, probe, dev, "DMA configuration failed\n");
6820 goto err_out_free_res_3;
6821 }
6822 }
6823
6824 /* ioremap MMIO region */
6825 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
6826 if (!ioaddr) {
6827 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
6828 rc = -EIO;
6829 goto err_out_free_res_3;
6830 }
6831 tp->mmio_addr = ioaddr;
6832
6833 if (!pci_is_pcie(pdev))
6834 netif_info(tp, probe, dev, "not PCI Express\n");
6835
6836 /* Identify chip attached to board */
6837 rtl8169_get_mac_version(tp, dev, cfg->default_ver);
6838
6839 rtl_init_rxcfg(tp);
6840
6841 rtl_irq_disable(tp);
6842
6843 rtl_hw_initialize(tp);
6844
6845 rtl_hw_reset(tp);
6846
6847 rtl_ack_events(tp, 0xffff);
6848
6849 pci_set_master(pdev);
6850
6851 /*
6852 * Pretend we are using VLANs; This bypasses a nasty bug where
6853 * Interrupts stop flowing on high load on 8110SCd controllers.
6854 */
6855 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6856 tp->cp_cmd |= RxVlan;
6857
6858 rtl_init_mdio_ops(tp);
6859 rtl_init_pll_power_ops(tp);
6860 rtl_init_jumbo_ops(tp);
6861 rtl_init_csi_ops(tp);
6862
6863 rtl8169_print_mac_version(tp);
6864
6865 chipset = tp->mac_version;
6866 tp->txd_version = rtl_chip_infos[chipset].txd_version;
6867
6868 RTL_W8(Cfg9346, Cfg9346_Unlock);
6869 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
6870 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
6871 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
6872 tp->features |= RTL_FEATURE_WOL;
6873 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
6874 tp->features |= RTL_FEATURE_WOL;
6875 tp->features |= rtl_try_msi(tp, cfg);
6876 RTL_W8(Cfg9346, Cfg9346_Lock);
6877
6878 if (rtl_tbi_enabled(tp)) {
6879 tp->set_speed = rtl8169_set_speed_tbi;
6880 tp->get_settings = rtl8169_gset_tbi;
6881 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
6882 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
6883 tp->link_ok = rtl8169_tbi_link_ok;
6884 tp->do_ioctl = rtl_tbi_ioctl;
6885 } else {
6886 tp->set_speed = rtl8169_set_speed_xmii;
6887 tp->get_settings = rtl8169_gset_xmii;
6888 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
6889 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
6890 tp->link_ok = rtl8169_xmii_link_ok;
6891 tp->do_ioctl = rtl_xmii_ioctl;
6892 }
6893
6894 mutex_init(&tp->wk.mutex);
6895
6896 /* Get MAC address */
6897 for (i = 0; i < ETH_ALEN; i++)
6898 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6899 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6900
6901 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
6902 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
6903
6904 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
6905
6906 /* don't enable SG, IP_CSUM and TSO by default - it might not work
6907 * properly for all devices */
6908 dev->features |= NETIF_F_RXCSUM |
6909 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6910
6911 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6912 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6913 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6914 NETIF_F_HIGHDMA;
6915
6916 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6917 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6918 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
6919
6920 dev->hw_features |= NETIF_F_RXALL;
6921 dev->hw_features |= NETIF_F_RXFCS;
6922
6923 tp->hw_start = cfg->hw_start;
6924 tp->event_slow = cfg->event_slow;
6925
6926 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
6927 ~(RxBOVF | RxFOVF) : ~0;
6928
6929 init_timer(&tp->timer);
6930 tp->timer.data = (unsigned long) dev;
6931 tp->timer.function = rtl8169_phy_timer;
6932
6933 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
6934
6935 rc = register_netdev(dev);
6936 if (rc < 0)
6937 goto err_out_msi_4;
6938
6939 pci_set_drvdata(pdev, dev);
6940
6941 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6942 rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
6943 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
6944 if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
6945 netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
6946 "tx checksumming: %s]\n",
6947 rtl_chip_infos[chipset].jumbo_max,
6948 rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
6949 }
6950
6951 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6952 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6953 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6954 rtl8168_driver_start(tp);
6955 }
6956
6957 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
6958
6959 if (pci_dev_run_wake(pdev))
6960 pm_runtime_put_noidle(&pdev->dev);
6961
6962 netif_carrier_off(dev);
6963
6964 out:
6965 return rc;
6966
6967 err_out_msi_4:
6968 netif_napi_del(&tp->napi);
6969 rtl_disable_msi(pdev, tp);
6970 iounmap(ioaddr);
6971 err_out_free_res_3:
6972 pci_release_regions(pdev);
6973 err_out_mwi_2:
6974 pci_clear_mwi(pdev);
6975 pci_disable_device(pdev);
6976 err_out_free_dev_1:
6977 free_netdev(dev);
6978 goto out;
6979 }
6980
/*
 * PCI driver descriptor: binds the r8169 callbacks to the device IDs
 * listed in rtl8169_pci_tbl so the PCI core can probe matching NICs.
 */
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,
	.id_table	= rtl8169_pci_tbl,
	.probe		= rtl_init_one,
	/* __devexit_p() lets the remove handler be discarded when
	 * device removal is configured out of the kernel. */
	.remove		= __devexit_p(rtl_remove_one),
	.shutdown	= rtl_shutdown,
	.driver.pm	= RTL8169_PM_OPS,
};
6989
6990 static int __init rtl8169_init_module(void)
6991 {
6992 return pci_register_driver(&rtl8169_pci_driver);
6993 }
6994
/*
 * rtl8169_cleanup_module - module exit point
 *
 * Unregisters the PCI driver; the PCI core tears down every bound
 * device (invoking the driver's remove handler) before this returns.
 */
static void __exit rtl8169_cleanup_module(void)
{
	pci_unregister_driver(&rtl8169_pci_driver);
}
6999
/* Wire module load/unload to driver (un)registration. */
module_init(rtl8169_init_module);
module_exit(rtl8169_cleanup_module);