]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blob - drivers/net/ethernet/realtek/r8169.c
regulator: palmas: Fix off-by-one for ramp_delay and register value mapping
[mirror_ubuntu-hirsute-kernel.git] / drivers / net / ethernet / realtek / r8169.c
1 /*
2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
3 *
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
7 *
8 * See MAINTAINERS file for support contact information.
9 */
10
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
21 #include <linux/in.h>
22 #include <linux/ip.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
31
32 #include <asm/io.h>
33 #include <asm/irq.h>
34
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
38
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50 #define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
51
#ifdef RTL8169_DEBUG
/*
 * Debug-only sanity check.  Wrapped in do { } while (0) so it expands
 * to a single statement and is safe inside unbraced if/else bodies
 * (the previous bare "if (...) { }" form was a dangling-else hazard).
 * KERN_ERR added: the original printk carried no log level.
 */
#define assert(expr) \
do { \
	if (!(expr)) { \
		printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
		       #expr, __FILE__, __func__, __LINE__); \
	} \
} while (0)
/* Debug trace output, prefixed with the driver name. */
#define dprintk(fmt, args...) \
	do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
#else
#define assert(expr) do {} while (0)
#define dprintk(fmt, args...)	do {} while (0)
#endif /* RTL8169_DEBUG */
64
65 #define R8169_MSG_DEFAULT \
66 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
67
68 #define TX_SLOTS_AVAIL(tp) \
69 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
70
71 /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
72 #define TX_FRAGS_READY_FOR(tp,nr_frags) \
73 (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
74
75 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
76 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
77 static const int multicast_filter_limit = 32;
78
79 #define MAX_READ_REQUEST_SHIFT 12
80 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
81 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
82
83 #define R8169_REGS_SIZE 256
84 #define R8169_NAPI_WEIGHT 64
85 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
86 #define NUM_RX_DESC 256U /* Number of Rx descriptor registers */
87 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
88 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
89
90 #define RTL8169_TX_TIMEOUT (6*HZ)
91 #define RTL8169_PHY_TIMEOUT (10*HZ)
92
93 /* write/read MMIO register */
94 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
95 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
96 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
97 #define RTL_R8(reg) readb (ioaddr + (reg))
98 #define RTL_R16(reg) readw (ioaddr + (reg))
99 #define RTL_R32(reg) readl (ioaddr + (reg))
100
101 enum mac_version {
102 RTL_GIGA_MAC_VER_01 = 0,
103 RTL_GIGA_MAC_VER_02,
104 RTL_GIGA_MAC_VER_03,
105 RTL_GIGA_MAC_VER_04,
106 RTL_GIGA_MAC_VER_05,
107 RTL_GIGA_MAC_VER_06,
108 RTL_GIGA_MAC_VER_07,
109 RTL_GIGA_MAC_VER_08,
110 RTL_GIGA_MAC_VER_09,
111 RTL_GIGA_MAC_VER_10,
112 RTL_GIGA_MAC_VER_11,
113 RTL_GIGA_MAC_VER_12,
114 RTL_GIGA_MAC_VER_13,
115 RTL_GIGA_MAC_VER_14,
116 RTL_GIGA_MAC_VER_15,
117 RTL_GIGA_MAC_VER_16,
118 RTL_GIGA_MAC_VER_17,
119 RTL_GIGA_MAC_VER_18,
120 RTL_GIGA_MAC_VER_19,
121 RTL_GIGA_MAC_VER_20,
122 RTL_GIGA_MAC_VER_21,
123 RTL_GIGA_MAC_VER_22,
124 RTL_GIGA_MAC_VER_23,
125 RTL_GIGA_MAC_VER_24,
126 RTL_GIGA_MAC_VER_25,
127 RTL_GIGA_MAC_VER_26,
128 RTL_GIGA_MAC_VER_27,
129 RTL_GIGA_MAC_VER_28,
130 RTL_GIGA_MAC_VER_29,
131 RTL_GIGA_MAC_VER_30,
132 RTL_GIGA_MAC_VER_31,
133 RTL_GIGA_MAC_VER_32,
134 RTL_GIGA_MAC_VER_33,
135 RTL_GIGA_MAC_VER_34,
136 RTL_GIGA_MAC_VER_35,
137 RTL_GIGA_MAC_VER_36,
138 RTL_GIGA_MAC_VER_37,
139 RTL_GIGA_MAC_VER_38,
140 RTL_GIGA_MAC_VER_39,
141 RTL_GIGA_MAC_VER_40,
142 RTL_GIGA_MAC_VER_41,
143 RTL_GIGA_MAC_NONE = 0xff,
144 };
145
146 enum rtl_tx_desc_version {
147 RTL_TD_0 = 0,
148 RTL_TD_1 = 1,
149 };
150
151 #define JUMBO_1K ETH_DATA_LEN
152 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
153 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
154 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
155 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
156
157 #define _R(NAME,TD,FW,SZ,B) { \
158 .name = NAME, \
159 .txd_version = TD, \
160 .fw_name = FW, \
161 .jumbo_max = SZ, \
162 .jumbo_tx_csum = B \
163 }
164
165 static const struct {
166 const char *name;
167 enum rtl_tx_desc_version txd_version;
168 const char *fw_name;
169 u16 jumbo_max;
170 bool jumbo_tx_csum;
171 } rtl_chip_infos[] = {
172 /* PCI devices. */
173 [RTL_GIGA_MAC_VER_01] =
174 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
175 [RTL_GIGA_MAC_VER_02] =
176 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
177 [RTL_GIGA_MAC_VER_03] =
178 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
179 [RTL_GIGA_MAC_VER_04] =
180 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
181 [RTL_GIGA_MAC_VER_05] =
182 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
183 [RTL_GIGA_MAC_VER_06] =
184 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
185 /* PCI-E devices. */
186 [RTL_GIGA_MAC_VER_07] =
187 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
188 [RTL_GIGA_MAC_VER_08] =
189 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
190 [RTL_GIGA_MAC_VER_09] =
191 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
192 [RTL_GIGA_MAC_VER_10] =
193 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
194 [RTL_GIGA_MAC_VER_11] =
195 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
196 [RTL_GIGA_MAC_VER_12] =
197 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
198 [RTL_GIGA_MAC_VER_13] =
199 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
200 [RTL_GIGA_MAC_VER_14] =
201 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
202 [RTL_GIGA_MAC_VER_15] =
203 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
204 [RTL_GIGA_MAC_VER_16] =
205 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
206 [RTL_GIGA_MAC_VER_17] =
207 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
208 [RTL_GIGA_MAC_VER_18] =
209 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
210 [RTL_GIGA_MAC_VER_19] =
211 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
212 [RTL_GIGA_MAC_VER_20] =
213 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
214 [RTL_GIGA_MAC_VER_21] =
215 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
216 [RTL_GIGA_MAC_VER_22] =
217 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
218 [RTL_GIGA_MAC_VER_23] =
219 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
220 [RTL_GIGA_MAC_VER_24] =
221 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
222 [RTL_GIGA_MAC_VER_25] =
223 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
224 JUMBO_9K, false),
225 [RTL_GIGA_MAC_VER_26] =
226 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
227 JUMBO_9K, false),
228 [RTL_GIGA_MAC_VER_27] =
229 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
230 [RTL_GIGA_MAC_VER_28] =
231 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
232 [RTL_GIGA_MAC_VER_29] =
233 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
234 JUMBO_1K, true),
235 [RTL_GIGA_MAC_VER_30] =
236 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
237 JUMBO_1K, true),
238 [RTL_GIGA_MAC_VER_31] =
239 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
240 [RTL_GIGA_MAC_VER_32] =
241 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
242 JUMBO_9K, false),
243 [RTL_GIGA_MAC_VER_33] =
244 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
245 JUMBO_9K, false),
246 [RTL_GIGA_MAC_VER_34] =
247 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
248 JUMBO_9K, false),
249 [RTL_GIGA_MAC_VER_35] =
250 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
251 JUMBO_9K, false),
252 [RTL_GIGA_MAC_VER_36] =
253 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
254 JUMBO_9K, false),
255 [RTL_GIGA_MAC_VER_37] =
256 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
257 JUMBO_1K, true),
258 [RTL_GIGA_MAC_VER_38] =
259 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
260 JUMBO_9K, false),
261 [RTL_GIGA_MAC_VER_39] =
262 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
263 JUMBO_1K, true),
264 [RTL_GIGA_MAC_VER_40] =
265 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
266 JUMBO_9K, false),
267 [RTL_GIGA_MAC_VER_41] =
268 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
269 };
270 #undef _R
271
272 enum cfg_version {
273 RTL_CFG_0 = 0x00,
274 RTL_CFG_1,
275 RTL_CFG_2
276 };
277
278 static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
279 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
280 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
281 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
282 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
283 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
284 { PCI_VENDOR_ID_DLINK, 0x4300,
285 PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
286 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
287 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
288 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
289 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
290 { PCI_VENDOR_ID_LINKSYS, 0x1032,
291 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
292 { 0x0001, 0x8168,
293 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
294 {0,},
295 };
296
297 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
298
/* Rx data buffer size in bytes.  NOTE(review): 16383 is presumably the
 * chip's maximum receive size — confirm against the RxMaxSize setup. */
static int rx_buf_sz = 16383;
/* Module parameter: enable 64-bit PCI DAC addressing (see param desc). */
static int use_dac;
/* Module parameter: netif message-level bitmap; -1 enables everything. */
static struct {
	u32 msg_enable;
} debug = { -1 };
304
305 enum rtl_registers {
306 MAC0 = 0, /* Ethernet hardware address. */
307 MAC4 = 4,
308 MAR0 = 8, /* Multicast filter. */
309 CounterAddrLow = 0x10,
310 CounterAddrHigh = 0x14,
311 TxDescStartAddrLow = 0x20,
312 TxDescStartAddrHigh = 0x24,
313 TxHDescStartAddrLow = 0x28,
314 TxHDescStartAddrHigh = 0x2c,
315 FLASH = 0x30,
316 ERSR = 0x36,
317 ChipCmd = 0x37,
318 TxPoll = 0x38,
319 IntrMask = 0x3c,
320 IntrStatus = 0x3e,
321
322 TxConfig = 0x40,
323 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
324 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
325
326 RxConfig = 0x44,
327 #define RX128_INT_EN (1 << 15) /* 8111c and later */
328 #define RX_MULTI_EN (1 << 14) /* 8111c only */
329 #define RXCFG_FIFO_SHIFT 13
330 /* No threshold before first PCI xfer */
331 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
332 #define RXCFG_DMA_SHIFT 8
333 /* Unlimited maximum PCI burst. */
334 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
335
336 RxMissed = 0x4c,
337 Cfg9346 = 0x50,
338 Config0 = 0x51,
339 Config1 = 0x52,
340 Config2 = 0x53,
341 #define PME_SIGNAL (1 << 5) /* 8168c and later */
342
343 Config3 = 0x54,
344 Config4 = 0x55,
345 Config5 = 0x56,
346 MultiIntr = 0x5c,
347 PHYAR = 0x60,
348 PHYstatus = 0x6c,
349 RxMaxSize = 0xda,
350 CPlusCmd = 0xe0,
351 IntrMitigate = 0xe2,
352 RxDescAddrLow = 0xe4,
353 RxDescAddrHigh = 0xe8,
354 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
355
356 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
357
358 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
359
360 #define TxPacketMax (8064 >> 7)
361 #define EarlySize 0x27
362
363 FuncEvent = 0xf0,
364 FuncEventMask = 0xf4,
365 FuncPresetState = 0xf8,
366 FuncForceEvent = 0xfc,
367 };
368
369 enum rtl8110_registers {
370 TBICSR = 0x64,
371 TBI_ANAR = 0x68,
372 TBI_LPAR = 0x6a,
373 };
374
375 enum rtl8168_8101_registers {
376 CSIDR = 0x64,
377 CSIAR = 0x68,
378 #define CSIAR_FLAG 0x80000000
379 #define CSIAR_WRITE_CMD 0x80000000
380 #define CSIAR_BYTE_ENABLE 0x0f
381 #define CSIAR_BYTE_ENABLE_SHIFT 12
382 #define CSIAR_ADDR_MASK 0x0fff
383 #define CSIAR_FUNC_CARD 0x00000000
384 #define CSIAR_FUNC_SDIO 0x00010000
385 #define CSIAR_FUNC_NIC 0x00020000
386 PMCH = 0x6f,
387 EPHYAR = 0x80,
388 #define EPHYAR_FLAG 0x80000000
389 #define EPHYAR_WRITE_CMD 0x80000000
390 #define EPHYAR_REG_MASK 0x1f
391 #define EPHYAR_REG_SHIFT 16
392 #define EPHYAR_DATA_MASK 0xffff
393 DLLPR = 0xd0,
394 #define PFM_EN (1 << 6)
395 DBG_REG = 0xd1,
396 #define FIX_NAK_1 (1 << 4)
397 #define FIX_NAK_2 (1 << 3)
398 TWSI = 0xd2,
399 MCU = 0xd3,
400 #define NOW_IS_OOB (1 << 7)
401 #define TX_EMPTY (1 << 5)
402 #define RX_EMPTY (1 << 4)
403 #define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
404 #define EN_NDP (1 << 3)
405 #define EN_OOB_RESET (1 << 2)
406 #define LINK_LIST_RDY (1 << 1)
407 EFUSEAR = 0xdc,
408 #define EFUSEAR_FLAG 0x80000000
409 #define EFUSEAR_WRITE_CMD 0x80000000
410 #define EFUSEAR_READ_CMD 0x00000000
411 #define EFUSEAR_REG_MASK 0x03ff
412 #define EFUSEAR_REG_SHIFT 8
413 #define EFUSEAR_DATA_MASK 0xff
414 };
415
416 enum rtl8168_registers {
417 LED_FREQ = 0x1a,
418 EEE_LED = 0x1b,
419 ERIDR = 0x70,
420 ERIAR = 0x74,
421 #define ERIAR_FLAG 0x80000000
422 #define ERIAR_WRITE_CMD 0x80000000
423 #define ERIAR_READ_CMD 0x00000000
424 #define ERIAR_ADDR_BYTE_ALIGN 4
425 #define ERIAR_TYPE_SHIFT 16
426 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
427 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
428 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
429 #define ERIAR_MASK_SHIFT 12
430 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
431 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
432 #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
433 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
434 EPHY_RXER_NUM = 0x7c,
435 OCPDR = 0xb0, /* OCP GPHY access */
436 #define OCPDR_WRITE_CMD 0x80000000
437 #define OCPDR_READ_CMD 0x00000000
438 #define OCPDR_REG_MASK 0x7f
439 #define OCPDR_GPHY_REG_SHIFT 16
440 #define OCPDR_DATA_MASK 0xffff
441 OCPAR = 0xb4,
442 #define OCPAR_FLAG 0x80000000
443 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
444 #define OCPAR_GPHY_READ_CMD 0x0000f060
445 GPHY_OCP = 0xb8,
446 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
447 MISC = 0xf0, /* 8168e only. */
448 #define TXPLA_RST (1 << 29)
449 #define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
450 #define PWM_EN (1 << 22)
451 #define RXDV_GATED_EN (1 << 19)
452 #define EARLY_TALLY_EN (1 << 16)
453 };
454
455 enum rtl_register_content {
456 /* InterruptStatusBits */
457 SYSErr = 0x8000,
458 PCSTimeout = 0x4000,
459 SWInt = 0x0100,
460 TxDescUnavail = 0x0080,
461 RxFIFOOver = 0x0040,
462 LinkChg = 0x0020,
463 RxOverflow = 0x0010,
464 TxErr = 0x0008,
465 TxOK = 0x0004,
466 RxErr = 0x0002,
467 RxOK = 0x0001,
468
469 /* RxStatusDesc */
470 RxBOVF = (1 << 24),
471 RxFOVF = (1 << 23),
472 RxRWT = (1 << 22),
473 RxRES = (1 << 21),
474 RxRUNT = (1 << 20),
475 RxCRC = (1 << 19),
476
477 /* ChipCmdBits */
478 StopReq = 0x80,
479 CmdReset = 0x10,
480 CmdRxEnb = 0x08,
481 CmdTxEnb = 0x04,
482 RxBufEmpty = 0x01,
483
484 /* TXPoll register p.5 */
485 HPQ = 0x80, /* Poll cmd on the high prio queue */
486 NPQ = 0x40, /* Poll cmd on the low prio queue */
487 FSWInt = 0x01, /* Forced software interrupt */
488
489 /* Cfg9346Bits */
490 Cfg9346_Lock = 0x00,
491 Cfg9346_Unlock = 0xc0,
492
493 /* rx_mode_bits */
494 AcceptErr = 0x20,
495 AcceptRunt = 0x10,
496 AcceptBroadcast = 0x08,
497 AcceptMulticast = 0x04,
498 AcceptMyPhys = 0x02,
499 AcceptAllPhys = 0x01,
500 #define RX_CONFIG_ACCEPT_MASK 0x3f
501
502 /* TxConfigBits */
503 TxInterFrameGapShift = 24,
504 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
505
506 /* Config1 register p.24 */
507 LEDS1 = (1 << 7),
508 LEDS0 = (1 << 6),
509 Speed_down = (1 << 4),
510 MEMMAP = (1 << 3),
511 IOMAP = (1 << 2),
512 VPD = (1 << 1),
513 PMEnable = (1 << 0), /* Power Management Enable */
514
515 /* Config2 register p. 25 */
516 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
517 PCI_Clock_66MHz = 0x01,
518 PCI_Clock_33MHz = 0x00,
519
520 /* Config3 register p.25 */
521 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
522 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
523 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
524 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
525
526 /* Config4 register */
527 Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
528
529 /* Config5 register p.27 */
530 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
531 MWF = (1 << 5), /* Accept Multicast wakeup frame */
532 UWF = (1 << 4), /* Accept Unicast wakeup frame */
533 Spi_en = (1 << 3),
534 LanWake = (1 << 1), /* LanWake enable/disable */
535 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
536
537 /* TBICSR p.28 */
538 TBIReset = 0x80000000,
539 TBILoopback = 0x40000000,
540 TBINwEnable = 0x20000000,
541 TBINwRestart = 0x10000000,
542 TBILinkOk = 0x02000000,
543 TBINwComplete = 0x01000000,
544
545 /* CPlusCmd p.31 */
546 EnableBist = (1 << 15), // 8168 8101
547 Mac_dbgo_oe = (1 << 14), // 8168 8101
548 Normal_mode = (1 << 13), // unused
549 Force_half_dup = (1 << 12), // 8168 8101
550 Force_rxflow_en = (1 << 11), // 8168 8101
551 Force_txflow_en = (1 << 10), // 8168 8101
552 Cxpl_dbg_sel = (1 << 9), // 8168 8101
553 ASF = (1 << 8), // 8168 8101
554 PktCntrDisable = (1 << 7), // 8168 8101
555 Mac_dbgo_sel = 0x001c, // 8168
556 RxVlan = (1 << 6),
557 RxChkSum = (1 << 5),
558 PCIDAC = (1 << 4),
559 PCIMulRW = (1 << 3),
560 INTT_0 = 0x0000, // 8168
561 INTT_1 = 0x0001, // 8168
562 INTT_2 = 0x0002, // 8168
563 INTT_3 = 0x0003, // 8168
564
565 /* rtl8169_PHYstatus */
566 TBI_Enable = 0x80,
567 TxFlowCtrl = 0x40,
568 RxFlowCtrl = 0x20,
569 _1000bpsF = 0x10,
570 _100bps = 0x08,
571 _10bps = 0x04,
572 LinkStatus = 0x02,
573 FullDup = 0x01,
574
575 /* _TBICSRBit */
576 TBILinkOK = 0x02000000,
577
578 /* DumpCounterCommand */
579 CounterDump = 0x8,
580 };
581
582 enum rtl_desc_bit {
583 /* First doubleword. */
584 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
585 RingEnd = (1 << 30), /* End of descriptor ring */
586 FirstFrag = (1 << 29), /* First segment of a packet */
587 LastFrag = (1 << 28), /* Final segment of a packet */
588 };
589
590 /* Generic case. */
591 enum rtl_tx_desc_bit {
592 /* First doubleword. */
593 TD_LSO = (1 << 27), /* Large Send Offload */
594 #define TD_MSS_MAX 0x07ffu /* MSS value */
595
596 /* Second doubleword. */
597 TxVlanTag = (1 << 17), /* Add VLAN tag */
598 };
599
600 /* 8169, 8168b and 810x except 8102e. */
601 enum rtl_tx_desc_bit_0 {
602 /* First doubleword. */
603 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
604 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
605 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
606 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
607 };
608
609 /* 8102e, 8168c and beyond. */
610 enum rtl_tx_desc_bit_1 {
611 /* Second doubleword. */
612 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
613 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
614 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
615 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
616 };
617
618 static const struct rtl_tx_desc_info {
619 struct {
620 u32 udp;
621 u32 tcp;
622 } checksum;
623 u16 mss_shift;
624 u16 opts_offset;
625 } tx_desc_info [] = {
626 [RTL_TD_0] = {
627 .checksum = {
628 .udp = TD0_IP_CS | TD0_UDP_CS,
629 .tcp = TD0_IP_CS | TD0_TCP_CS
630 },
631 .mss_shift = TD0_MSS_SHIFT,
632 .opts_offset = 0
633 },
634 [RTL_TD_1] = {
635 .checksum = {
636 .udp = TD1_IP_CS | TD1_UDP_CS,
637 .tcp = TD1_IP_CS | TD1_TCP_CS
638 },
639 .mss_shift = TD1_MSS_SHIFT,
640 .opts_offset = 1
641 }
642 };
643
644 enum rtl_rx_desc_bit {
645 /* Rx private */
646 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
647 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
648
649 #define RxProtoUDP (PID1)
650 #define RxProtoTCP (PID0)
651 #define RxProtoIP (PID1 | PID0)
652 #define RxProtoMask RxProtoIP
653
654 IPFail = (1 << 16), /* IP checksum failed */
655 UDPFail = (1 << 15), /* UDP/IP checksum failed */
656 TCPFail = (1 << 14), /* TCP/IP checksum failed */
657 RxVlanTag = (1 << 16), /* VLAN tag available */
658 };
659
660 #define RsvdMask 0x3fffc000
661
662 struct TxDesc {
663 __le32 opts1;
664 __le32 opts2;
665 __le64 addr;
666 };
667
668 struct RxDesc {
669 __le32 opts1;
670 __le32 opts2;
671 __le64 addr;
672 };
673
674 struct ring_info {
675 struct sk_buff *skb;
676 u32 len;
677 u8 __pad[sizeof(void *) - sizeof(u32)];
678 };
679
/* Per-device capability flags, stored in tp->features. */
enum features {
	RTL_FEATURE_WOL		= (1 << 0),	/* Wake-on-LAN */
	RTL_FEATURE_MSI		= (1 << 1),	/* Message-Signaled Interrupts */
	RTL_FEATURE_GMII	= (1 << 2),	/* GMII (vs. MII) PHY interface */
};
685
/*
 * Hardware statistics dump block.  Fields are little-endian as written
 * by the chip (hence __le types).  NOTE(review): presumably DMA-filled
 * via CounterAddrLow/High + the CounterDump command — the dump path is
 * not in this chunk; confirm there.
 */
struct rtl8169_counters {
	__le64	tx_packets;
	__le64	rx_packets;
	__le64	tx_errors;
	__le32	rx_errors;
	__le16	rx_missed;
	__le16	align_errors;
	__le32	tx_one_collision;
	__le32	tx_multi_collision;
	__le64	rx_unicast;
	__le64	rx_broadcast;
	__le32	rx_multicast;
	__le16	tx_aborted;
	__le16	tx_underun;	/* sic: historical spelling, keep as-is */
};
701
/* Bit indices into tp->wk.flags, consumed by the deferred-work handler. */
enum rtl_flag {
	RTL_FLAG_TASK_ENABLED,		/* deferred work is allowed to run */
	RTL_FLAG_TASK_SLOW_PENDING,	/* slow-event servicing requested */
	RTL_FLAG_TASK_RESET_PENDING,	/* chip reset requested */
	RTL_FLAG_TASK_PHY_PENDING,	/* PHY servicing requested */
	RTL_FLAG_MAX			/* bitmap size, not a flag */
};
709
/* 64-bit packet/byte counters; syncp gives readers a consistent
 * (tear-free) snapshot on 32-bit hosts. */
struct rtl8169_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};
715
716 struct rtl8169_private {
717 void __iomem *mmio_addr; /* memory map physical address */
718 struct pci_dev *pci_dev;
719 struct net_device *dev;
720 struct napi_struct napi;
721 u32 msg_enable;
722 u16 txd_version;
723 u16 mac_version;
724 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
725 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
726 u32 dirty_tx;
727 struct rtl8169_stats rx_stats;
728 struct rtl8169_stats tx_stats;
729 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
730 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
731 dma_addr_t TxPhyAddr;
732 dma_addr_t RxPhyAddr;
733 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
734 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
735 struct timer_list timer;
736 u16 cp_cmd;
737
738 u16 event_slow;
739
740 struct mdio_ops {
741 void (*write)(struct rtl8169_private *, int, int);
742 int (*read)(struct rtl8169_private *, int);
743 } mdio_ops;
744
745 struct pll_power_ops {
746 void (*down)(struct rtl8169_private *);
747 void (*up)(struct rtl8169_private *);
748 } pll_power_ops;
749
750 struct jumbo_ops {
751 void (*enable)(struct rtl8169_private *);
752 void (*disable)(struct rtl8169_private *);
753 } jumbo_ops;
754
755 struct csi_ops {
756 void (*write)(struct rtl8169_private *, int, int);
757 u32 (*read)(struct rtl8169_private *, int);
758 } csi_ops;
759
760 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
761 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
762 void (*phy_reset_enable)(struct rtl8169_private *tp);
763 void (*hw_start)(struct net_device *);
764 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
765 unsigned int (*link_ok)(void __iomem *);
766 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
767
768 struct {
769 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
770 struct mutex mutex;
771 struct work_struct work;
772 } wk;
773
774 unsigned features;
775
776 struct mii_if_info mii;
777 struct rtl8169_counters counters;
778 u32 saved_wolopts;
779 u32 opts1_mask;
780
781 struct rtl_fw {
782 const struct firmware *fw;
783
784 #define RTL_VER_SIZE 32
785
786 char version[RTL_VER_SIZE];
787
788 struct rtl_fw_phy_action {
789 __le32 *code;
790 size_t size;
791 } phy_action;
792 } *rtl_fw;
793 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
794
795 u32 ocp_base;
796 };
797
798 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
799 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
800 module_param(use_dac, int, 0);
801 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
802 module_param_named(debug, debug.msg_enable, int, 0);
803 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
804 MODULE_LICENSE("GPL");
805 MODULE_VERSION(RTL8169_VERSION);
806 MODULE_FIRMWARE(FIRMWARE_8168D_1);
807 MODULE_FIRMWARE(FIRMWARE_8168D_2);
808 MODULE_FIRMWARE(FIRMWARE_8168E_1);
809 MODULE_FIRMWARE(FIRMWARE_8168E_2);
810 MODULE_FIRMWARE(FIRMWARE_8168E_3);
811 MODULE_FIRMWARE(FIRMWARE_8105E_1);
812 MODULE_FIRMWARE(FIRMWARE_8168F_1);
813 MODULE_FIRMWARE(FIRMWARE_8168F_2);
814 MODULE_FIRMWARE(FIRMWARE_8402_1);
815 MODULE_FIRMWARE(FIRMWARE_8411_1);
816 MODULE_FIRMWARE(FIRMWARE_8106E_1);
817 MODULE_FIRMWARE(FIRMWARE_8168G_1);
818
/* Acquire the mutex serializing the deferred-work (tp->wk) machinery. */
static void rtl_lock_work(struct rtl8169_private *tp)
{
	mutex_lock(&tp->wk.mutex);
}
823
/* Release the deferred-work mutex taken by rtl_lock_work(). */
static void rtl_unlock_work(struct rtl8169_private *tp)
{
	mutex_unlock(&tp->wk.mutex);
}
828
/* Program the PCIe Max_Read_Request_Size field of the Device Control
 * register to the caller-supplied encoded value @force. */
static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_READRQ, force);
}
834
/* A named hardware condition polled by rtl_loop_wait(): check() samples
 * the condition; msg names it in the timeout error message. */
struct rtl_cond {
	bool (*check)(struct rtl8169_private *);
	const char *msg;
};
839
/* Real-function wrapper so udelay can be passed as rtl_loop_wait()'s
 * delay callback (udelay may be a macro on some architectures). */
static void rtl_udelay(unsigned int d)
{
	udelay(d);
}
844
845 static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
846 void (*delay)(unsigned int), unsigned int d, int n,
847 bool high)
848 {
849 int i;
850
851 for (i = 0; i < n; i++) {
852 delay(d);
853 if (c->check(tp) == high)
854 return true;
855 }
856 netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
857 c->msg, !high, n, d);
858 return false;
859 }
860
/* Busy-wait (udelay granularity) for condition @c to become true. */
static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
}
867
/* Busy-wait (udelay granularity) for condition @c to become false. */
static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
}
874
/* Sleeping wait (msleep granularity) for condition @c to become true. */
static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, true);
}
881
/* Sleeping wait (msleep granularity) for condition @c to become false. */
static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, false);
}
888
/*
 * Boilerplate for a polled condition: forward-declares name##_check(),
 * defines the struct rtl_cond instance wiring it up, and opens the
 * header of name##_check() — the invocation site supplies the body.
 */
#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check = name ## _check,			\
	.msg = #name					\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)
898
/* True while the OCPAR busy/valid flag (bit 31) is set. */
DECLARE_RTL_COND(rtl_ocpar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(OCPAR) & OCPAR_FLAG;
}
905
/*
 * Read a dword from the OCP (on-chip processor) address space.
 * @mask: byte-enable nibble, placed in OCPAR bits 15:12.
 * @reg: 12-bit OCP address.
 * Waits for OCPAR_FLAG to go high, then returns OCPDR; ~0 on timeout.
 */
static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));

	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
		RTL_R32(OCPDR) : ~0;
}
915
/*
 * Write a dword into the OCP address space: the data is staged in
 * OCPDR first, then the OCPAR write (with OCPAR_FLAG set) triggers the
 * transfer; completion is signalled by the flag clearing.
 */
static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPDR, data);
	RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));

	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
}
925
/* True while the ERIAR busy/valid flag (bit 31) is set. */
DECLARE_RTL_COND(rtl_eriar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(ERIAR) & ERIAR_FLAG;
}
932
/*
 * Hand a command byte to the OOB (DASH management) firmware.  The byte
 * goes out through ERI address 0xe8 (0x800010e8 == ERIAR_WRITE_CMD |
 * ERIAR_MASK_0001 | 0xe8), then bit 0 of OCP register 0x30 is set to
 * notify the embedded processor.
 * NOTE(review): the OCP 0x30 "doorbell" semantics are inferred from
 * the write pattern — confirm against Realtek DASH documentation.
 */
static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(ERIDR, cmd);
	RTL_W32(ERIAR, 0x800010e8);
	msleep(2);

	/* Give up silently if the ERI write never completes. */
	if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
		return;

	ocp_write(tp, 0x1, 0x30, 0x00000001);
}
946
947 #define OOB_CMD_RESET 0x00
948 #define OOB_CMD_DRIVER_START 0x05
949 #define OOB_CMD_DRIVER_STOP 0x06
950
951 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
952 {
953 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
954 }
955
/* Samples bit 11 of the chip-specific DASH OCP register; used by
 * rtl8168_driver_start()/stop() to wait for firmware acknowledgement.
 * NOTE(review): exact bit meaning assumed from usage — confirm. */
DECLARE_RTL_COND(rtl_ocp_read_cond)
{
	u16 reg;

	reg = rtl8168_get_ocp_reg(tp);

	return ocp_read(tp, 0x0f, reg) & 0x00000800;
}
964
/* Tell the DASH firmware the driver is taking over, then wait up to
 * 10 x 10 ms for the acknowledgement bit to rise. */
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);

	rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
}
971
/* Tell the DASH firmware the driver is releasing the device, then wait
 * up to 10 x 10 ms for the acknowledgement bit to drop. */
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);

	rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
}
978
979 static int r8168dp_check_dash(struct rtl8169_private *tp)
980 {
981 u16 reg = rtl8168_get_ocp_reg(tp);
982
983 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
984 }
985
986 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
987 {
988 if (reg & 0xffff0001) {
989 netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
990 return true;
991 }
992 return false;
993 }
994
/* True while the GPHY_OCP busy flag (bit 31) is set. */
DECLARE_RTL_COND(rtl_ocp_gphy_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
}
1001
/*
 * Write a 16-bit value to a PHY OCP register.  @reg must be even and
 * <= 0xffff (enforced by rtl_ocp_reg_failure); it lands in bits 30:16
 * via the << 15 on the halved address.  Completion is signalled by the
 * busy flag clearing.
 */
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);

	rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}
1013
/*
 * Read a 16-bit value from a PHY OCP register (address rules as for
 * r8168_phy_ocp_write).  Returns the low 16 bits of GPHY_OCP once the
 * busy flag rises, ~0 (0xffff after the u16 truncation) on timeout,
 * or 0 for an invalid address.
 */
static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(GPHY_OCP, reg << 15);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
		(RTL_R32(GPHY_OCP) & 0xffff) : ~0;
}
1026
/*
 * Read-modify-write a PHY OCP register: set the bits in @p, then clear
 * the bits in @m (clear wins where the two masks overlap).
 */
static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
{
	int cur = r8168_phy_ocp_read(tp, reg);

	r8168_phy_ocp_write(tp, reg, (cur | p) & ~m);
}
1034
/*
 * Write a value to a MAC OCP register through OCPDR (address rules as
 * for the PHY variant).  NOTE(review): unlike r8168_phy_ocp_write()
 * there is no completion poll here — presumably MAC OCP writes post
 * immediately; confirm.
 */
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
}
1044
/*
 * Read a MAC OCP register: the address write to OCPDR selects the
 * register, the subsequent read returns its value (truncated to u16 by
 * the return type).  Returns 0 for an invalid address.
 */
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(OCPDR, reg << 15);

	return RTL_R32(OCPDR);
}
1056
1057 #define OCP_STD_PHY_BASE 0xa400
1058
/*
 * mdio_ops.write handler for 8168g-style chips, which route PHY access
 * through the OCP window.  A write to the MII page-select register
 * (0x1f) only records the new OCP base (page << 4, with page 0 mapping
 * back to OCP_STD_PHY_BASE) — no bus access happens for it.
 */
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	if (reg == 0x1f) {
		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
		return;
	}

	/* NOTE(review): non-standard pages apparently number their
	 * registers from 0x10, hence the offset correction — confirm. */
	if (tp->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	/* reg * 2: OCP addresses are byte-based, registers 16-bit wide. */
	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
}
1071
/* mdio_ops.read counterpart of r8168g_mdio_write(): translate the MII
 * register number to an OCP byte address within the current page. */
static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
{
	if (tp->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
}
1079
/* True while the PHYAR busy flag (bit 31) is set. */
DECLARE_RTL_COND(rtl_phyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(PHYAR) & 0x80000000;
}
1086
/*
 * mdio_ops.write handler for the PHYAR-based (8169-style) MDIO
 * interface: bit 31 flags a write, bits 20:16 carry the 5-bit register
 * number, bits 15:0 the data.  The chip clears bit 31 on completion.
 */
static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));

	rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
	/*
	 * According to hardware specs a 20us delay is required after write
	 * complete indication, but before sending next command.
	 */
	udelay(20);
}
1100
/*
 * Read PHY register @reg via PHYAR (bit 31 clear = read command).
 * Returns the 16-bit register value, or ~0 on timeout.
 */
static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int value;

	RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);

	value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
		RTL_R32(PHYAR) & 0xffff : ~0;

	/*
	 * According to hardware specs a 20us delay is required after read
	 * complete indication, but before sending next command.
	 */
	udelay(20);

	return value;
}
1119
/*
 * Issue one GPHY command on the first 8168DP revision: the command word
 * goes through OCPDR/OCPAR and completion is signalled by the OCPAR
 * flag dropping.  EPHY_RXER_NUM is cleared as part of the sequence.
 */
static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
	RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
1130
1131 static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
1132 {
1133 r8168dp_1_mdio_access(tp, reg,
1134 OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
1135 }
1136
/*
 * PHY register read on 8168DP rev.1: issue the read command, wait for
 * the chip to latch it, then fetch the data.  Returns ~0 on timeout.
 */
static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);

	/* settle time before fetching the result */
	mdelay(1);
	RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
		RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
}
1150
1151 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
1152
1153 static void r8168dp_2_mdio_start(void __iomem *ioaddr)
1154 {
1155 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
1156 }
1157
1158 static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1159 {
1160 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
1161 }
1162
1163 static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1164 {
1165 void __iomem *ioaddr = tp->mmio_addr;
1166
1167 r8168dp_2_mdio_start(ioaddr);
1168
1169 r8169_mdio_write(tp, reg, value);
1170
1171 r8168dp_2_mdio_stop(ioaddr);
1172 }
1173
1174 static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1175 {
1176 void __iomem *ioaddr = tp->mmio_addr;
1177 int value;
1178
1179 r8168dp_2_mdio_start(ioaddr);
1180
1181 value = r8169_mdio_read(tp, reg);
1182
1183 r8168dp_2_mdio_stop(ioaddr);
1184
1185 return value;
1186 }
1187
1188 static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
1189 {
1190 tp->mdio_ops.write(tp, location, val);
1191 }
1192
1193 static int rtl_readphy(struct rtl8169_private *tp, int location)
1194 {
1195 return tp->mdio_ops.read(tp, location);
1196 }
1197
/* OR @value into PHY register @reg_addr (read-modify-write). */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, cur | value);
}
1202
/*
 * "Write 1 / write 0": set the bits in @p and clear the bits in @m of
 * PHY register @reg_addr.  NOTE: the clear mask is applied after the OR,
 * so a bit present in both @p and @m ends up cleared — the opposite of
 * rtl_w1w0_eri(), where the set bits win.
 */
static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	int val;

	val = rtl_readphy(tp, reg_addr);
	rtl_writephy(tp, reg_addr, (val | p) & ~m);
}
1210
/* mii library callback; @phy_id is ignored (single internal PHY). */
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	rtl_writephy(netdev_priv(dev), location, val);
}
1218
/* mii library callback; @phy_id is ignored (single internal PHY). */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	return rtl_readphy(netdev_priv(dev), location);
}
1225
/* EPHYAR busy/valid flag polled by the EPHY access helpers. */
DECLARE_RTL_COND(rtl_ephyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EPHYAR) & EPHYAR_FLAG;
}
1232
/*
 * Write @value to PCIe EPHY register @reg_addr, then honor the 10us
 * inter-command delay after the completion flag drops.
 */
static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);

	udelay(10);
}
1244
/*
 * Read PCIe EPHY register @reg_addr.  Returns 0xffff on timeout (the
 * u16 return type truncates the ~0 sentinel).
 */
static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
		RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
1254
/*
 * Write @val to ERI register @addr with byte-enable @mask.
 * @addr must be dword-aligned and @mask non-zero (BUG_ON otherwise);
 * @type selects the target block (e.g. ERIAR_EXGMAC).
 */
static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
			  u32 val, int type)
{
	void __iomem *ioaddr = tp->mmio_addr;

	BUG_ON((addr & 3) || (mask == 0));
	RTL_W32(ERIDR, val);
	RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);

	rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
1266
/*
 * Read the full 32-bit ERI register @addr (all byte enables set).
 * Returns ~0 on timeout.
 */
static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);

	return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
		RTL_R32(ERIDR) : ~0;
}
1276
/*
 * Read-modify-write an ERI register: clear the bits in @m, then set the
 * bits in @p (here @p wins on overlap, unlike rtl_w1w0_phy()).
 */
static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
			 u32 m, int type)
{
	u32 val;

	val = rtl_eri_read(tp, addr, type);
	rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
}
1285
/* One EXGMAC (ERI) register write: address, byte-enable mask and value. */
struct exgmac_reg {
	u16 addr;
	u16 mask;
	u32 val;
};
1291
1292 static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1293 const struct exgmac_reg *r, int len)
1294 {
1295 while (len-- > 0) {
1296 rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1297 r++;
1298 }
1299 }
1300
/* EFUSEAR data-valid flag polled by rtl8168d_efuse_read(). */
DECLARE_RTL_COND(rtl_efusear_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
}
1307
/*
 * Read one byte from the 8168D eFuse block.  On timeout the ~0
 * sentinel is truncated to 0xff by the u8 return type.
 */
static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
		RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
1317
1318 static u16 rtl_get_events(struct rtl8169_private *tp)
1319 {
1320 void __iomem *ioaddr = tp->mmio_addr;
1321
1322 return RTL_R16(IntrStatus);
1323 }
1324
/*
 * Acknowledge the interrupt status bits in @bits (presumably
 * write-1-to-clear — TODO confirm against the datasheet).
 */
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrStatus, bits);
	mmiowb();	/* order the MMIO write w.r.t. the following unlock */
}
1332
/* Mask all interrupt sources. */
static void rtl_irq_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, 0);
	mmiowb();
}
1340
1341 static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
1342 {
1343 void __iomem *ioaddr = tp->mmio_addr;
1344
1345 RTL_W16(IntrMask, bits);
1346 }
1347
1348 #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1349 #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1350 #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1351
1352 static void rtl_irq_enable_all(struct rtl8169_private *tp)
1353 {
1354 rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
1355 }
1356
/* Mask and acknowledge all events; the ChipCmd read flushes the
 * posted MMIO writes before returning. */
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_irq_disable(tp);
	rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
	RTL_R8(ChipCmd);	/* posted-write flush */
}
1365
1366 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
1367 {
1368 void __iomem *ioaddr = tp->mmio_addr;
1369
1370 return RTL_R32(TBICSR) & TBIReset;
1371 }
1372
1373 static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
1374 {
1375 return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
1376 }
1377
1378 static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
1379 {
1380 return RTL_R32(TBICSR) & TBILinkOk;
1381 }
1382
1383 static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
1384 {
1385 return RTL_R8(PHYstatus) & LinkStatus;
1386 }
1387
1388 static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
1389 {
1390 void __iomem *ioaddr = tp->mmio_addr;
1391
1392 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
1393 }
1394
1395 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1396 {
1397 unsigned int val;
1398
1399 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1400 rtl_writephy(tp, MII_BMCR, val & 0xffff);
1401 }
1402
/*
 * Chip-specific EXGMAC fixups applied on every link state change.
 * The 0x1bc/0x1dc/0x1d0 values are vendor-provided magic that depends
 * on the negotiated speed; MAC versions 34/38 additionally bounce bit 0
 * of ERI 0xdc to reset the packet filter.
 */
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct net_device *dev = tp->dev;

	if (!netif_running(dev))
		return;

	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else if (RTL_R8(PHYstatus) & _100bps) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
		/* Reset packet filter */
		rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
			     ERIAR_EXGMAC);
		rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
			     ERIAR_EXGMAC);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
		if (RTL_R8(PHYstatus) & _10bps) {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
				      ERIAR_EXGMAC);
		}
	}
}
1459
/*
 * Update the carrier state from the chip's link indication.  When @pm is
 * true, a link-up cancels any scheduled runtime suspend and a link-down
 * schedules one 5 seconds out.
 */
static void __rtl8169_check_link_status(struct net_device *dev,
					struct rtl8169_private *tp,
					void __iomem *ioaddr, bool pm)
{
	if (tp->link_ok(ioaddr)) {
		rtl_link_chg_patch(tp);
		/* This is to cancel a scheduled suspend if there's one. */
		if (pm)
			pm_request_resume(&tp->pci_dev->dev);
		netif_carrier_on(dev);
		if (net_ratelimit())
			netif_info(tp, ifup, dev, "link up\n");
	} else {
		netif_carrier_off(dev);
		netif_info(tp, ifdown, dev, "link down\n");
		if (pm)
			pm_schedule_suspend(&tp->pci_dev->dev, 5000);
	}
}
1479
1480 static void rtl8169_check_link_status(struct net_device *dev,
1481 struct rtl8169_private *tp,
1482 void __iomem *ioaddr)
1483 {
1484 __rtl8169_check_link_status(dev, tp, ioaddr, false);
1485 }
1486
1487 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1488
1489 static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
1490 {
1491 void __iomem *ioaddr = tp->mmio_addr;
1492 u8 options;
1493 u32 wolopts = 0;
1494
1495 options = RTL_R8(Config1);
1496 if (!(options & PMEnable))
1497 return 0;
1498
1499 options = RTL_R8(Config3);
1500 if (options & LinkUp)
1501 wolopts |= WAKE_PHY;
1502 if (options & MagicPacket)
1503 wolopts |= WAKE_MAGIC;
1504
1505 options = RTL_R8(Config5);
1506 if (options & UWF)
1507 wolopts |= WAKE_UCAST;
1508 if (options & BWF)
1509 wolopts |= WAKE_BCAST;
1510 if (options & MWF)
1511 wolopts |= WAKE_MCAST;
1512
1513 return wolopts;
1514 }
1515
1516 static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1517 {
1518 struct rtl8169_private *tp = netdev_priv(dev);
1519
1520 rtl_lock_work(tp);
1521
1522 wol->supported = WAKE_ANY;
1523 wol->wolopts = __rtl8169_get_wol(tp);
1524
1525 rtl_unlock_work(tp);
1526 }
1527
/*
 * Program the chip's wake-up configuration from the WAKE_* bits in
 * @wolopts.  Config registers are protected by the 9346 lock; each
 * WAKE_* flag maps to one bit in Config3/Config5, and PME generation is
 * enabled whenever any wake source is requested (register differs by
 * chip generation).
 */
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int i;
	/* WAKE_* flag -> config register/bit mapping */
	static const struct {
		u32 opt;
		u16 reg;
		u8 mask;
	} cfg[] = {
		{ WAKE_PHY,   Config3, LinkUp },
		{ WAKE_MAGIC, Config3, MagicPacket },
		{ WAKE_UCAST, Config5, UWF },
		{ WAKE_BCAST, Config5, BWF },
		{ WAKE_MCAST, Config5, MWF },
		{ WAKE_ANY,   Config5, LanWake }
	};
	u8 options;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
		options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
		if (wolopts & cfg[i].opt)
			options |= cfg[i].mask;
		RTL_W8(cfg[i].reg, options);
	}

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
		/* older chips gate PME through Config1 */
		options = RTL_R8(Config1) & ~PMEnable;
		if (wolopts)
			options |= PMEnable;
		RTL_W8(Config1, options);
		break;
	default:
		/* newer chips use the PME_SIGNAL bit in Config2 */
		options = RTL_R8(Config2) & ~PME_SIGNAL;
		if (wolopts)
			options |= PME_SIGNAL;
		RTL_W8(Config2, options);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);
}
1572
1573 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1574 {
1575 struct rtl8169_private *tp = netdev_priv(dev);
1576
1577 rtl_lock_work(tp);
1578
1579 if (wol->wolopts)
1580 tp->features |= RTL_FEATURE_WOL;
1581 else
1582 tp->features &= ~RTL_FEATURE_WOL;
1583 __rtl8169_set_wol(tp, wol->wolopts);
1584
1585 rtl_unlock_work(tp);
1586
1587 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
1588
1589 return 0;
1590 }
1591
1592 static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
1593 {
1594 return rtl_chip_infos[tp->mac_version].fw_name;
1595 }
1596
/* ethtool -i: report driver name/version, PCI address and firmware rev. */
static void rtl8169_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
	/* tp->rtl_fw may be NULL or an ERR_PTR when no firmware is loaded */
	if (!IS_ERR_OR_NULL(rtl_fw))
		strlcpy(info->fw_version, rtl_fw->version,
			sizeof(info->fw_version));
}
1611
1612 static int rtl8169_get_regs_len(struct net_device *dev)
1613 {
1614 return R8169_REGS_SIZE;
1615 }
1616
/*
 * Speed setting for TBI (fibre) mode: only forced 1000/full or
 * autonegotiation are meaningful; anything else is refused with
 * -EOPNOTSUPP.  @ignored is the unused advertising mask.
 */
static int rtl8169_set_speed_tbi(struct net_device *dev,
				 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int ret = 0;
	u32 reg;

	reg = RTL_R32(TBICSR);
	if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
	    (duplex == DUPLEX_FULL)) {
		RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
	} else if (autoneg == AUTONEG_ENABLE)
		RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
	else {
		netif_warn(tp, link, dev,
			   "incorrect speed setting refused in TBI mode\n");
		ret = -EOPNOTSUPP;
	}

	return ret;
}
1639
/*
 * Speed setting for copper (MII) mode.  With autoneg enabled, the
 * advertisement registers are rebuilt from @adv; otherwise BMCR is
 * forced to the requested 10/100 speed and duplex (forcing 1000 is not
 * supported).  Returns 0 on success, -EINVAL for a bad request.
 */
static int rtl8169_set_speed_xmii(struct net_device *dev,
				  u8 autoneg, u16 speed, u8 duplex, u32 adv)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int giga_ctrl, bmcr;
	int rc = -EINVAL;

	/* select the standard PHY register page */
	rtl_writephy(tp, 0x1f, 0x0000);

	if (autoneg == AUTONEG_ENABLE) {
		int auto_nego;

		auto_nego = rtl_readphy(tp, MII_ADVERTISE);
		auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL);

		if (adv & ADVERTISED_10baseT_Half)
			auto_nego |= ADVERTISE_10HALF;
		if (adv & ADVERTISED_10baseT_Full)
			auto_nego |= ADVERTISE_10FULL;
		if (adv & ADVERTISED_100baseT_Half)
			auto_nego |= ADVERTISE_100HALF;
		if (adv & ADVERTISED_100baseT_Full)
			auto_nego |= ADVERTISE_100FULL;

		/* always advertise symmetric and asymmetric pause */
		auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
		giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);

		/* The 8100e/8101e/8102e do Fast Ethernet only. */
		if (tp->mii.supports_gmii) {
			if (adv & ADVERTISED_1000baseT_Half)
				giga_ctrl |= ADVERTISE_1000HALF;
			if (adv & ADVERTISED_1000baseT_Full)
				giga_ctrl |= ADVERTISE_1000FULL;
		} else if (adv & (ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full)) {
			netif_info(tp, link, dev,
				   "PHY does not support 1000Mbps\n");
			goto out;
		}

		bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

		rtl_writephy(tp, MII_ADVERTISE, auto_nego);
		rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
	} else {
		giga_ctrl = 0;

		if (speed == SPEED_10)
			bmcr = 0;
		else if (speed == SPEED_100)
			bmcr = BMCR_SPEED100;
		else
			goto out;

		if (duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	rtl_writephy(tp, MII_BMCR, bmcr);

	/* vendor fixup for forced 100Mbps on the oldest gigabit chips */
	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
			rtl_writephy(tp, 0x17, 0x2138);
			rtl_writephy(tp, 0x0e, 0x0260);
		} else {
			rtl_writephy(tp, 0x17, 0x2108);
			rtl_writephy(tp, 0x0e, 0x0000);
		}
	}

	rc = 0;
out:
	return rc;
}
1718
1719 static int rtl8169_set_speed(struct net_device *dev,
1720 u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1721 {
1722 struct rtl8169_private *tp = netdev_priv(dev);
1723 int ret;
1724
1725 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
1726 if (ret < 0)
1727 goto out;
1728
1729 if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
1730 (advertising & ADVERTISED_1000baseT_Full)) {
1731 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
1732 }
1733 out:
1734 return ret;
1735 }
1736
1737 static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1738 {
1739 struct rtl8169_private *tp = netdev_priv(dev);
1740 int ret;
1741
1742 del_timer_sync(&tp->timer);
1743
1744 rtl_lock_work(tp);
1745 ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
1746 cmd->duplex, cmd->advertising);
1747 rtl_unlock_work(tp);
1748
1749 return ret;
1750 }
1751
1752 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1753 netdev_features_t features)
1754 {
1755 struct rtl8169_private *tp = netdev_priv(dev);
1756
1757 if (dev->mtu > TD_MSS_MAX)
1758 features &= ~NETIF_F_ALL_TSO;
1759
1760 if (dev->mtu > JUMBO_1K &&
1761 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1762 features &= ~NETIF_F_IP_CSUM;
1763
1764 return features;
1765 }
1766
1767 static void __rtl8169_set_features(struct net_device *dev,
1768 netdev_features_t features)
1769 {
1770 struct rtl8169_private *tp = netdev_priv(dev);
1771 netdev_features_t changed = features ^ dev->features;
1772 void __iomem *ioaddr = tp->mmio_addr;
1773
1774 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1775 return;
1776
1777 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1778 if (features & NETIF_F_RXCSUM)
1779 tp->cp_cmd |= RxChkSum;
1780 else
1781 tp->cp_cmd &= ~RxChkSum;
1782
1783 if (dev->features & NETIF_F_HW_VLAN_RX)
1784 tp->cp_cmd |= RxVlan;
1785 else
1786 tp->cp_cmd &= ~RxVlan;
1787
1788 RTL_W16(CPlusCmd, tp->cp_cmd);
1789 RTL_R16(CPlusCmd);
1790 }
1791 if (changed & NETIF_F_RXALL) {
1792 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1793 if (features & NETIF_F_RXALL)
1794 tmp |= (AcceptErr | AcceptRunt);
1795 RTL_W32(RxConfig, tmp);
1796 }
1797 }
1798
1799 static int rtl8169_set_features(struct net_device *dev,
1800 netdev_features_t features)
1801 {
1802 struct rtl8169_private *tp = netdev_priv(dev);
1803
1804 rtl_lock_work(tp);
1805 __rtl8169_set_features(dev, features);
1806 rtl_unlock_work(tp);
1807
1808 return 0;
1809 }
1810
1811
1812 static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1813 {
1814 return (vlan_tx_tag_present(skb)) ?
1815 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1816 }
1817
/*
 * If the RX descriptor carries a hardware-stripped VLAN tag (opts2 low
 * 16 bits, byte-swapped), attach it to the skb.
 */
static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	if (opts2 & RxVlanTag)
		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
}
1825
/*
 * ethtool get_settings for TBI (fibre) mode: speed/duplex are fixed at
 * 1000/full; only the autoneg flag is read back from TBICSR.
 */
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 status;

	cmd->supported =
		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->transceiver = XCVR_INTERNAL;

	status = RTL_R32(TBICSR);
	cmd->advertising = (status & TBINwEnable) ?  ADVERTISED_Autoneg : 0;
	cmd->autoneg = !!(status & TBINwEnable);

	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL; /* Always set */

	return 0;
}
1846
1847 static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
1848 {
1849 struct rtl8169_private *tp = netdev_priv(dev);
1850
1851 return mii_ethtool_gset(&tp->mii, cmd);
1852 }
1853
1854 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1855 {
1856 struct rtl8169_private *tp = netdev_priv(dev);
1857 int rc;
1858
1859 rtl_lock_work(tp);
1860 rc = tp->get_settings(dev, cmd);
1861 rtl_unlock_work(tp);
1862
1863 return rc;
1864 }
1865
1866 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1867 void *p)
1868 {
1869 struct rtl8169_private *tp = netdev_priv(dev);
1870
1871 if (regs->len > R8169_REGS_SIZE)
1872 regs->len = R8169_REGS_SIZE;
1873
1874 rtl_lock_work(tp);
1875 memcpy_fromio(p, tp->mmio_addr, regs->len);
1876 rtl_unlock_work(tp);
1877 }
1878
1879 static u32 rtl8169_get_msglevel(struct net_device *dev)
1880 {
1881 struct rtl8169_private *tp = netdev_priv(dev);
1882
1883 return tp->msg_enable;
1884 }
1885
1886 static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1887 {
1888 struct rtl8169_private *tp = netdev_priv(dev);
1889
1890 tp->msg_enable = value;
1891 }
1892
/* Names for the ethtool statistics, in the exact order in which
 * rtl8169_get_ethtool_stats() fills the data array. */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1908
1909 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1910 {
1911 switch (sset) {
1912 case ETH_SS_STATS:
1913 return ARRAY_SIZE(rtl8169_gstrings);
1914 default:
1915 return -EOPNOTSUPP;
1916 }
1917 }
1918
/* CounterDump bit stays set while the tally counter DMA is in flight. */
DECLARE_RTL_COND(rtl_counters_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CounterAddrLow) & CounterDump;
}
1925
/*
 * Ask the chip to DMA its hardware tally counters into a temporary
 * coherent buffer and, on success, cache them in tp->counters.
 * Silently does nothing when the receiver is off or the buffer
 * allocation fails.
 */
static void rtl8169_update_counters(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	struct rtl8169_counters *counters;
	dma_addr_t paddr;
	u32 cmd;

	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled.
	 */
	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
		return;

	counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
	if (!counters)
		return;

	/* hand the chip the DMA address, then trigger the dump */
	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
	cmd = (u64)paddr & DMA_BIT_MASK(32);
	RTL_W32(CounterAddrLow, cmd);
	RTL_W32(CounterAddrLow, cmd | CounterDump);

	if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
		memcpy(&tp->counters, counters, sizeof(*counters));

	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

	dma_free_coherent(d, sizeof(*counters), counters, paddr);
}
1959
/*
 * ethtool -S: refresh the hardware tally counters and unpack them in
 * rtl8169_gstrings order.  ("tx_underun" matches the misspelled field
 * name in struct rtl8169_counters.)
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	ASSERT_RTNL();

	rtl8169_update_counters(dev);

	data[0] = le64_to_cpu(tp->counters.tx_packets);
	data[1] = le64_to_cpu(tp->counters.rx_packets);
	data[2] = le64_to_cpu(tp->counters.tx_errors);
	data[3] = le32_to_cpu(tp->counters.rx_errors);
	data[4] = le16_to_cpu(tp->counters.rx_missed);
	data[5] = le16_to_cpu(tp->counters.align_errors);
	data[6] = le32_to_cpu(tp->counters.tx_one_collision);
	data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
	data[8] = le64_to_cpu(tp->counters.rx_unicast);
	data[9] = le64_to_cpu(tp->counters.rx_broadcast);
	data[10] = le32_to_cpu(tp->counters.rx_multicast);
	data[11] = le16_to_cpu(tp->counters.tx_aborted);
	data[12] = le16_to_cpu(tp->counters.tx_underun);
}
1983
1984 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1985 {
1986 switch(stringset) {
1987 case ETH_SS_STATS:
1988 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1989 break;
1990 }
1991 }
1992
/* ethtool operations shared by all supported chip variants. */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_sset_count		= rtl8169_get_sset_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_ts_info		= ethtool_op_get_ts_info,
};
2009
/*
 * Identify the chip by matching the hardware version bits of TxConfig
 * (the XID) against the mask/value table below.  Entries are ordered
 * most-specific first; the catch-all at the end selects
 * RTL_GIGA_MAC_NONE, in which case @default_version is used.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
				    struct net_device *dev, u8 default_version)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/*
	 * The driver currently handles the 8168Bf and the 8168Be identically
	 * but they can be identified more specifically through the test below
	 * if needed:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
	 *
	 * Same thing for the 8101Eb and the 8101Ec:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
	 */
	static const struct rtl_mac_info {
		u32 mask;
		u32 val;
		int mac_version;
	} mac_info[] = {
		/* 8168G family. */
		{ 0x7cf00000, 0x4c100000,	RTL_GIGA_MAC_VER_41 },
		{ 0x7cf00000, 0x4c000000,	RTL_GIGA_MAC_VER_40 },

		/* 8168F family. */
		{ 0x7c800000, 0x48800000,	RTL_GIGA_MAC_VER_38 },
		{ 0x7cf00000, 0x48100000,	RTL_GIGA_MAC_VER_36 },
		{ 0x7cf00000, 0x48000000,	RTL_GIGA_MAC_VER_35 },

		/* 8168E family. */
		{ 0x7c800000, 0x2c800000,	RTL_GIGA_MAC_VER_34 },
		{ 0x7cf00000, 0x2c200000,	RTL_GIGA_MAC_VER_33 },
		{ 0x7cf00000, 0x2c100000,	RTL_GIGA_MAC_VER_32 },
		{ 0x7c800000, 0x2c000000,	RTL_GIGA_MAC_VER_33 },

		/* 8168D family. */
		{ 0x7cf00000, 0x28300000,	RTL_GIGA_MAC_VER_26 },
		{ 0x7cf00000, 0x28100000,	RTL_GIGA_MAC_VER_25 },
		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_26 },

		/* 8168DP family. */
		{ 0x7cf00000, 0x28800000,	RTL_GIGA_MAC_VER_27 },
		{ 0x7cf00000, 0x28a00000,	RTL_GIGA_MAC_VER_28 },
		{ 0x7cf00000, 0x28b00000,	RTL_GIGA_MAC_VER_31 },

		/* 8168C family. */
		{ 0x7cf00000, 0x3cb00000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c900000,	RTL_GIGA_MAC_VER_23 },
		{ 0x7cf00000, 0x3c800000,	RTL_GIGA_MAC_VER_18 },
		{ 0x7c800000, 0x3c800000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c000000,	RTL_GIGA_MAC_VER_19 },
		{ 0x7cf00000, 0x3c200000,	RTL_GIGA_MAC_VER_20 },
		{ 0x7cf00000, 0x3c300000,	RTL_GIGA_MAC_VER_21 },
		{ 0x7cf00000, 0x3c400000,	RTL_GIGA_MAC_VER_22 },
		{ 0x7c800000, 0x3c000000,	RTL_GIGA_MAC_VER_22 },

		/* 8168B family. */
		{ 0x7cf00000, 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x7cf00000, 0x38500000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x38000000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },

		/* 8101 family. */
		{ 0x7cf00000, 0x44900000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44800000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44000000,	RTL_GIGA_MAC_VER_37 },
		{ 0x7cf00000, 0x40b00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40a00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40900000,	RTL_GIGA_MAC_VER_29 },
		{ 0x7c800000, 0x40800000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x34a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x24a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x34900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x24900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x34800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x24800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x7cf00000, 0x34300000,	RTL_GIGA_MAC_VER_10 },
		{ 0x7cf00000, 0x34200000,	RTL_GIGA_MAC_VER_16 },
		{ 0x7c800000, 0x34800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x24800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x34000000,	RTL_GIGA_MAC_VER_16 },
		/* FIXME: where did these entries come from ? -- FR */
		{ 0xfc800000, 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0xfc800000, 0x30800000,	RTL_GIGA_MAC_VER_14 },

		/* 8110 family. */
		{ 0xfc800000, 0x98000000,	RTL_GIGA_MAC_VER_06 },
		{ 0xfc800000, 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0xfc800000, 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0xfc800000, 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0xfc800000, 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0xfc800000, 0x00000000,	RTL_GIGA_MAC_VER_01 },

		/* Catch-all */
		{ 0x00000000, 0x00000000,	RTL_GIGA_MAC_NONE   }
	};
	const struct rtl_mac_info *p = mac_info;
	u32 reg;

	reg = RTL_R32(TxConfig);
	while ((reg & p->mask) != p->val)
		p++;
	tp->mac_version = p->mac_version;

	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
		netif_notice(tp, probe, dev,
			     "unknown MAC, using family default\n");
		tp->mac_version = default_version;
	}
}
2121
2122 static void rtl8169_print_mac_version(struct rtl8169_private *tp)
2123 {
2124 dprintk("mac_version = 0x%02x\n", tp->mac_version);
2125 }
2126
/* One PHY register/value pair for rtl_writephy_batch(). */
struct phy_reg {
	u16 reg;
	u16 val;
};
2131
2132 static void rtl_writephy_batch(struct rtl8169_private *tp,
2133 const struct phy_reg *regs, int len)
2134 {
2135 while (len-- > 0) {
2136 rtl_writephy(tp, regs->reg, regs->val);
2137 regs++;
2138 }
2139 }
2140
2141 #define PHY_READ 0x00000000
2142 #define PHY_DATA_OR 0x10000000
2143 #define PHY_DATA_AND 0x20000000
2144 #define PHY_BJMPN 0x30000000
2145 #define PHY_READ_EFUSE 0x40000000
2146 #define PHY_READ_MAC_BYTE 0x50000000
2147 #define PHY_WRITE_MAC_BYTE 0x60000000
2148 #define PHY_CLEAR_READCOUNT 0x70000000
2149 #define PHY_WRITE 0x80000000
2150 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2151 #define PHY_COMP_EQ_SKIPN 0xa0000000
2152 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2153 #define PHY_WRITE_PREVIOUS 0xc0000000
2154 #define PHY_SKIPN 0xd0000000
2155 #define PHY_DELAY_MS 0xe0000000
2156 #define PHY_WRITE_ERI_WORD 0xf0000000
2157
/*
 * On-disk header of the rtl_nic firmware container.  A zero @magic
 * selects this self-describing format (whole-file checksum, embedded
 * version string, opcode stream at @fw_start); any other value means
 * the file is a bare opcode stream (see rtl_fw_format_ok()).
 */
struct fw_info {
	u32	magic;
	char	version[RTL_VER_SIZE];
	__le32	fw_start;
	__le32	fw_len;
	u8	chksum;
} __packed;
2165
2166 #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
2167
/*
 * Validate the loaded firmware image and locate the PHY opcode stream.
 * Handles both container formats: headered (magic == 0, with checksum
 * and version) and legacy raw opcode files.  On success, fills
 * rtl_fw->phy_action and rtl_fw->version; returns false for a
 * malformed image.
 */
static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	const struct firmware *fw = rtl_fw->fw;
	struct fw_info *fw_info = (struct fw_info *)fw->data;
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	char *version = rtl_fw->version;
	bool rc = false;

	if (fw->size < FW_OPCODE_SIZE)
		goto out;

	if (!fw_info->magic) {
		size_t i, size, start;
		u8 checksum = 0;

		if (fw->size < sizeof(*fw_info))
			goto out;

		/* the byte-sum of the whole file must be zero */
		for (i = 0; i < fw->size; i++)
			checksum += fw->data[i];
		if (checksum != 0)
			goto out;

		start = le32_to_cpu(fw_info->fw_start);
		if (start > fw->size)
			goto out;

		/* fw_len is in opcodes; bound it against the file tail */
		size = le32_to_cpu(fw_info->fw_len);
		if (size > (fw->size - start) / FW_OPCODE_SIZE)
			goto out;

		memcpy(version, fw_info->version, RTL_VER_SIZE);

		pa->code = (__le32 *)(fw->data + start);
		pa->size = size;
	} else {
		/* legacy format: the whole file is the opcode stream */
		if (fw->size % FW_OPCODE_SIZE)
			goto out;

		strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);

		pa->code = (__le32 *)fw->data;
		pa->size = fw->size / FW_OPCODE_SIZE;
	}
	version[RTL_VER_SIZE - 1] = 0;

	rc = true;
out:
	return rc;
}
2218
/*
 * Statically validate every opcode of the firmware's PHY action code so
 * that rtl_phy_write_fw() cannot jump or index outside the code array.
 * The top nibble of each 32-bit word selects the action (PHY_*), bits
 * 27..16 carry a register number / jump distance, bits 15..0 the data.
 * Returns false (and logs) on any unknown or out-of-range opcode.
 */
static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
			   struct rtl_fw_phy_action *pa)
{
	bool rc = false;
	size_t index;

	for (index = 0; index < pa->size; index++) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 regno = (action & 0x0fff0000) >> 16;

		switch(action & 0xf0000000) {
		/* Simple actions: always advance by one, nothing to bound-check. */
		case PHY_READ:
		case PHY_DATA_OR:
		case PHY_DATA_AND:
		case PHY_READ_EFUSE:
		case PHY_CLEAR_READCOUNT:
		case PHY_WRITE:
		case PHY_WRITE_PREVIOUS:
		case PHY_DELAY_MS:
			break;

		case PHY_BJMPN:
			/* Backward jump by regno must stay within the code. */
			if (regno > index) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* May advance index by 2 (see rtl_phy_write_fw). */
			if (index + 2 >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_COMP_EQ_SKIPN:
		case PHY_COMP_NEQ_SKIPN:
		case PHY_SKIPN:
			/* May skip regno opcodes past the next one. */
			if (index + 1 + regno >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;

		/* Defined in the opcode map but not implemented here. */
		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			netif_err(tp, ifup, tp->dev,
				  "Invalid action 0x%08x\n", action);
			goto out;
		}
	}
	rc = true;
out:
	return rc;
}
2277
2278 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2279 {
2280 struct net_device *dev = tp->dev;
2281 int rc = -EINVAL;
2282
2283 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2284 netif_err(tp, ifup, dev, "invalid firwmare\n");
2285 goto out;
2286 }
2287
2288 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
2289 rc = 0;
2290 out:
2291 return rc;
2292 }
2293
/*
 * Interpreter for the firmware's PHY action bytecode.  Each 32-bit word
 * encodes an action in its top nibble, a register number / jump distance
 * in bits 27..16 and immediate data in bits 15..0.  State carried across
 * opcodes: 'predata' (last read/modified value) and 'count' (number of
 * PHY_READs since the last PHY_CLEAR_READCOUNT).  A zero word terminates
 * execution early.  Code must have passed rtl_fw_data_ok() beforehand;
 * unimplemented opcodes here are a hard BUG().
 */
static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	u32 predata, count;
	size_t index;

	predata = count = 0;

	for (index = 0; index < pa->size; ) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 data = action & 0x0000ffff;
		u32 regno = (action & 0x0fff0000) >> 16;

		/* All-zero word: early end-of-program marker. */
		if (!action)
			break;

		switch(action & 0xf0000000) {
		case PHY_READ:
			predata = rtl_readphy(tp, regno);
			count++;
			index++;
			break;
		case PHY_DATA_OR:
			predata |= data;
			index++;
			break;
		case PHY_DATA_AND:
			predata &= data;
			index++;
			break;
		case PHY_BJMPN:
			/* Backward jump: bounded by rtl_fw_data_ok(). */
			index -= regno;
			break;
		case PHY_READ_EFUSE:
			predata = rtl8168d_efuse_read(tp, regno);
			index++;
			break;
		case PHY_CLEAR_READCOUNT:
			count = 0;
			index++;
			break;
		case PHY_WRITE:
			rtl_writephy(tp, regno, data);
			index++;
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* Skip the next opcode when the read count matches. */
			index += (count == data) ? 2 : 1;
			break;
		case PHY_COMP_EQ_SKIPN:
			if (predata == data)
				index += regno;
			index++;
			break;
		case PHY_COMP_NEQ_SKIPN:
			if (predata != data)
				index += regno;
			index++;
			break;
		case PHY_WRITE_PREVIOUS:
			/* Write back the accumulated predata value. */
			rtl_writephy(tp, regno, predata);
			index++;
			break;
		case PHY_SKIPN:
			index += regno + 1;
			break;
		case PHY_DELAY_MS:
			mdelay(data);
			index++;
			break;

		/* Rejected by rtl_fw_data_ok(); reaching here is a driver bug. */
		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			BUG();
		}
	}
}
2372
2373 static void rtl_release_firmware(struct rtl8169_private *tp)
2374 {
2375 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2376 release_firmware(tp->rtl_fw->fw);
2377 kfree(tp->rtl_fw);
2378 }
2379 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
2380 }
2381
2382 static void rtl_apply_firmware(struct rtl8169_private *tp)
2383 {
2384 struct rtl_fw *rtl_fw = tp->rtl_fw;
2385
2386 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2387 if (!IS_ERR_OR_NULL(rtl_fw))
2388 rtl_phy_write_fw(tp, rtl_fw);
2389 }
2390
2391 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2392 {
2393 if (rtl_readphy(tp, reg) != val)
2394 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2395 else
2396 rtl_apply_firmware(tp);
2397 }
2398
/*
 * PHY setup for the RTL8169S.  The register/value pairs are vendor magic
 * from Realtek with no public documentation; page selects (reg 0x1f)
 * and ordering are significant — do not reorder.
 */
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x006e },
		{ 0x08, 0x0708 },
		{ 0x15, 0x4000 },
		{ 0x18, 0x65c7 },

		{ 0x1f, 0x0001 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x0000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf60 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x0077 },
		{ 0x04, 0x7800 },
		{ 0x04, 0x7000 },

		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf0f9 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xa000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x00bb },
		{ 0x04, 0xb800 },
		{ 0x04, 0xb000 },

		{ 0x03, 0xdf41 },
		{ 0x02, 0xdc60 },
		{ 0x01, 0x6340 },
		{ 0x00, 0x007d },
		{ 0x04, 0xd800 },
		{ 0x04, 0xd000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x100a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },

		{ 0x1f, 0x0000 },
		{ 0x0b, 0x0000 },
		{ 0x00, 0x9200 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2465
2466 static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
2467 {
2468 static const struct phy_reg phy_reg_init[] = {
2469 { 0x1f, 0x0002 },
2470 { 0x01, 0x90d0 },
2471 { 0x1f, 0x0000 }
2472 };
2473
2474 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2475 }
2476
2477 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2478 {
2479 struct pci_dev *pdev = tp->pci_dev;
2480
2481 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2482 (pdev->subsystem_device != 0xe000))
2483 return;
2484
2485 rtl_writephy(tp, 0x1f, 0x0001);
2486 rtl_writephy(tp, 0x10, 0xf01b);
2487 rtl_writephy(tp, 0x1f, 0x0000);
2488 }
2489
/*
 * PHY setup for the RTL8169SCd.  Vendor-magic register table plus a
 * board-specific quirk; values are undocumented — do not reorder.
 */
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x14, 0xfb54 },
		{ 0x18, 0xf5c7 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Gigabyte-board-only extra writes, no-op elsewhere. */
	rtl8169scd_hw_phy_config_quirk(tp);
}
2536
/*
 * PHY setup for the RTL8169SCe.  Same leading table as the SCd, with
 * additional vendor-magic groups; values are undocumented — do not reorder.
 */
static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0x8480 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x18, 0x67c7 },
		{ 0x04, 0x2000 },
		{ 0x03, 0x002f },
		{ 0x02, 0x4360 },
		{ 0x01, 0x0109 },
		{ 0x00, 0x3022 },
		{ 0x04, 0x2800 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2589
2590 static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
2591 {
2592 static const struct phy_reg phy_reg_init[] = {
2593 { 0x10, 0xf41b },
2594 { 0x1f, 0x0000 }
2595 };
2596
2597 rtl_writephy(tp, 0x1f, 0x0001);
2598 rtl_patchphy(tp, 0x16, 1 << 0);
2599
2600 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2601 }
2602
2603 static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
2604 {
2605 static const struct phy_reg phy_reg_init[] = {
2606 { 0x1f, 0x0001 },
2607 { 0x10, 0xf41b },
2608 { 0x1f, 0x0000 }
2609 };
2610
2611 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2612 }
2613
2614 static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
2615 {
2616 static const struct phy_reg phy_reg_init[] = {
2617 { 0x1f, 0x0000 },
2618 { 0x1d, 0x0f00 },
2619 { 0x1f, 0x0002 },
2620 { 0x0c, 0x1ec8 },
2621 { 0x1f, 0x0000 }
2622 };
2623
2624 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2625 }
2626
2627 static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
2628 {
2629 static const struct phy_reg phy_reg_init[] = {
2630 { 0x1f, 0x0001 },
2631 { 0x1d, 0x3d98 },
2632 { 0x1f, 0x0000 }
2633 };
2634
2635 rtl_writephy(tp, 0x1f, 0x0000);
2636 rtl_patchphy(tp, 0x14, 1 << 5);
2637 rtl_patchphy(tp, 0x0d, 1 << 5);
2638
2639 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2640 }
2641
/*
 * PHY setup for the RTL8168C rev.1.  Vendor-magic table, then bit 5 of
 * regs 0x14/0x0d is set on page 0.  Values are undocumented — do not
 * reorder.
 */
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1f, 0x0002 },
		{ 0x00, 0x88d4 },
		{ 0x01, 0x82b1 },
		{ 0x03, 0x7002 },
		{ 0x08, 0x9e30 },
		{ 0x09, 0x01f0 },
		{ 0x0a, 0x5500 },
		{ 0x0c, 0x00c8 },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xc096 },
		{ 0x16, 0x000a },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x09, 0x2000 },
		{ 0x09, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2670
/*
 * PHY setup for the RTL8168C rev.2.  Vendor-magic table, then bits are
 * set in regs 0x16/0x14/0x0d.  Values are undocumented — do not reorder.
 */
static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x0761 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2698
/*
 * PHY setup for the RTL8168C rev.3 (also reused for rev.4).  Shorter
 * variant of the rev.2 sequence; values are undocumented — do not reorder.
 */
static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x5461 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2720
/* The RTL8168C rev.4 PHY takes exactly the same setup as rev.3. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
2725
/*
 * PHY setup for the RTL8168D rev.1.  Base vendor table, then an
 * efuse-dependent branch (byte at efuse offset 0x01 selects between two
 * parameter sets), PLL fine tuning, and a conditional firmware load.
 * Register values are vendor magic — do not reorder.
 */
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold from 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/*
	 * Rx Error Issue
	 * Fine Tune Switching regulator parameter
	 */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
	rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);

	/* Efuse byte 0x01 == 0xb1 selects the alternate parameter set. */
	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },
			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);

		/* Walk reg 0x0d's low byte up to 0x6c if it isn't there yet. */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x6662 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x6662 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* RSET couple improve */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0d, 0x0300);
	rtl_patchphy(tp, 0x0f, 0x0010);

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Load firmware only if MII_EXPANSION reads back 0xbf00. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2836
/*
 * PHY setup for the RTL8168D rev.2.  Same base table and efuse branch
 * structure as rev.1, with different alternate parameters (0x2642), no
 * "RSET couple" step, and a different firmware gate value (0xb300).
 * Register values are vendor magic — do not reorder.
 */
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold from 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/* Efuse byte 0x01 == 0xb1 selects the alternate parameter set. */
	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },

			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);
		/* Walk reg 0x0d's low byte up to 0x6c if it isn't there yet. */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x2642 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x2642 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	/* Switching regulator Slew rate */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0f, 0x0017);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Load firmware only if MII_EXPANSION reads back 0xb300. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2938
/*
 * PHY setup for the RTL8168D rev.3: a single unconditional vendor-magic
 * table (no efuse branch, no firmware).  Values are undocumented — do
 * not reorder.
 */
static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x10, 0x0008 },
		{ 0x0d, 0x006c },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0xa4d8 },
		{ 0x09, 0x281c },
		{ 0x07, 0x2883 },
		{ 0x0a, 0x6b35 },
		{ 0x1d, 0x3da4 },
		{ 0x1c, 0xeffd },
		{ 0x14, 0x7f52 },
		{ 0x18, 0x7fc6 },
		{ 0x08, 0x0601 },
		{ 0x06, 0x4063 },
		{ 0x10, 0xf074 },
		{ 0x1f, 0x0003 },
		{ 0x13, 0x0789 },
		{ 0x12, 0xf4bd },
		{ 0x1a, 0x04fd },
		{ 0x14, 0x84b0 },
		{ 0x1f, 0x0000 },
		{ 0x00, 0x9200 },

		{ 0x1f, 0x0005 },
		{ 0x01, 0x0340 },
		{ 0x1f, 0x0001 },
		{ 0x04, 0x4000 },
		{ 0x03, 0x1d21 },
		{ 0x02, 0x0c32 },
		{ 0x01, 0x0200 },
		{ 0x00, 0x5554 },
		{ 0x04, 0x4800 },
		{ 0x04, 0x4000 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0023 },
		{ 0x16, 0x0000 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2999
3000 static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
3001 {
3002 static const struct phy_reg phy_reg_init[] = {
3003 { 0x1f, 0x0001 },
3004 { 0x17, 0x0cc0 },
3005
3006 { 0x1f, 0x0007 },
3007 { 0x1e, 0x002d },
3008 { 0x18, 0x0040 },
3009 { 0x1f, 0x0000 }
3010 };
3011
3012 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3013 rtl_patchphy(tp, 0x0d, 1 << 5);
3014 }
3015
/*
 * PHY setup for the RTL8168E rev.1: firmware first, then a vendor-magic
 * table and a sequence of read-modify-write tweaks.  Values are
 * undocumented — do not reorder.
 */
static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b80 },
		{ 0x06, 0xc896 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0001 },
		{ 0x0b, 0x6c20 },
		{ 0x07, 0x2872 },
		{ 0x1c, 0xefff },
		{ 0x1f, 0x0003 },
		{ 0x14, 0x6420 },
		{ 0x1f, 0x0000 },

		/* Update PFM & 10M TX idle timer */
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002f },
		{ 0x15, 0x1919 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* DCO enable for 10M IDLE Power */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0023);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* For impedance matching */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE-related sequence ending with MMD-style 0x0d/0x0e accesses. */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
	rtl_writephy(tp, 0x1f, 0x0006);
	rtl_writephy(tp, 0x00, 0x5a00);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);
}
3088
/*
 * Mirror the 6-byte MAC address into the extended GigaMAC (ERI) registers
 * 0xe0/0xe4 and 0xf0/0xf4 as three little-endian 16-bit words.  Used by
 * rtl8168e_2_hw_phy_config() as a broken-BIOS workaround.
 */
static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
{
	const u16 w[] = {
		addr[0] | (addr[1] << 8),
		addr[2] | (addr[3] << 8),
		addr[4] | (addr[5] << 8)
	};
	const struct exgmac_reg e[] = {
		{ .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
		{ .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
		{ .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
		{ .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
	};

	rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
}
3105
/*
 * PHY setup for the RTL8168E rev.2 (8168E-VL): firmware load, vendor
 * table, performance/EEE tweaks, and a GigaMAC address refresh to work
 * around broken BIOSes.  Values are undocumented — do not reorder.
 */
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0004 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0002 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Green Setting */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b5b },
		{ 0x06, 0x9222 },
		{ 0x05, 0x8b6d },
		{ 0x06, 0x8000 },
		{ 0x05, 0x8b76 },
		{ 0x06, 0x8000 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE setting */
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
	rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
}
3192
/*
 * Common PHY tweaks shared by all RTL8168F variants (and RTL8411);
 * called after the variant-specific setup.  Ordered register writes —
 * do not reorder.
 */
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3214
/*
 * PHY setup for the RTL8168F rev.1: firmware, vendor table, common F
 * tweaks, then a 2-pair detection fix.  Values are undocumented — do
 * not reorder.
 */
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00fb },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hi-impedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3264
/* RTL8168F rev.2 needs no chip-specific table: firmware plus common F setup. */
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);
}
3271
/*
 * PHY setup for the RTL8411: firmware, common F tweaks, then the vendor
 * table and green/EEE sequences.  Note the table is deliberately applied
 * AFTER rtl8168f_hw_phy_config(), unlike the 8168F variants.  Values are
 * undocumented — do not reorder.
 */
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00aa },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hi-impedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};


	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Modify green table for giga */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b54);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8b5d);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8a7c);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a7f);
	rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
	rtl_writephy(tp, 0x05, 0x8a82);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a88);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* uc same-seed solution */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* eee setting */
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3368
/*
 * PHY setup for the RTL8168G rev.1.  First uploads a small MAC OCP
 * patch (vendor opcode blob) to 0xf800.., arms it via 0xfc26/0xfc28,
 * loads the PHY firmware, then applies OCP-register tweaks.
 */
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const u16 mac_ocp_patch[] = {
		0xe008, 0xe01b, 0xe01d, 0xe01f,
		0xe021, 0xe023, 0xe025, 0xe027,
		0x49d2, 0xf10d, 0x766c, 0x49e2,
		0xf00a, 0x1ec0, 0x8ee1, 0xc60a,

		0x77c0, 0x4870, 0x9fc0, 0x1ea0,
		0xc707, 0x8ee1, 0x9d6c, 0xc603,
		0xbe00, 0xb416, 0x0076, 0xe86c,
		0xc602, 0xbe00, 0x0000, 0xc602,

		0xbe00, 0x0000, 0xc602, 0xbe00,
		0x0000, 0xc602, 0xbe00, 0x0000,
		0xc602, 0xbe00, 0x0000, 0xc602,
		0xbe00, 0x0000, 0xc602, 0xbe00,

		0x0000, 0x0000, 0x0000, 0x0000
	};
	u32 i;

	/* Patch code for GPHY reset */
	for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
		r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
	r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
	r8168_mac_ocp_write(tp, 0xfc28, 0x0075);

	rtl_apply_firmware(tp);

	if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
	else
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);

	/*
	 * NOTE(review): asymmetric — the "if" branch sets bit 1 of 0xc41a
	 * but the "else" branch clears bit 1 of 0xbcc4.  Matches the vendor
	 * sequence as-is; confirm against Realtek reference code before
	 * changing.
	 */
	if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
		rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
	else
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);

	rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
	rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);

	r8168_phy_ocp_write(tp, 0xa436, 0x8012);
	rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);

	rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
}
3417
/*
 * PHY setup for the RTL8102E family (RTL_GIGA_MAC_VER_07..09).
 * Sets three individual PHY bits, then writes a short batch of
 * undocumented vendor register values on page 3.
 */
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0003 },
		{ 0x08, 0x441d },
		{ 0x01, 0x9100 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);	/* select page 0 */
	rtl_patchphy(tp, 0x11, 1 << 12);
	rtl_patchphy(tp, 0x19, 1 << 13);
	rtl_patchphy(tp, 0x10, 1 << 15);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3434
/*
 * PHY setup for the RTL8105E (RTL_GIGA_MAC_VER_29/30).
 * ALDPS (link-down power saving) must be off while the firmware
 * ram code is loaded; the batch afterwards applies vendor magic.
 */
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0005 },
		{ 0x1a, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0004 },
		{ 0x1c, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x15, 0x7701 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);	/* let the PHY leave power-saving state */

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3460
/*
 * PHY setup for the RTL8402 (RTL_GIGA_MAC_VER_37).
 * Disables ALDPS before loading the PHY firmware, then applies
 * an EEE (Energy Efficient Ethernet) related configuration.
 */
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
	/* Disable ALDPS before setting firmware */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(20);

	rtl_apply_firmware(tp);

	/* EEE setting */
	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x10, 0x401f);
	rtl_writephy(tp, 0x19, 0x7030);
	rtl_writephy(tp, 0x1f, 0x0000);	/* back to page 0 */
}
3477
/*
 * PHY setup for the RTL8106E (RTL_GIGA_MAC_VER_39).
 * Same ALDPS-off-then-firmware dance as the other 810x parts,
 * plus two ERI writes bracketing the register batch.
 */
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0004 },
		{ 0x10, 0xc07f },
		{ 0x19, 0x7030 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
}
3499
/*
 * Dispatch to the chip-specific PHY configuration routine based on
 * tp->mac_version. Called from rtl8169_init_phy() before the PHY
 * reset and autonegotiation restart.
 */
static void rtl_hw_phy_config(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_print_mac_version(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
		/* No PHY quirks needed. */
		break;
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
		rtl8169s_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_04:
		rtl8169sb_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_05:
		rtl8169scd_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_06:
		rtl8169sce_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
		rtl8102e_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_11:
		rtl8168bb_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_12:
		rtl8168bef_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_17:
		rtl8168bef_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_18:
		rtl8168cp_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_19:
		rtl8168c_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_20:
		rtl8168c_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_21:
		rtl8168c_3_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_22:
		rtl8168c_4_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
		rtl8168cp_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_25:
		rtl8168d_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_26:
		rtl8168d_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_27:
		rtl8168d_3_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_28:
		rtl8168d_4_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
		rtl8105e_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_31:
		/* None. */
		break;
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl8168e_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl8168e_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_35:
		rtl8168f_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_36:
		rtl8168f_2_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl8402_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl8411_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl8106e_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_40:
		rtl8168g_1_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_41:
	default:
		/* No chip-specific PHY setup known for these versions. */
		break;
	}
}
3609
/*
 * Periodic PHY watchdog: while the link is down, keep resetting the
 * PHY until it comes up, rearming tp->timer each pass. Not used for
 * RTL_GIGA_MAC_VER_01 (see the assert).
 */
static void rtl_phy_work(struct rtl8169_private *tp)
{
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);

	if (tp->phy_reset_pending(tp)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	if (tp->link_ok(ioaddr))
		return;		/* link is up: stop rearming the timer */

	netif_warn(tp, link, tp->dev, "PHY reset until link up\n");

	tp->phy_reset_enable(tp);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
}
3637
3638 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3639 {
3640 if (!test_and_set_bit(flag, tp->wk.flags))
3641 schedule_work(&tp->wk.work);
3642 }
3643
3644 static void rtl8169_phy_timer(unsigned long __opaque)
3645 {
3646 struct net_device *dev = (struct net_device *)__opaque;
3647 struct rtl8169_private *tp = netdev_priv(dev);
3648
3649 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
3650 }
3651
/*
 * Tear down everything rtl_init_one() set up, in reverse order:
 * unmap MMIO, release PCI regions, clear MWI, disable the device,
 * and finally free the netdev.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
3661
/* Poll condition: true while a PHY reset is still in progress. */
DECLARE_RTL_COND(rtl_phy_reset_cond)
{
	return tp->phy_reset_pending(tp);
}
3666
/*
 * Trigger a PHY reset and wait (1 ms steps, up to 100 iterations)
 * for the reset-pending condition to clear.
 */
static void rtl8169_phy_reset(struct net_device *dev,
			      struct rtl8169_private *tp)
{
	tp->phy_reset_enable(tp);
	rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
}
3673
3674 static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3675 {
3676 void __iomem *ioaddr = tp->mmio_addr;
3677
3678 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3679 (RTL_R8(PHYstatus) & TBI_Enable);
3680 }
3681
/*
 * Full PHY bring-up: apply chip-specific PHY config, a few legacy
 * MAC/PCI quirks for old (<= VER_06) chips, reset the PHY and start
 * autonegotiation advertising everything the PHY supports.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_phy_config(dev);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
	}

	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);

	/* Old chips need an explicit PCI cache line size as well. */
	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_phy_reset(dev, tp);

	/* Advertise all 10/100 modes, plus gigabit when supported. */
	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
			  ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			  (tp->mii.supports_gmii ?
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full : 0));

	if (rtl_tbi_enabled(tp))
		netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
3717
/*
 * Program the unicast MAC address into the MAC0/MAC4 registers.
 * The config registers must be unlocked (Cfg9346) around the writes,
 * and each write is followed by a read-back to flush it to hardware.
 * The 8168E-VL (VER_34) additionally needs the address mirrored into
 * ERI registers.
 */
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_lock_work(tp);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	/* High 2 bytes first, then low 4 bytes. */
	RTL_W32(MAC4, addr[4] | addr[5] << 8);
	RTL_R32(MAC4);

	RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
	RTL_R32(MAC0);

	if (tp->mac_version == RTL_GIGA_MAC_VER_34)
		rtl_rar_exgmac_set(tp, addr);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	rtl_unlock_work(tp);
}
3739
3740 static int rtl_set_mac_address(struct net_device *dev, void *p)
3741 {
3742 struct rtl8169_private *tp = netdev_priv(dev);
3743 struct sockaddr *addr = p;
3744
3745 if (!is_valid_ether_addr(addr->sa_data))
3746 return -EADDRNOTAVAIL;
3747
3748 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3749
3750 rtl_rar_set(tp, dev->dev_addr);
3751
3752 return 0;
3753 }
3754
3755 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3756 {
3757 struct rtl8169_private *tp = netdev_priv(dev);
3758 struct mii_ioctl_data *data = if_mii(ifr);
3759
3760 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
3761 }
3762
3763 static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3764 struct mii_ioctl_data *data, int cmd)
3765 {
3766 switch (cmd) {
3767 case SIOCGMIIPHY:
3768 data->phy_id = 32; /* Internal PHY */
3769 return 0;
3770
3771 case SIOCGMIIREG:
3772 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
3773 return 0;
3774
3775 case SIOCSMIIREG:
3776 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
3777 return 0;
3778 }
3779 return -EOPNOTSUPP;
3780 }
3781
/* MII ioctls are meaningless in TBI (fiber) mode: always refuse. */
static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
{
	return -EOPNOTSUPP;
}
3786
3787 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3788 {
3789 if (tp->features & RTL_FEATURE_MSI) {
3790 pci_disable_msi(pdev);
3791 tp->features &= ~RTL_FEATURE_MSI;
3792 }
3793 }
3794
/*
 * Select the MDIO (PHY register) access routines for this chip:
 * the DP (DASH) and G parts use indirect OCP-based access, everything
 * else uses the classic PHYAR-register interface.
 */
static void rtl_init_mdio_ops(struct rtl8169_private *tp)
{
	struct mdio_ops *ops = &tp->mdio_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
		ops->write = r8168dp_1_mdio_write;
		ops->read = r8168dp_1_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		ops->write = r8168dp_2_mdio_write;
		ops->read = r8168dp_2_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		ops->write = r8168g_mdio_write;
		ops->read = r8168g_mdio_read;
		break;
	default:
		ops->write = r8169_mdio_write;
		ops->read = r8169_mdio_read;
		break;
	}
}
3820
/*
 * WoL suspend quirk: on the listed chips the receiver must keep
 * accepting broadcast/multicast/unicast frames while suspended,
 * otherwise Wake-on-LAN packets are never seen.
 */
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_39:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		RTL_W32(RxConfig, RTL_R32(RxConfig) |
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
		break;
	default:
		break;
	}
}
3845
/*
 * WoL-aware power down. When any wake source is enabled, stop the PHY
 * (BMCR = 0) instead of fully powering it down, apply the suspend
 * quirk, and return true so callers skip the deeper power-off path.
 */
static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
{
	if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
		return false;	/* no wake source: caller does full power down */

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, 0x0000);

	rtl_wol_suspend_quirk(tp);

	return true;
}
3858
/* Power down the 810x PHY via the BMCR power-down bit (page 0). */
static void r810x_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3864
/* Power up the 810x PHY: clearing PDOWN and re-enabling autoneg. */
static void r810x_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3870
/*
 * PLL/PHY power down for 810x-class chips. If WoL is armed the
 * shallow WoL path is taken instead; otherwise the PHY is powered
 * down and, on chips that support it, the PLL is gated via PMCH.
 */
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_wol_pll_power_down(tp))
		return;

	r810x_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		/* These chips have no PMCH PLL gate. */
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
3893
/*
 * Reverse of r810x_pll_power_down(): wake the PHY, then ungate the
 * PLL via PMCH on chips that have the gate.
 */
static void r810x_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r810x_phy_power_up(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		/* No PMCH PLL gate on these chips. */
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}
}
3913
/*
 * Power up the 8168 PHY. Chips up to the D/DP generation additionally
 * need PHY register 0x0e cleared before autoneg is re-enabled.
 */
static void r8168_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0000);
		break;
	default:
		break;
	}
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3940
/*
 * Power down the 8168 PHY. VER_32/33 need autoneg kept enabled along
 * with PDOWN; the older B..DP chips first write 0x0200 to register
 * 0x0e and then fall through to the common PDOWN write.
 */
static void r8168_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0200);
		/* fall through - these chips also need the PDOWN write */
	default:
		rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
		break;
	}
}
3971
/*
 * PLL/PHY power down for 8168-class chips. Skipped entirely when a
 * DASH management agent (DP parts) or ASF is active, since those need
 * the NIC to stay powered. Otherwise: EPHY quirk for VER_32/33, then
 * either the shallow WoL path or full PHY + PMCH PLL power down.
 */
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31) &&
	    r8168dp_check_dash(tp)) {
		return;		/* DASH firmware owns the NIC */
	}

	if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_24) &&
	    (RTL_R16(CPlusCmd) & ASF)) {
		return;		/* ASF management needs power */
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_33)
		rtl_ephy_write(tp, 0x19, 0xff64);

	if (rtl_wol_pll_power_down(tp))
		return;

	r8168_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
4010
/*
 * Reverse of r8168_pll_power_down(): ungate the PLL via PMCH on the
 * chips that gate it, then power the PHY back up.
 */
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}

	r8168_phy_power_up(tp);
}
4029
/* Invoke an optional chip-specific hook; a NULL hook is a no-op. */
static void rtl_generic_op(struct rtl8169_private *tp,
			   void (*op)(struct rtl8169_private *))
{
	if (!op)
		return;

	op(tp);
}
4036
/* Run the chip-specific PLL power-down hook, if any. */
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.down);
}
4041
/* Run the chip-specific PLL power-up hook, if any. */
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.up);
}
4046
/*
 * Select the PLL power management hooks: r810x variants for the
 * fast-ethernet parts, r8168 variants for the gigabit parts, and
 * none at all for the chips with no PLL power control.
 */
static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
{
	struct pll_power_ops *ops = &tp->pll_power_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_39:
		ops->down = r810x_pll_power_down;
		ops->up = r810x_pll_power_up;
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		ops->down = r8168_pll_power_down;
		ops->up = r8168_pll_power_up;
		break;

	default:
		ops->down = NULL;
		ops->up = NULL;
		break;
	}
}
4098
/*
 * Program the base RxConfig value (FIFO threshold / DMA burst /
 * interrupt mode) appropriate for this chip generation. The accept-*
 * filter bits are managed separately by rtl_set_rx_mode().
 */
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_34:
		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
		break;
	default:
		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
		break;
	}
}
4135
4136 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
4137 {
4138 tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
4139 }
4140
/*
 * Enable jumbo frames via the chip-specific hook, with the config
 * registers unlocked around the change.
 */
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.enable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
4149
/*
 * Disable jumbo frames via the chip-specific hook, with the config
 * registers unlocked around the change.
 */
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.disable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
4158
/*
 * 8168C jumbo enable: set both jumbo bits and reduce the PCIe max
 * read request size (0x2 => 512 bytes) for jumbo operation.
 */
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
4167
/*
 * 8168C jumbo disable: clear both jumbo bits and restore the larger
 * PCIe max read request size (0x5 => 4096 bytes).
 */
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
4176
/* 8168DP jumbo enable: only the Config3 jumbo bit is needed. */
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
}
4183
/* 8168DP jumbo disable: clear the Config3 jumbo bit. */
static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
}
4190
/*
 * 8168E jumbo enable: raise the max TX packet size, set the jumbo
 * bits, and shrink the PCIe max read request size.
 */
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x3f);
	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | 0x01);	/* jumbo bit in Config4 */
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
4200
/*
 * 8168E jumbo disable: restore the normal max TX packet size, clear
 * the jumbo bits, and restore the PCIe max read request size.
 */
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x0c);
	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~0x01);	/* clear Config4 jumbo bit */
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
4210
/* 8168B rev 0 jumbo enable: PCIe read-request tweak only, no-snoop set. */
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4216
/* 8168B rev 0 jumbo disable: restore the larger PCIe read request size. */
static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4222
/* 8168B rev 1 jumbo enable: rev 0 tweak plus a Config4 bit. */
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_enable(tp);

	RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
}
4231
/* 8168B rev 1 jumbo disable: rev 0 tweak plus clearing the Config4 bit. */
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_disable(tp);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
4240
/*
 * Select the jumbo-frame enable/disable hooks for this chip. Chips
 * with NULL hooks either need no action (8169) or do not support
 * jumbo frames at all (810x, 8168G).
 */
static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
{
	struct jumbo_ops *ops = &tp->jumbo_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		ops->disable	= r8168b_0_hw_jumbo_disable;
		ops->enable	= r8168b_0_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		ops->disable	= r8168b_1_hw_jumbo_disable;
		ops->enable	= r8168b_1_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
		ops->disable	= r8168c_hw_jumbo_disable;
		ops->enable	= r8168c_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
		ops->disable	= r8168dp_hw_jumbo_disable;
		ops->enable	= r8168dp_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
		ops->disable	= r8168e_hw_jumbo_disable;
		ops->enable	= r8168e_hw_jumbo_enable;
		break;

	/*
	 * No action needed for jumbo frames with 8169.
	 * No jumbo for 810x at all.
	 */
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	default:
		ops->disable	= NULL;
		ops->enable	= NULL;
		break;
	}
}
4292
/* Poll condition: true while the chip's software reset bit is still set. */
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(ChipCmd) & CmdReset;
}
4299
/*
 * Issue a chip software reset and busy-wait (100 us steps, up to 100
 * iterations) for the self-clearing CmdReset bit to go low.
 */
static void rtl_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(ChipCmd, CmdReset);

	rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
4308
/*
 * Load and validate the PHY firmware patch for this chip, storing the
 * result in tp->rtl_fw (NULL on failure — the driver then runs without
 * the patch). Uses a goto chain so each failure point unwinds exactly
 * what was acquired before it.
 */
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw;
	const char *name;
	int rc = -ENOMEM;

	name = rtl_lookup_firmware_name(tp);
	if (!name)
		goto out_no_firmware;	/* chip needs no firmware patch */

	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
	if (!rtl_fw)
		goto err_warn;

	rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
	if (rc < 0)
		goto err_free;

	rc = rtl_check_firmware(tp, rtl_fw);
	if (rc < 0)
		goto err_release_firmware;

	tp->rtl_fw = rtl_fw;
out:
	return;

err_release_firmware:
	release_firmware(rtl_fw->fw);
err_free:
	kfree(rtl_fw);
err_warn:
	netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
		   name, rc);
out_no_firmware:
	tp->rtl_fw = NULL;
	goto out;
}
4346
/*
 * Fetch the firmware patch on first use. tp->rtl_fw starts out as an
 * ERR_PTR sentinel; once loaded (or definitively failed, NULL) the
 * cached value is reused.
 */
static void rtl_request_firmware(struct rtl8169_private *tp)
{
	if (IS_ERR(tp->rtl_fw))
		rtl_request_uncached_firmware(tp);
}
4352
/* Stop packet reception by clearing all accept-* bits in RxConfig. */
static void rtl_rx_close(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
4359
/* Poll condition: true while the normal-priority TX queue is still polled. */
DECLARE_RTL_COND(rtl_npq_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(TxPoll) & NPQ;
}
4366
/* Poll condition: true once the TX FIFO reports empty in TxConfig. */
DECLARE_RTL_COND(rtl_txcfg_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TxConfig) & TXCFG_EMPTY;
}
4373
/*
 * Quiesce and reset the chip: mask interrupts, close the receiver,
 * drain the transmitter using the chip-appropriate method, then issue
 * the software reset.
 */
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(tp);

	rtl_rx_close(tp);

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		/* DP parts: wait for the NPQ poll bit to clear. */
		rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_37 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_40 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_41 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_38) {
		/* Newer parts: request stop and wait for TX FIFO empty. */
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
	} else {
		/* Older parts have no status to poll; a fixed delay suffices. */
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		udelay(100);
	}

	rtl_hw_reset(tp);
}
4403
/* Program TxConfig with the DMA burst size and interframe gap. */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));
}
4412
/*
 * Start the hardware: run the chip-specific bring-up routine, then
 * unmask all interrupts.
 */
static void rtl_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->hw_start(dev);

	rtl_irq_enable_all(tp);
}
4421
/*
 * Program the DMA addresses of the TX and RX descriptor rings.
 * The high half of each 64-bit address must be written first.
 */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
					 void __iomem *ioaddr)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
4435
/*
 * Read CPlusCmd and write the same value back (the write-back latches
 * the register on some chips); returns the value read.
 */
static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
{
	u16 cmd;

	cmd = RTL_R16(CPlusCmd);
	RTL_W16(CPlusCmd, cmd);
	return cmd;
}
4444
/*
 * Program the RX size filter just above the buffer size so it never
 * rejects a frame the buffers can hold.
 */
static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, rx_buf_sz + 1);
}
4450
4451 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4452 {
4453 static const struct rtl_cfg2_info {
4454 u32 mac_version;
4455 u32 clk;
4456 u32 val;
4457 } cfg2_info [] = {
4458 { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4459 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4460 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4461 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
4462 };
4463 const struct rtl_cfg2_info *p = cfg2_info;
4464 unsigned int i;
4465 u32 clk;
4466
4467 clk = RTL_R8(Config2) & PCI_Clock_66MHz;
4468 for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
4469 if ((p->mac_version == mac_version) && (p->clk == clk)) {
4470 RTL_W32(0x7c, p->val);
4471 break;
4472 }
4473 }
4474 }
4475
/*
 * ndo_set_rx_mode hook: program the accept-* bits in RxConfig and the
 * 64-bit multicast hash filter (MAR0/MAR4) from the netdev's flags and
 * multicast list.
 */
static void rtl_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			/* Hash: top 6 bits of the CRC select one of 64 bits. */
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	if (dev->features & NETIF_F_RXALL)
		rx_mode |= (AcceptErr | AcceptRunt);

	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;

	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
		/* Newer chips interpret the filter with swapped word order. */
		u32 data = mc_filter[0];

		mc_filter[0] = swab32(mc_filter[1]);
		mc_filter[1] = swab32(data);
	}

	/* Multicast hash filtering is broken on the 8168F: accept all. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_35)
		mc_filter[1] = mc_filter[0] = 0xffffffff;

	RTL_W32(MAR0 + 4, mc_filter[1]);
	RTL_W32(MAR0 + 0, mc_filter[0]);

	RTL_W32(RxConfig, tmp);
}
4528
/*
 * Hardware bring-up for the original 8169 family (VER_01..06):
 * unlocks config registers, programs RxConfig/TxConfig, CPlusCmd
 * quirks, interrupt mitigation and the descriptor ring addresses,
 * then enables TX/RX. Note the VER_01..04 chips require TX/RX to be
 * enabled before the ring registers are written, the others after.
 */
static void rtl_hw_start_8169(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_init_rxcfg(tp);

	RTL_W8(EarlyTxThres, NoEarlyTx);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		dprintk("Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl8169_set_magic_reg(ioaddr, tp->mac_version);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_02 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_03 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_04) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
4600
4601 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4602 {
4603 if (tp->csi_ops.write)
4604 tp->csi_ops.write(tp, addr, value);
4605 }
4606
4607 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4608 {
4609 return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
4610 }
4611
4612 static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
4613 {
4614 u32 csi;
4615
4616 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4617 rtl_csi_write(tp, 0x070c, csi | bits);
4618 }
4619
/* CSI access enable, variant 1 (top byte 0x17 — meaning undocumented). */
static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x17000000);
}
4624
/* CSI access enable, variant 2 (top byte 0x27 — meaning undocumented). */
static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x27000000);
}
4629
/* Poll condition: CSIAR busy/completion flag (used by the CSI accessors). */
DECLARE_RTL_COND(rtl_csiar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CSIAR) & CSIAR_FLAG;
}
4636
/* Indirect CSI write for most chips: data into CSIDR, then command+address
 * into CSIAR, then wait for the flag to drop.  Order matters. */
static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	/* Completion is best-effort: a timeout here is silently ignored. */
	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
4647
/* Indirect CSI read for most chips; returns ~0 if the chip never raises
 * the completion flag within the poll window. */
static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
4658
/* CSI write for 8402/8411 (VER_37/38): same as r8169_csi_write but the
 * command additionally carries CSIAR_FUNC_NIC. */
static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
		CSIAR_FUNC_NIC);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
4670
/* CSI read for 8402/8411 (VER_37/38); ~0 on poll timeout. */
static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
4681
/*
 * Select the CSI accessor pair for this chip revision.  Pre-PCIe chips
 * (8169 family and early 8101) get NULL hooks — rtl_csi_read/write then
 * degrade to no-ops returning ~0.
 */
static void rtl_init_csi_ops(struct rtl8169_private *tp)
{
	struct csi_ops *ops = &tp->csi_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		ops->write = NULL;
		ops->read = NULL;
		break;

	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
		ops->write = r8402_csi_write;
		ops->read = r8402_csi_read;
		break;

	default:
		ops->write = r8169_csi_write;
		ops->read = r8169_csi_read;
		break;
	}
}
4717
/* One EPHY read-modify-write fixup entry; applied by rtl_ephy_init(). */
struct ephy_info {
	unsigned int offset;	/* EPHY register to patch */
	u16 mask;		/* bits cleared from the current value */
	u16 bits;		/* bits OR-ed in after masking */
};
4723
4724 static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4725 int len)
4726 {
4727 u16 w;
4728
4729 while (len-- > 0) {
4730 w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4731 rtl_ephy_write(tp, e->offset, w);
4732 e++;
4733 }
4734 }
4735
/* Clear the PCIe CLKREQ enable bit in the Link Control register. */
static void rtl_disable_clock_request(struct pci_dev *pdev)
{
	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_CLKREQ_EN);
}
4741
/* Set the PCIe CLKREQ enable bit in the Link Control register. */
static void rtl_enable_clock_request(struct pci_dev *pdev)
{
	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
				 PCI_EXP_LNKCTL_CLKREQ_EN);
}
4747
/* CPlusCmd bits that must be cleared on 8168 chips during hw_start. */
#define R8168_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
4758
/* Chip-specific setup for 8168B (VER_11). */
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);

	/* Max read request size 2048 bytes plus no-snoop enable. */
	rtl_tx_performance_tweak(pdev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4771
/* Chip-specific setup for 8168B rev. E/F (VER_12/VER_17): 8168bb plus
 * tx packet size cap and clearing Config4 bit 0 (meaning undocumented). */
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_start_8168bb(tp);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
4782
/* Common tail for the 8168C/CP variants (callers do CSI/EPHY setup first). */
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_disable_clock_request(pdev);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4798
/* Chip-specific setup for 8168CP (VER_18). */
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
	/* Vendor-provided EPHY fixups; register meanings undocumented. */
	static const struct ephy_info e_info_8168cp[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0042 },
		{ 0x06, 0x0080,	0x0000 },
		{ 0x07, 0,	0x2000 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));

	__rtl_hw_start_8168cp(tp);
}
4815
/* Chip-specific setup for 8168CP variant 2 (VER_23). */
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4829
/* Chip-specific setup for 8168CP variant 3 (VER_24). */
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Magic. */
	RTL_W8(DBG_REG, 0x20);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4848
/* Chip-specific setup for 8168C variant 1 (VER_19). */
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* Vendor-provided EPHY fixups; register meanings undocumented. */
	static const struct ephy_info e_info_8168c_1[] = {
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0002 },
		{ 0x06, 0x0080,	0x0000 }
	};

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

	rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));

	__rtl_hw_start_8168cp(tp);
}
4866
/* Chip-specific setup for 8168C variant 2 (VER_20; also used for VER_21). */
static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168c_2[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x03, 0x0400,	0x0220 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));

	__rtl_hw_start_8168cp(tp);
}
4880
/* 8168C variant 3 (VER_21) is configured identically to variant 2. */
static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8168c_2(tp);
}
4885
/* 8168C variant 4 (VER_22): common 8168cp tail, no EPHY fixups needed. */
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
	rtl_csi_access_enable_2(tp);

	__rtl_hw_start_8168cp(tp);
}
4892
/* Chip-specific setup for 8168D (VER_25/26/27). */
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_disable_clock_request(pdev);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4908
/* Chip-specific setup for 8168DP (VER_31). */
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);
}
4922
4923 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4924 {
4925 void __iomem *ioaddr = tp->mmio_addr;
4926 struct pci_dev *pdev = tp->pci_dev;
4927 static const struct ephy_info e_info_8168d_4[] = {
4928 { 0x0b, ~0, 0x48 },
4929 { 0x19, 0x20, 0x50 },
4930 { 0x0c, ~0, 0x20 }
4931 };
4932 int i;
4933
4934 rtl_csi_access_enable_1(tp);
4935
4936 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4937
4938 RTL_W8(MaxTxPacketSize, TxPacketMax);
4939
4940 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4941 const struct ephy_info *e = e_info_8168d_4 + i;
4942 u16 w;
4943
4944 w = rtl_ephy_read(tp, e->offset);
4945 rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
4946 }
4947
4948 rtl_enable_clock_request(pdev);
4949 }
4950
/* Chip-specific setup for 8168E variant 1 (VER_32/33). */
static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	/* Vendor-provided EPHY fixups; register meanings undocumented. */
	static const struct ephy_info e_info_8168e_1[] = {
		{ 0x00, 0x0200,	0x0100 },
		{ 0x00, 0x0000,	0x0004 },
		{ 0x06, 0x0002,	0x0001 },
		{ 0x06, 0x0000,	0x0030 },
		{ 0x07, 0x0000,	0x2000 },
		{ 0x00, 0x0000,	0x0020 },
		{ 0x03, 0x5800,	0x2000 },
		{ 0x03, 0x0000,	0x0001 },
		{ 0x01, 0x0800,	0x1000 },
		{ 0x07, 0x0000,	0x4000 },
		{ 0x1e, 0x0000,	0x2000 },
		{ 0x19, 0xffff,	0xfe6c },
		{ 0x0a, 0x0000,	0x0040 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);

	/* Reset tx FIFO pointer */
	RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
	RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);

	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
4987
/* Chip-specific setup for 8168E variant 2 (VER_34). */
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_2[] = {
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_csi_access_enable_1(tp);

	rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* ERI (extended register) programming; values from the vendor driver. */
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5026
/* Common setup for the 8168F family (shared by 8168f_1 and 8411). */
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* ERI (extended register) programming; values from the vendor driver. */
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5057
/* Chip-specific setup for 8168F (VER_35/36). */
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x08, 0x0001,	0x0002 },
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
}
5077
5078 static void rtl_hw_start_8411(struct rtl8169_private *tp)
5079 {
5080 static const struct ephy_info e_info_8168f_1[] = {
5081 { 0x06, 0x00c0, 0x0020 },
5082 { 0x0f, 0xffff, 0x5200 },
5083 { 0x1e, 0x0000, 0x4000 },
5084 { 0x19, 0x0000, 0x0224 }
5085 };
5086
5087 rtl_hw_start_8168f(tp);
5088
5089 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5090
5091 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
5092 }
5093
/* Chip-specific setup for 8168G (VER_40/41). */
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* ERI (extended register) programming; values from the vendor driver. */
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
}
5123
/*
 * Bring up an 8168-family chip: common register setup, then dispatch to the
 * revision-specific hw_start routine, then enable tx/rx.
 */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x5151);

	/* Work around for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->event_slow |= RxFIFOOver | PCSTimeout;
		tp->event_slow &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_mode(dev);

	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));

	/* Read flushes the preceding posted MMIO writes. */
	RTL_R8(IntrMask);

	/* Revision-specific quirks and EPHY/ERI programming. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		rtl_hw_start_8168bb(tp);
		break;

	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		rtl_hw_start_8168bef(tp);
		break;

	case RTL_GIGA_MAC_VER_18:
		rtl_hw_start_8168cp_1(tp);
		break;

	case RTL_GIGA_MAC_VER_19:
		rtl_hw_start_8168c_1(tp);
		break;

	case RTL_GIGA_MAC_VER_20:
		rtl_hw_start_8168c_2(tp);
		break;

	case RTL_GIGA_MAC_VER_21:
		rtl_hw_start_8168c_3(tp);
		break;

	case RTL_GIGA_MAC_VER_22:
		rtl_hw_start_8168c_4(tp);
		break;

	case RTL_GIGA_MAC_VER_23:
		rtl_hw_start_8168cp_2(tp);
		break;

	case RTL_GIGA_MAC_VER_24:
		rtl_hw_start_8168cp_3(tp);
		break;

	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
		rtl_hw_start_8168d(tp);
		break;

	case RTL_GIGA_MAC_VER_28:
		rtl_hw_start_8168d_4(tp);
		break;

	case RTL_GIGA_MAC_VER_31:
		rtl_hw_start_8168dp(tp);
		break;

	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_hw_start_8168e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl_hw_start_8168e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
		rtl_hw_start_8168f_1(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl_hw_start_8411(tp);
		break;

	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_hw_start_8168g_1(tp);
		break;

	default:
		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
			dev->name, tp->mac_version);
		break;
	}

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
5242
/* CPlusCmd bits that must be cleared on 810x chips during hw_start. */
#define R810X_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
5253
/* Chip-specific setup for 8102E variant 1 (VER_07). */
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	/* Vendor-provided EPHY fixups; register meanings undocumented. */
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01,	0, 0x6e65 },
		{ 0x02,	0, 0x091f },
		{ 0x03,	0, 0xc2f9 },
		{ 0x06,	0, 0xafb5 },
		{ 0x07,	0, 0x0e00 },
		{ 0x19,	0, 0xec80 },
		{ 0x01,	0, 0x2e65 },
		{ 0x01,	0, 0x6e65 }
	};
	u8 cfg1;

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, FIX_NAK_1);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* If both LED bits stuck after the write, drop LEDS0 again. */
	cfg1 = RTL_R8(Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(Config1, cfg1 & ~LEDS0);

	rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
5286
/* Chip-specific setup for 8102E variant 2 (VER_09; also base for VER_08). */
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
}
5299
/* 8102E variant 3 (VER_08): variant 2 plus one extra EPHY write. */
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8102e_2(tp);

	rtl_ephy_write(tp, 0x03, 0xc2f9);
}
5306
/* Chip-specific setup for 8105E (VER_29; also base for VER_30). */
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* Vendor-provided EPHY fixups; register meanings undocumented. */
	static const struct ephy_info e_info_8105e_1[] = {
		{ 0x07,	0, 0x4000 },
		{ 0x19,	0, 0x0200 },
		{ 0x19,	0, 0x0020 },
		{ 0x1e,	0, 0x2000 },
		{ 0x03,	0, 0x0001 },
		{ 0x19,	0, 0x0100 },
		{ 0x19,	0, 0x0004 },
		{ 0x0a,	0, 0x0020 }
	};

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	/* Disable Early Tally Counter */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);

	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);

	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
5332
/* 8105E variant 2 (VER_30): variant 1 plus setting bit 15 of EPHY reg 0x1e. */
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
	rtl_hw_start_8105e_1(tp);
	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
5338
/* Chip-specific setup for 8402 (VER_37). */
static void rtl_hw_start_8402(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8402[] = {
		{ 0x19,	0xffff, 0xff64 },
		{ 0x1e,	0, 0x4000 }
	};

	rtl_csi_access_enable_2(tp);

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));

	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* ERI (extended register) programming; values from the vendor driver. */
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
}
5367
/* Chip-specific setup for 8106E (VER_39). */
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
5379
/*
 * Bring up an 8101/810x-family chip: PCI quirks, revision-specific setup
 * under the Cfg9346 unlock, then ring/filter programming and tx/rx enable.
 */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* Newer 810x chips do not report RxFIFOOver reliably; ignore it. */
	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
		tp->event_slow &= ~RxFIFOOver;

	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_16)
		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
					 PCI_EXP_DEVCTL_NOSNOOP_EN);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
		rtl_hw_start_8102e_1(tp);
		break;

	case RTL_GIGA_MAC_VER_08:
		rtl_hw_start_8102e_3(tp);
		break;

	case RTL_GIGA_MAC_VER_09:
		rtl_hw_start_8102e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_29:
		rtl_hw_start_8105e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_30:
		rtl_hw_start_8105e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl_hw_start_8402(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl_hw_start_8106(tp);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_set_rx_tx_config_registers(tp);

	/* Read flushes the preceding posted MMIO writes. */
	RTL_R8(IntrMask);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
5447
5448 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5449 {
5450 struct rtl8169_private *tp = netdev_priv(dev);
5451
5452 if (new_mtu < ETH_ZLEN ||
5453 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
5454 return -EINVAL;
5455
5456 if (new_mtu > ETH_DATA_LEN)
5457 rtl_hw_jumbo_enable(tp);
5458 else
5459 rtl_hw_jumbo_disable(tp);
5460
5461 dev->mtu = new_mtu;
5462 netdev_update_features(dev);
5463
5464 return 0;
5465 }
5466
/* Poison an rx descriptor so the NIC will never DMA into it: give it a
 * recognizable bogus address and clear DescOwn. */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
5472
/* Unmap and free one rx buffer, then poison its descriptor. */
static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
				     void **data_buff, struct RxDesc *desc)
{
	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
			 DMA_FROM_DEVICE);

	kfree(*data_buff);
	*data_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
5483
/* Hand an rx descriptor (back) to the NIC: set DescOwn and the buffer size
 * while preserving the RingEnd marker. */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
5490
/* Publish a freshly mapped rx buffer to the NIC.  The wmb() guarantees the
 * address is visible before DescOwn is set — do not reorder. */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
5498
/* Round a buffer address up to the next 16-byte boundary. */
static inline void *rtl8169_align(void *data)
{
	return (void *)(((long)data + 15) & ~(long)15);
}
5503
/*
 * Allocate and DMA-map one rx buffer on the device's NUMA node and attach it
 * to @desc.  Returns the kmalloc'd pointer (possibly unaligned — callers use
 * rtl8169_align() on it) or NULL on failure.
 * NOTE(review): return type says sk_buff* but a raw buffer pointer is
 * returned; callers store it in Rx_databuff as void*.
 */
static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
					     struct RxDesc *desc)
{
	void *data;
	dma_addr_t mapping;
	struct device *d = &tp->pci_dev->dev;
	struct net_device *dev = tp->dev;
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;

	data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
	if (!data)
		return NULL;

	/* If the allocation isn't 16-byte aligned, retry with 15 spare bytes
	 * so rtl8169_align() can round up inside the buffer. */
	if (rtl8169_align(data) != data) {
		kfree(data);
		data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
		if (!data)
			return NULL;
	}

	mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
		goto err_out;
	}

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
	return data;

err_out:
	kfree(data);
	return NULL;
}
5539
5540 static void rtl8169_rx_clear(struct rtl8169_private *tp)
5541 {
5542 unsigned int i;
5543
5544 for (i = 0; i < NUM_RX_DESC; i++) {
5545 if (tp->Rx_databuff[i]) {
5546 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5547 tp->RxDescArray + i);
5548 }
5549 }
5550 }
5551
/* Set the RingEnd bit so the NIC wraps back to the first descriptor here. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
5556
/*
 * Populate every empty slot of the rx ring with a mapped buffer and mark the
 * last descriptor as ring end.  On any allocation failure the whole ring is
 * torn down and -ENOMEM is returned; returns 0 on success.
 */
static int rtl8169_rx_fill(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		void *data;

		/* Slot already has a buffer (e.g. partial refill) — keep it. */
		if (tp->Rx_databuff[i])
			continue;

		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
		if (!data) {
			rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
			goto err_out;
		}
		tp->Rx_databuff[i] = data;
	}

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
	return 0;

err_out:
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}
5582
/* Reset ring indexes, clear the tx/rx bookkeeping arrays and fill the rx
 * ring.  Returns 0 or -ENOMEM (from rtl8169_rx_fill). */
static int rtl8169_init_ring(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));

	return rtl8169_rx_fill(tp);
}
5594
/* Unmap one tx buffer and scrub both the descriptor and its ring_info entry.
 * Note: the skb itself is NOT freed here — that is the caller's job. */
static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);

	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
5607
/*
 * Drop @n tx entries starting at ring index @start (modulo NUM_TX_DESC):
 * unmap each mapped fragment and free the skb attached to the entry that
 * owns it, counting it as a tx drop.
 */
static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
				   unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		unsigned int entry = (start + i) % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		unsigned int len = tx_skb->len;

		/* len == 0 means the slot holds no mapped data. */
		if (len) {
			struct sk_buff *skb = tx_skb->skb;

			rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
					     tp->TxDescArray + entry);
			/* Only the last fragment of a packet carries the skb. */
			if (skb) {
				tp->dev->stats.tx_dropped++;
				dev_kfree_skb(skb);
				tx_skb->skb = NULL;
			}
		}
	}
}
5631
/* Drop the entire tx ring and reset the tx indexes. */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
	tp->cur_tx = tp->dirty_tx = 0;
}
5637
/*
 * Full restart after a fault (e.g. tx timeout): quiesce NAPI and the queue,
 * reset the chip, recycle all descriptors and bring everything back up.
 * The ordering (disable NAPI -> sync -> reset -> rebuild -> enable) matters.
 */
static void rtl_reset_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	int i;

	napi_disable(&tp->napi);
	netif_stop_queue(dev);
	/* Wait for in-flight softirq/NAPI users of the rings to finish. */
	synchronize_sched();

	rtl8169_hw_reset(tp);

	/* Re-arm all rx descriptors; buffers stay allocated and mapped. */
	for (i = 0; i < NUM_RX_DESC; i++)
		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);

	rtl8169_tx_clear(tp);
	rtl8169_init_ring_indexes(tp);

	napi_enable(&tp->napi);
	rtl_hw_start(dev);
	netif_wake_queue(dev);
	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
}
5660
/* ndo_tx_timeout handler: defer a full reset to the driver workqueue. */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5667
/*
 * Map each page fragment of @skb into consecutive tx descriptors following
 * tp->cur_tx.  @opts holds the precomputed opts1/opts2 descriptor words.
 * The skb is attached to the LAST fragment's slot so it is freed only once
 * the whole packet has been sent.  Returns the number of fragments mapped,
 * or -EIO on a DMA mapping failure (already-mapped fragments are unwound).
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 *opts)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc * uninitialized_var(txd);
	struct device *d = &tp->pci_dev->dev;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		const skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = skb_frag_size(frag);
		addr = skb_frag_address(frag);
		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(d, mapping))) {
			if (net_ratelimit())
				netif_err(tp, drv, tp->dev,
					  "Failed to map TX fragments DMA!\n");
			goto err_out;
		}

		/* Anti gcc 2.95.3 bugware (sic) */
		status = opts[0] | len |
			(RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->opts2 = cpu_to_le32(opts[1]);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;

err_out:
	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
	return -EIO;
}
5718
/* Fill the LSO / checksum-offload bits of the Tx descriptor option
 * words. Which opts[] word carries them and where the MSS goes depends
 * on the descriptor version (tx_desc_info[tp->txd_version]).
 *
 * NOTE(review): the CHECKSUM_PARTIAL branch reads ip_hdr()
 * unconditionally — assumes IPv4 here; confirm IPv6 csum offload is not
 * advertised for these chips.
 */
static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
				    struct sk_buff *skb, u32 *opts)
{
	const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
	u32 mss = skb_shinfo(skb)->gso_size;
	int offset = info->opts_offset;

	if (mss) {
		opts[0] |= TD_LSO;
		opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			opts[offset] |= info->checksum.tcp;
		else if (ip->protocol == IPPROTO_UDP)
			opts[offset] |= info->checksum.udp;
		else
			WARN_ON_ONCE(1);
	}
}
5740
/* ndo_start_xmit: queue @skb on the Tx ring and kick the chip.
 *
 * Descriptor protocol: the head descriptor's DescOwn bit is written
 * LAST (after all fragments and the head's addr/opts2), separated by
 * wmb(), so the NIC never sees a half-built chain. tp->cur_tx is
 * published before the TxPoll doorbell.
 *
 * Returns NETDEV_TX_OK (even on drop — skb is consumed) or
 * NETDEV_TX_BUSY when the ring is unexpectedly full.
 */
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts[2];
	int frags;

	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_stop_0;
	}

	/* Head descriptor still owned by the NIC: ring full. */
	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop_0;

	len = skb_headlen(skb);
	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
		goto err_dma_0;
	}

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);

	/* NOTE(review): opts[1] is converted here AND when stored into
	 * opts2 below/in xmit_frags — harmless on little-endian (both are
	 * no-ops); verify intent on big-endian.
	 */
	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
	opts[0] = DescOwn;

	rtl8169_tso_csum(tp, skb, opts);

	frags = rtl8169_xmit_frags(tp, skb, opts);
	if (frags < 0)
		goto err_dma_1;
	else if (frags)
		opts[0] |= FirstFrag;
	else {
		/* Linear skb: single descriptor owns the whole frame. */
		opts[0] |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	txd->opts2 = cpu_to_le32(opts[1]);

	skb_tx_timestamp(skb);

	/* Order fragment descriptors before handing DescOwn to the NIC. */
	wmb();

	/* Anti gcc 2.95.3 bugware (sic) */
	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	tp->cur_tx += frags + 1;

	/* Publish cur_tx before ringing the doorbell. */
	wmb();

	RTL_W8(TxPoll, NPQ);

	mmiowb();

	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
		 * not miss a ring update when it notices a stopped queue.
		 */
		smp_wmb();
		netif_stop_queue(dev);
		/* Sync with rtl_tx:
		 * - publish queue status and cur_tx ring index (write barrier)
		 * - refresh dirty_tx ring index (read barrier).
		 * May the current thread have a pessimistic view of the ring
		 * status and forget to wake up queue, a racing rtl_tx thread
		 * can't.
		 */
		smp_mb();
		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

err_dma_1:
	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;

err_stop_0:
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	return NETDEV_TX_BUSY;
}
5838
/* Handle a SYSErr (PCI bus error) interrupt: log the PCI command/status
 * registers, clear the latched error bits, reset the chip and schedule
 * the full recovery path in rtl_task().
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
		  pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* PCI_STATUS error bits are write-one-to-clear. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
		void __iomem *ioaddr = tp->mmio_addr;

		netif_info(tp, intr, dev, "disabling PCI DAC\n");
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5885
/* Reclaim completed Tx descriptors (NAPI context): unmap buffers, free
 * skbs on LastFrag, update stats and advance dirty_tx. Wakes the queue
 * if start_xmit stopped it and room is available again; the smp_mb()
 * pairing with rtl8169_start_xmit() prevents a lost wake-up.
 */
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
	unsigned int dirty_tx, tx_left;

	dirty_tx = tp->dirty_tx;
	/* Read dirty_tx before cur_tx (pairs with xmit's publication order). */
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 status;

		/* Re-read the descriptor the NIC may just have released. */
		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
				     tp->TxDescArray + entry);
		if (status & LastFrag) {
			u64_stats_update_begin(&tp->tx_stats.syncp);
			tp->tx_stats.packets++;
			tp->tx_stats.bytes += tx_skb->skb->len;
			u64_stats_update_end(&tp->tx_stats.syncp);
			dev_kfree_skb(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Sync with rtl8169_start_xmit:
		 * - publish dirty_tx ring index (write barrier)
		 * - refresh cur_tx ring index and queue status (read barrier)
		 * May the current thread miss the stopped queue condition,
		 * a racing xmit thread can only have a right view of the
		 * ring status.
		 */
		smp_mb();
		if (netif_queue_stopped(dev) &&
		    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		if (tp->cur_tx != dirty_tx) {
			void __iomem *ioaddr = tp->mmio_addr;

			RTL_W8(TxPoll, NPQ);
		}
	}
}
5945
5946 static inline int rtl8169_fragmented_frame(u32 status)
5947 {
5948 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
5949 }
5950
5951 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
5952 {
5953 u32 status = opts1 & RxProtoMask;
5954
5955 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
5956 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
5957 skb->ip_summed = CHECKSUM_UNNECESSARY;
5958 else
5959 skb_checksum_none_assert(skb);
5960 }
5961
/* Copy a received frame out of the long-lived Rx buffer into a freshly
 * allocated skb, so the buffer can immediately be handed back to the
 * NIC. The buffer stays DMA-mapped; only a CPU/device sync pair brackets
 * the memcpy. Returns NULL on allocation failure (frame dropped).
 */
static struct sk_buff *rtl8169_try_rx_copy(void *data,
					   struct rtl8169_private *tp,
					   int pkt_size,
					   dma_addr_t addr)
{
	struct sk_buff *skb;
	struct device *d = &tp->pci_dev->dev;

	data = rtl8169_align(data);
	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
	prefetch(data);
	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (skb)
		memcpy(skb->data, data, pkt_size);
	/* Hand the buffer back to the device for the next frame. */
	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);

	return skb;
}
5980
/* NAPI Rx: walk the Rx ring from tp->cur_rx, handing each completed
 * frame to the stack (with csum/VLAN offload results applied) and
 * returning each descriptor to the NIC. Stops at the first descriptor
 * still owned by hardware or after @budget frames.
 *
 * Returns the number of descriptors processed.
 */
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
	unsigned int cur_rx, rx_left;
	unsigned int count;

	cur_rx = tp->cur_rx;

	for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		/* Read opts1 after the NIC's DescOwn handover is visible. */
		rmb();
		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;

		if (status & DescOwn)
			break;
		if (unlikely(status & RxRES)) {
			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
				   status);
			dev->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				dev->stats.rx_length_errors++;
			if (status & RxCRC)
				dev->stats.rx_crc_errors++;
			if (status & RxFOVF) {
				/* FIFO overflow: schedule a full reset. */
				rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
				dev->stats.rx_fifo_errors++;
			}
			/* NETIF_F_RXALL: deliver runt/CRC-error frames too. */
			if ((status & (RxRUNT | RxCRC)) &&
			    !(status & (RxRWT | RxFOVF)) &&
			    (dev->features & NETIF_F_RXALL))
				goto process_pkt;
		} else {
			struct sk_buff *skb;
			dma_addr_t addr;
			int pkt_size;

process_pkt:
			addr = le64_to_cpu(desc->addr);
			/* Strip the 4-byte FCS unless NETIF_F_RXFCS asks for it. */
			if (likely(!(dev->features & NETIF_F_RXFCS)))
				pkt_size = (status & 0x00003fff) - 4;
			else
				pkt_size = status & 0x00003fff;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				dev->stats.rx_dropped++;
				dev->stats.rx_length_errors++;
				goto release_descriptor;
			}

			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
						  tp, pkt_size, addr);
			if (!skb) {
				dev->stats.rx_dropped++;
				goto release_descriptor;
			}

			rtl8169_rx_csum(skb, status);
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			rtl8169_rx_vlan_tag(desc, skb);

			napi_gro_receive(&tp->napi, skb);

			u64_stats_update_begin(&tp->rx_stats.syncp);
			tp->rx_stats.packets++;
			tp->rx_stats.bytes += pkt_size;
			u64_stats_update_end(&tp->rx_stats.syncp);
		}
release_descriptor:
		desc->opts2 = 0;
		/* Clear opts2 before giving DescOwn back to the NIC. */
		wmb();
		rtl8169_mark_to_asic(desc, rx_buf_sz);
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	return count;
}
6068
6069 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
6070 {
6071 struct net_device *dev = dev_instance;
6072 struct rtl8169_private *tp = netdev_priv(dev);
6073 int handled = 0;
6074 u16 status;
6075
6076 status = rtl_get_events(tp);
6077 if (status && status != 0xffff) {
6078 status &= RTL_EVENT_NAPI | tp->event_slow;
6079 if (status) {
6080 handled = 1;
6081
6082 rtl_irq_disable(tp);
6083 napi_schedule(&tp->napi);
6084 }
6085 }
6086 return IRQ_RETVAL(handled);
6087 }
6088
6089 /*
6090 * Workqueue context.
6091 */
6092 static void rtl_slow_event_work(struct rtl8169_private *tp)
6093 {
6094 struct net_device *dev = tp->dev;
6095 u16 status;
6096
6097 status = rtl_get_events(tp) & tp->event_slow;
6098 rtl_ack_events(tp, status);
6099
6100 if (unlikely(status & RxFIFOOver)) {
6101 switch (tp->mac_version) {
6102 /* Work around for rx fifo overflow */
6103 case RTL_GIGA_MAC_VER_11:
6104 netif_stop_queue(dev);
6105 /* XXX - Hack alert. See rtl_task(). */
6106 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
6107 default:
6108 break;
6109 }
6110 }
6111
6112 if (unlikely(status & SYSErr))
6113 rtl8169_pcierr_interrupt(dev);
6114
6115 if (status & LinkChg)
6116 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
6117
6118 rtl_irq_enable_all(tp);
6119 }
6120
/* Single workqueue entry point: dispatch each pending task flag to its
 * handler, under the work mutex. Bails out early if the interface is
 * down or tasks have been disabled (close/suspend paths).
 */
static void rtl_task(struct work_struct *work)
{
	static const struct {
		int bitnr;
		void (*action)(struct rtl8169_private *);
	} rtl_work[] = {
		/* XXX - keep rtl_slow_event_work() as first element. */
		{ RTL_FLAG_TASK_SLOW_PENDING,	rtl_slow_event_work },
		{ RTL_FLAG_TASK_RESET_PENDING,	rtl_reset_work },
		{ RTL_FLAG_TASK_PHY_PENDING,	rtl_phy_work }
	};
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, wk.work);
	struct net_device *dev = tp->dev;
	int i;

	rtl_lock_work(tp);

	if (!netif_running(dev) ||
	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
		goto out_unlock;

	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
		bool pending;

		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
		if (pending)
			rtl_work[i].action(tp);
	}

out_unlock:
	rtl_unlock_work(tp);
}
6154
/* NAPI poll: acknowledge and service fast (Rx/Tx) events inline; slow
 * events are left unacked, masked out of the re-enable set and handed
 * to rtl_slow_event_work() via the workqueue (which re-enables them).
 */
static int rtl8169_poll(struct napi_struct *napi, int budget)
{
	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
	struct net_device *dev = tp->dev;
	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
	int work_done = 0;
	u16 status;

	status = rtl_get_events(tp);
	/* Ack only the fast events; slow ones are acked by the work item. */
	rtl_ack_events(tp, status & ~tp->event_slow);

	if (status & RTL_EVENT_NAPI_RX)
		work_done = rtl_rx(dev, tp, (u32) budget);

	if (status & RTL_EVENT_NAPI_TX)
		rtl_tx(dev, tp);

	if (status & tp->event_slow) {
		enable_mask &= ~tp->event_slow;

		rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
	}

	if (work_done < budget) {
		napi_complete(napi);

		rtl_irq_enable(tp, enable_mask);
		mmiowb();
	}

	return work_done;
}
6187
/* Fold the hardware RxMissed counter into netdev stats and clear it.
 * Only the early 8169 chips (<= VER_06) expose this register.
 */
static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
		return;

	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
	RTL_W32(RxMissed, 0);
}
6198
/* Bring the interface down: stop NAPI and the queue, reset the chip,
 * harvest residual stats, then tear down both rings and power down the
 * PLL. Caller holds the work lock (see rtl8169_close()).
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	del_timer_sync(&tp->timer);

	napi_disable(&tp->napi);
	netif_stop_queue(dev);

	rtl8169_hw_reset(tp);
	/*
	 * At this point device interrupts can not be enabled in any function,
	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
	 * and napi is disabled (rtl8169_poll).
	 */
	rtl8169_rx_missed(dev, ioaddr);

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);

	rtl_pll_power_down(tp);
}
6226
/* ndo_stop: counterpart of rtl_open(). Disables deferred tasks, shuts
 * the device down and frees the IRQ and descriptor rings. Always
 * returns 0.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	/* Keep the device resumed while we tear it down. */
	pm_runtime_get_sync(&pdev->dev);

	/* Update counters before going down */
	rtl8169_update_counters(dev);

	rtl_lock_work(tp);
	/* Prevent rtl_task() from touching the hardware from here on. */
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	rtl8169_down(dev);
	rtl_unlock_work(tp);

	free_irq(pdev->irq, dev);

	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
6256
6257 #ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller: run the interrupt handler with IRQs unavailable
 * (netconsole et al.).
 */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(tp->pci_dev->irq, dev);
}
6264 #endif
6265
/* ndo_open: allocate the descriptor rings, populate them, request
 * firmware and the IRQ, then bring the hardware up. Error paths unwind
 * in strict reverse order via the numbered goto labels.
 *
 * Returns 0 on success or a negative errno.
 */
static int rtl_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * Rx and Tx descriptors needs 256 bytes alignment.
	 * dma_alloc_coherent provides more.
	 */
	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->wk.work, rtl_task);

	smp_mb();

	/* Best-effort: the chip works without firmware patches too. */
	rtl_request_firmware(tp);

	retval = request_irq(pdev->irq, rtl8169_interrupt,
			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_fw_2;

	rtl_lock_work(tp);

	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	napi_enable(&tp->napi);

	rtl8169_init_phy(dev, tp);

	__rtl8169_set_features(dev, dev->features);

	rtl_pll_power_up(tp);

	rtl_hw_start(dev);

	netif_start_queue(dev);

	rtl_unlock_work(tp);

	tp->saved_wolopts = 0;
	pm_runtime_put_noidle(&pdev->dev);

	rtl8169_check_link_status(dev, tp, ioaddr);
out:
	return retval;

err_release_fw_2:
	rtl_release_firmware(tp);
	rtl8169_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
err_free_tx_0:
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
	goto out;
}
6345
/* ndo_get_stats64: snapshot the 64-bit Rx/Tx packet/byte counters via
 * their seqcount retry loops (consistent even against a concurrent
 * softirq update on 32-bit hosts) and copy the plain error counters.
 */
static struct rtnl_link_stats64 *
rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int start;

	/* Pull the latest hardware RxMissed count first. */
	if (netif_running(dev))
		rtl8169_rx_missed(dev, ioaddr);

	do {
		start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
		stats->rx_packets = tp->rx_stats.packets;
		stats->rx_bytes	= tp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));


	do {
		start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
		stats->tx_packets = tp->tx_stats.packets;
		stats->tx_bytes	= tp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));

	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_crc_errors	= dev->stats.rx_crc_errors;
	stats->rx_fifo_errors	= dev->stats.rx_fifo_errors;
	stats->rx_missed_errors = dev->stats.rx_missed_errors;

	return stats;
}
6379
/* Common quiesce path for system suspend, runtime suspend and shutdown:
 * detach the netdev, stop NAPI/queue, disable deferred tasks and power
 * the PLL down. No-op when the interface is not running.
 */
static void rtl8169_net_suspend(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	rtl_lock_work(tp);
	napi_disable(&tp->napi);
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	rtl_pll_power_down(tp);
}
6397
6398 #ifdef CONFIG_PM
6399
/* dev_pm_ops::suspend (also freeze/poweroff): quiesce the interface. */
static int rtl8169_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);

	rtl8169_net_suspend(dev);

	return 0;
}
6409
/* Shared resume tail for system and runtime resume: re-attach the
 * netdev, power the PLL up, re-arm NAPI and tasks, then let the
 * scheduled reset work re-initialize the hardware.
 */
static void __rtl8169_resume(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	netif_device_attach(dev);

	rtl_pll_power_up(tp);

	rtl_lock_work(tp);
	napi_enable(&tp->napi);
	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	/* Full chip re-init happens asynchronously in rtl_reset_work(). */
	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
6425
/* dev_pm_ops::resume (also thaw/restore): re-init the PHY and, if the
 * interface was up, resume it.
 */
static int rtl8169_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_phy(dev, tp);

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
6439
/* dev_pm_ops::runtime_suspend: save the current WoL configuration and
 * arm wake-on-any before quiescing. tp->TxDescArray doubles as the
 * "interface is open" flag (allocated in rtl_open, freed in close).
 */
static int rtl8169_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	tp->saved_wolopts = __rtl8169_get_wol(tp);
	__rtl8169_set_wol(tp, WAKE_ANY);
	rtl_unlock_work(tp);

	rtl8169_net_suspend(dev);

	return 0;
}
6458
/* dev_pm_ops::runtime_resume: restore the WoL settings saved at
 * runtime-suspend time, then bring the interface back up.
 */
static int rtl8169_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	__rtl8169_set_wol(tp, tp->saved_wolopts);
	tp->saved_wolopts = 0;
	rtl_unlock_work(tp);

	rtl8169_init_phy(dev, tp);

	__rtl8169_resume(dev);

	return 0;
}
6479
/* dev_pm_ops::runtime_idle: veto runtime suspend while the interface
 * is open (TxDescArray allocated).
 */
static int rtl8169_runtime_idle(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->TxDescArray ? -EBUSY : 0;
}
6488
/* System sleep (suspend/hibernate) and runtime PM callbacks. */
static const struct dev_pm_ops rtl8169_pm_ops = {
	.suspend		= rtl8169_suspend,
	.resume			= rtl8169_resume,
	.freeze			= rtl8169_suspend,
	.thaw			= rtl8169_resume,
	.poweroff		= rtl8169_suspend,
	.restore		= rtl8169_resume,
	.runtime_suspend	= rtl8169_runtime_suspend,
	.runtime_resume		= rtl8169_runtime_resume,
	.runtime_idle		= rtl8169_runtime_idle,
};
6500
6501 #define RTL8169_PM_OPS (&rtl8169_pm_ops)
6502
6503 #else /* !CONFIG_PM */
6504
6505 #define RTL8169_PM_OPS NULL
6506
6507 #endif /* !CONFIG_PM */
6508
/* Shutdown-time WoL quirk for the 8168b family: keep the receiver
 * enabled (with bus mastering off) or Wake-on-LAN does not trigger.
 */
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* WoL fails with 8168b when the receiver is disabled. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		pci_clear_master(tp->pci_dev);

		RTL_W8(ChipCmd, CmdRxEnb);
		/* PCI commit */
		RTL_R8(ChipCmd);
		break;
	default:
		break;
	}
}
6528
/* pci_driver::shutdown: quiesce the NIC and, on power-off, apply the
 * WoL quirks and drop the device into D3hot with wake enabled if WoL
 * is configured.
 */
static void rtl_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = &pdev->dev;

	pm_runtime_get_sync(d);

	rtl8169_net_suspend(dev);

	/* Restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl8169_hw_reset(tp);

	if (system_state == SYSTEM_POWER_OFF) {
		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
			rtl_wol_suspend_quirk(tp);
			rtl_wol_shutdown_quirk(tp);
		}

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	pm_runtime_put_noidle(d);
}
6556
/* pci_driver::remove: unwind everything rtl_init_one() set up —
 * stop DASH firmware on the chips that run it, flush deferred work,
 * unregister the netdev and release firmware, MSI and MMIO resources.
 */
static void rtl_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl8168_driver_stop(tp);
	}

	cancel_work_sync(&tp->wk.work);

	netif_napi_del(&tp->napi);

	unregister_netdev(dev);

	rtl_release_firmware(tp);

	/* Re-balance the runtime-PM reference taken for wake-capable devs. */
	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl_disable_msi(pdev, tp);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
	pci_set_drvdata(pdev, NULL);
}
6586
/* net_device_ops vtable wiring the driver entry points into the stack. */
static const struct net_device_ops rtl_netdev_ops = {
	.ndo_open		= rtl_open,
	.ndo_stop		= rtl8169_close,
	.ndo_get_stats64	= rtl8169_get_stats64,
	.ndo_start_xmit		= rtl8169_start_xmit,
	.ndo_tx_timeout		= rtl8169_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= rtl8169_change_mtu,
	.ndo_fix_features	= rtl8169_fix_features,
	.ndo_set_features	= rtl8169_set_features,
	.ndo_set_mac_address	= rtl_set_mac_address,
	.ndo_do_ioctl		= rtl8169_ioctl,
	.ndo_set_rx_mode	= rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rtl8169_netpoll,
#endif

};
6605
/* Per-family configuration, indexed by the PCI id table's driver_data:
 * RTL_CFG_0 = 8169, RTL_CFG_1 = 8168, RTL_CFG_2 = 8101. Selects the
 * hw_start routine, BAR, Rx alignment, slow-event mask, feature flags
 * and the fallback MAC version when detection fails.
 */
static const struct rtl_cfg_info {
	void (*hw_start)(struct net_device *);
	unsigned int region;
	unsigned int align;
	u16 event_slow;
	unsigned features;
	u8 default_ver;
} rtl_cfg_infos [] = {
	[RTL_CFG_0] = {
		.hw_start	= rtl_hw_start_8169,
		.region		= 1,
		.align		= 0,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
		.features	= RTL_FEATURE_GMII,
		.default_ver	= RTL_GIGA_MAC_VER_01,
	},
	[RTL_CFG_1] = {
		.hw_start	= rtl_hw_start_8168,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow,
		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_11,
	},
	[RTL_CFG_2] = {
		.hw_start	= rtl_hw_start_8101,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
				  PCSTimeout,
		.features	= RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_13,
	}
};
6640
/* Cfg9346_Unlock assumed. */
/* Try to enable MSI for families that support it; fall back to legacy
 * INTx on failure. Mirrors the result into the Config2 MSIEnable bit
 * on the old 8169 chips. Returns RTL_FEATURE_MSI or 0.
 */
static unsigned rtl_try_msi(struct rtl8169_private *tp,
			    const struct rtl_cfg_info *cfg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned msi = 0;
	u8 cfg2;

	cfg2 = RTL_R8(Config2) & ~MSIEnable;
	if (cfg->features & RTL_FEATURE_MSI) {
		if (pci_enable_msi(tp->pci_dev)) {
			netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
		} else {
			cfg2 |= MSIEnable;
			msi = RTL_FEATURE_MSI;
		}
	}
	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		RTL_W8(Config2, cfg2);
	return msi;
}
6662
/* Poll condition: MCU reports the link list is ready (8168g init). */
DECLARE_RTL_COND(rtl_link_list_ready_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(MCU) & LINK_LIST_RDY;
}
6669
/* Poll condition: both Rx and Tx paths report empty in the MCU reg. */
DECLARE_RTL_COND(rtl_rxtx_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
6676
/* Early MAC bring-up for the 8168G family: gate RXDV, wait for the
 * datapaths to drain, take the MCU out of OOB mode and toggle the
 * 0xe8de OCP bits while waiting for the link list to become ready.
 * Each wait silently gives up after 42 x 100us; later init proceeds
 * regardless.
 */
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 data;

	tp->ocp_base = OCP_STD_PHY_BASE;

	RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
		return;

	if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
		return;

	RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
	msleep(1);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	data = r8168_mac_ocp_read(tp, 0xe8de);
	data &= ~(1 << 14);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;

	data = r8168_mac_ocp_read(tp, 0xe8de);
	data |= (1 << 15);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;
}
6710
6711 static void rtl_hw_initialize(struct rtl8169_private *tp)
6712 {
6713 switch (tp->mac_version) {
6714 case RTL_GIGA_MAC_VER_40:
6715 case RTL_GIGA_MAC_VER_41:
6716 rtl_hw_init_8168g(tp);
6717 break;
6718
6719 default:
6720 break;
6721 }
6722 }
6723
6724 static int
6725 rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6726 {
6727 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
6728 const unsigned int region = cfg->region;
6729 struct rtl8169_private *tp;
6730 struct mii_if_info *mii;
6731 struct net_device *dev;
6732 void __iomem *ioaddr;
6733 int chipset, i;
6734 int rc;
6735
6736 if (netif_msg_drv(&debug)) {
6737 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
6738 MODULENAME, RTL8169_VERSION);
6739 }
6740
6741 dev = alloc_etherdev(sizeof (*tp));
6742 if (!dev) {
6743 rc = -ENOMEM;
6744 goto out;
6745 }
6746
6747 SET_NETDEV_DEV(dev, &pdev->dev);
6748 dev->netdev_ops = &rtl_netdev_ops;
6749 tp = netdev_priv(dev);
6750 tp->dev = dev;
6751 tp->pci_dev = pdev;
6752 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
6753
6754 mii = &tp->mii;
6755 mii->dev = dev;
6756 mii->mdio_read = rtl_mdio_read;
6757 mii->mdio_write = rtl_mdio_write;
6758 mii->phy_id_mask = 0x1f;
6759 mii->reg_num_mask = 0x1f;
6760 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
6761
6762 /* disable ASPM completely as that cause random device stop working
6763 * problems as well as full system hangs for some PCIe devices users */
6764 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6765 PCIE_LINK_STATE_CLKPM);
6766
6767 /* enable device (incl. PCI PM wakeup and hotplug setup) */
6768 rc = pci_enable_device(pdev);
6769 if (rc < 0) {
6770 netif_err(tp, probe, dev, "enable failure\n");
6771 goto err_out_free_dev_1;
6772 }
6773
6774 if (pci_set_mwi(pdev) < 0)
6775 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
6776
6777 /* make sure PCI base addr 1 is MMIO */
6778 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
6779 netif_err(tp, probe, dev,
6780 "region #%d not an MMIO resource, aborting\n",
6781 region);
6782 rc = -ENODEV;
6783 goto err_out_mwi_2;
6784 }
6785
6786 /* check for weird/broken PCI region reporting */
6787 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6788 netif_err(tp, probe, dev,
6789 "Invalid PCI region size(s), aborting\n");
6790 rc = -ENODEV;
6791 goto err_out_mwi_2;
6792 }
6793
6794 rc = pci_request_regions(pdev, MODULENAME);
6795 if (rc < 0) {
6796 netif_err(tp, probe, dev, "could not request regions\n");
6797 goto err_out_mwi_2;
6798 }
6799
6800 tp->cp_cmd = RxChkSum;
6801
6802 if ((sizeof(dma_addr_t) > 4) &&
6803 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
6804 tp->cp_cmd |= PCIDAC;
6805 dev->features |= NETIF_F_HIGHDMA;
6806 } else {
6807 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6808 if (rc < 0) {
6809 netif_err(tp, probe, dev, "DMA configuration failed\n");
6810 goto err_out_free_res_3;
6811 }
6812 }
6813
6814 /* ioremap MMIO region */
6815 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
6816 if (!ioaddr) {
6817 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
6818 rc = -EIO;
6819 goto err_out_free_res_3;
6820 }
6821 tp->mmio_addr = ioaddr;
6822
6823 if (!pci_is_pcie(pdev))
6824 netif_info(tp, probe, dev, "not PCI Express\n");
6825
6826 /* Identify chip attached to board */
6827 rtl8169_get_mac_version(tp, dev, cfg->default_ver);
6828
6829 rtl_init_rxcfg(tp);
6830
6831 rtl_irq_disable(tp);
6832
6833 rtl_hw_initialize(tp);
6834
6835 rtl_hw_reset(tp);
6836
6837 rtl_ack_events(tp, 0xffff);
6838
6839 pci_set_master(pdev);
6840
6841 /*
6842 * Pretend we are using VLANs; This bypasses a nasty bug where
6843 * Interrupts stop flowing on high load on 8110SCd controllers.
6844 */
6845 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6846 tp->cp_cmd |= RxVlan;
6847
6848 rtl_init_mdio_ops(tp);
6849 rtl_init_pll_power_ops(tp);
6850 rtl_init_jumbo_ops(tp);
6851 rtl_init_csi_ops(tp);
6852
6853 rtl8169_print_mac_version(tp);
6854
6855 chipset = tp->mac_version;
6856 tp->txd_version = rtl_chip_infos[chipset].txd_version;
6857
6858 RTL_W8(Cfg9346, Cfg9346_Unlock);
6859 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
6860 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
6861 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
6862 tp->features |= RTL_FEATURE_WOL;
6863 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
6864 tp->features |= RTL_FEATURE_WOL;
6865 tp->features |= rtl_try_msi(tp, cfg);
6866 RTL_W8(Cfg9346, Cfg9346_Lock);
6867
6868 if (rtl_tbi_enabled(tp)) {
6869 tp->set_speed = rtl8169_set_speed_tbi;
6870 tp->get_settings = rtl8169_gset_tbi;
6871 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
6872 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
6873 tp->link_ok = rtl8169_tbi_link_ok;
6874 tp->do_ioctl = rtl_tbi_ioctl;
6875 } else {
6876 tp->set_speed = rtl8169_set_speed_xmii;
6877 tp->get_settings = rtl8169_gset_xmii;
6878 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
6879 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
6880 tp->link_ok = rtl8169_xmii_link_ok;
6881 tp->do_ioctl = rtl_xmii_ioctl;
6882 }
6883
6884 mutex_init(&tp->wk.mutex);
6885
6886 /* Get MAC address */
6887 for (i = 0; i < ETH_ALEN; i++)
6888 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6889
6890 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
6891 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
6892
6893 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
6894
6895 /* don't enable SG, IP_CSUM and TSO by default - it might not work
6896 * properly for all devices */
6897 dev->features |= NETIF_F_RXCSUM |
6898 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6899
6900 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6901 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6902 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6903 NETIF_F_HIGHDMA;
6904
6905 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6906 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6907 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
6908
6909 dev->hw_features |= NETIF_F_RXALL;
6910 dev->hw_features |= NETIF_F_RXFCS;
6911
6912 tp->hw_start = cfg->hw_start;
6913 tp->event_slow = cfg->event_slow;
6914
6915 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
6916 ~(RxBOVF | RxFOVF) : ~0;
6917
6918 init_timer(&tp->timer);
6919 tp->timer.data = (unsigned long) dev;
6920 tp->timer.function = rtl8169_phy_timer;
6921
6922 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
6923
6924 rc = register_netdev(dev);
6925 if (rc < 0)
6926 goto err_out_msi_4;
6927
6928 pci_set_drvdata(pdev, dev);
6929
6930 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6931 rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
6932 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
6933 if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
6934 netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
6935 "tx checksumming: %s]\n",
6936 rtl_chip_infos[chipset].jumbo_max,
6937 rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
6938 }
6939
6940 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6941 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6942 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6943 rtl8168_driver_start(tp);
6944 }
6945
6946 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
6947
6948 if (pci_dev_run_wake(pdev))
6949 pm_runtime_put_noidle(&pdev->dev);
6950
6951 netif_carrier_off(dev);
6952
6953 out:
6954 return rc;
6955
6956 err_out_msi_4:
6957 netif_napi_del(&tp->napi);
6958 rtl_disable_msi(pdev, tp);
6959 iounmap(ioaddr);
6960 err_out_free_res_3:
6961 pci_release_regions(pdev);
6962 err_out_mwi_2:
6963 pci_clear_mwi(pdev);
6964 pci_disable_device(pdev);
6965 err_out_free_dev_1:
6966 free_netdev(dev);
6967 goto out;
6968 }
6969
/*
 * PCI driver descriptor for the r8169 module.
 *
 * Binds the driver name and the supported PCI device-ID table
 * (rtl8169_pci_tbl) to the lifecycle entry points: rtl_init_one() runs at
 * probe time, rtl_remove_one() on device removal, rtl_shutdown() on system
 * shutdown/reboot, and RTL8169_PM_OPS supplies the suspend/resume and
 * runtime-PM callbacks (presumably compiled out when CONFIG_PM is unset —
 * confirm against the RTL8169_PM_OPS definition earlier in this file).
 */
6970 static struct pci_driver rtl8169_pci_driver = {
6971 .name = MODULENAME,
6972 .id_table = rtl8169_pci_tbl,
6973 .probe = rtl_init_one,
6974 .remove = rtl_remove_one,
6975 .shutdown = rtl_shutdown,
6976 .driver.pm = RTL8169_PM_OPS,
6977 };
6978
6979 module_pci_driver(rtl8169_pci_driver);