git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git - blame: drivers/net/ethernet/realtek/r8169.c
Commit: r8169: support the new RTL8402 chip.
1da177e4 1/*
07d3f51f
FR
2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
3 *
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
7 *
8 * See MAINTAINERS file for support contact information.
1da177e4
LT
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/pci.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/delay.h>
17#include <linux/ethtool.h>
18#include <linux/mii.h>
19#include <linux/if_vlan.h>
20#include <linux/crc32.h>
21#include <linux/in.h>
22#include <linux/ip.h>
23#include <linux/tcp.h>
24#include <linux/init.h>
a6b7a407 25#include <linux/interrupt.h>
1da177e4 26#include <linux/dma-mapping.h>
e1759441 27#include <linux/pm_runtime.h>
bca03d5f 28#include <linux/firmware.h>
ba04c7c9 29#include <linux/pci-aspm.h>
70c71606 30#include <linux/prefetch.h>
1da177e4
LT
31
32#include <asm/io.h>
33#include <asm/irq.h>
34
865c652d 35#define RTL8169_VERSION "2.3LK-NAPI"
1da177e4
LT
36#define MODULENAME "r8169"
37#define PFX MODULENAME ": "
38
bca03d5f 39#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
01dc7fec 41#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
70090424 43#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
c2218925
HW
44#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
5a5e4443 46#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
7e18dca1 47#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
bca03d5f 48
1da177e4
LT
49#ifdef RTL8169_DEBUG
50#define assert(expr) \
5b0384f4
FR
51 if (!(expr)) { \
52 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
b39d66a8 53 #expr,__FILE__,__func__,__LINE__); \
5b0384f4 54 }
06fa7358
JP
55#define dprintk(fmt, args...) \
56 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
1da177e4
LT
57#else
58#define assert(expr) do {} while (0)
59#define dprintk(fmt, args...) do {} while (0)
60#endif /* RTL8169_DEBUG */
61
b57b7e5a 62#define R8169_MSG_DEFAULT \
f0e837d9 63 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
b57b7e5a 64
1da177e4
LT
65#define TX_BUFFS_AVAIL(tp) \
66 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
67
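/*
 * Illustrative sketch, not part of the original file: the macro above
 * computes free Tx slots from two free-running u32 indices, and unsigned
 * wrap-around keeps the result correct, e.g. dirty_tx = 10, cur_tx = 70
 * on a 64-entry ring leaves 10 + 64 - 70 - 1 = 3 slots.  The hypothetical
 * helper below restates the same arithmetic with the ring size passed in.
 */
static inline u32 example_tx_buffs_avail(u32 dirty_tx, u32 cur_tx,
					 u32 ring_size)
{
	return dirty_tx + ring_size - cur_tx - 1;
}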
1da177e4
LT
68/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
69 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
f71e1309 70static const int multicast_filter_limit = 32;
1da177e4 71
9c14ceaf 72#define MAX_READ_REQUEST_SHIFT 12
1da177e4 73#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
1da177e4
LT
74#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
75#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
76
77#define R8169_REGS_SIZE 256
78#define R8169_NAPI_WEIGHT 64
79#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
80#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
81#define RX_BUF_SIZE 1536 /* Rx Buffer size */
82#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
83#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
84
85#define RTL8169_TX_TIMEOUT (6*HZ)
86#define RTL8169_PHY_TIMEOUT (10*HZ)
87
ea8dbdd1 88#define RTL_EEPROM_SIG cpu_to_le32(0x8129)
89#define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
e1564ec9
FR
90#define RTL_EEPROM_SIG_ADDR 0x0000
91
1da177e4
LT
92/* write/read MMIO register */
93#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
94#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
95#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
96#define RTL_R8(reg) readb (ioaddr + (reg))
97#define RTL_R16(reg) readw (ioaddr + (reg))
06f555f3 98#define RTL_R32(reg) readl (ioaddr + (reg))
1da177e4
LT
99
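/*
 * Illustrative sketch, not part of the original file: the accessors above
 * expect a local "ioaddr" pointing at the mapped register window.  The
 * hypothetical helper below shows the usual pattern of a posted write
 * followed by a read-back to flush it; 0x3c and 0x37 are the IntrMask and
 * ChipCmd offsets declared further down in enum rtl_registers.
 */
static inline void example_mask_all_irqs(void __iomem *ioaddr)
{
	RTL_W16(0x3c, 0x0000);	/* IntrMask: disable every source */
	RTL_R8(0x37);		/* ChipCmd: read back to flush the write */
}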
100enum mac_version {
85bffe6c
FR
101 RTL_GIGA_MAC_VER_01 = 0,
102 RTL_GIGA_MAC_VER_02,
103 RTL_GIGA_MAC_VER_03,
104 RTL_GIGA_MAC_VER_04,
105 RTL_GIGA_MAC_VER_05,
106 RTL_GIGA_MAC_VER_06,
107 RTL_GIGA_MAC_VER_07,
108 RTL_GIGA_MAC_VER_08,
109 RTL_GIGA_MAC_VER_09,
110 RTL_GIGA_MAC_VER_10,
111 RTL_GIGA_MAC_VER_11,
112 RTL_GIGA_MAC_VER_12,
113 RTL_GIGA_MAC_VER_13,
114 RTL_GIGA_MAC_VER_14,
115 RTL_GIGA_MAC_VER_15,
116 RTL_GIGA_MAC_VER_16,
117 RTL_GIGA_MAC_VER_17,
118 RTL_GIGA_MAC_VER_18,
119 RTL_GIGA_MAC_VER_19,
120 RTL_GIGA_MAC_VER_20,
121 RTL_GIGA_MAC_VER_21,
122 RTL_GIGA_MAC_VER_22,
123 RTL_GIGA_MAC_VER_23,
124 RTL_GIGA_MAC_VER_24,
125 RTL_GIGA_MAC_VER_25,
126 RTL_GIGA_MAC_VER_26,
127 RTL_GIGA_MAC_VER_27,
128 RTL_GIGA_MAC_VER_28,
129 RTL_GIGA_MAC_VER_29,
130 RTL_GIGA_MAC_VER_30,
131 RTL_GIGA_MAC_VER_31,
132 RTL_GIGA_MAC_VER_32,
133 RTL_GIGA_MAC_VER_33,
70090424 134 RTL_GIGA_MAC_VER_34,
c2218925
HW
135 RTL_GIGA_MAC_VER_35,
136 RTL_GIGA_MAC_VER_36,
7e18dca1 137 RTL_GIGA_MAC_VER_37,
85bffe6c 138 RTL_GIGA_MAC_NONE = 0xff,
1da177e4
LT
139};
140
2b7b4318
FR
141enum rtl_tx_desc_version {
142 RTL_TD_0 = 0,
143 RTL_TD_1 = 1,
144};
145
d58d46b5
FR
146#define JUMBO_1K ETH_DATA_LEN
147#define JUMBO_4K (4*1024 - ETH_HLEN - 2)
148#define JUMBO_6K (6*1024 - ETH_HLEN - 2)
149#define JUMBO_7K (7*1024 - ETH_HLEN - 2)
150#define JUMBO_9K (9*1024 - ETH_HLEN - 2)
151
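/*
 * Illustrative sketch, not part of the original file: each limit above is
 * the MTU that fits an N KiB frame buffer once the Ethernet header
 * (ETH_HLEN, 14 bytes) and a 2 byte pad are subtracted, e.g.
 * 9 * 1024 - 14 - 2 = 9200 for JUMBO_9K.  The hypothetical helper below
 * restates that arithmetic.
 */
static inline int example_jumbo_mtu(int kib)
{
	return kib * 1024 - ETH_HLEN - 2;
}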
152#define _R(NAME,TD,FW,SZ,B) { \
153 .name = NAME, \
154 .txd_version = TD, \
155 .fw_name = FW, \
156 .jumbo_max = SZ, \
157 .jumbo_tx_csum = B \
158}
1da177e4 159
3c6bee1d 160static const struct {
1da177e4 161 const char *name;
2b7b4318 162 enum rtl_tx_desc_version txd_version;
953a12cc 163 const char *fw_name;
d58d46b5
FR
164 u16 jumbo_max;
165 bool jumbo_tx_csum;
85bffe6c
FR
166} rtl_chip_infos[] = {
167 /* PCI devices. */
168 [RTL_GIGA_MAC_VER_01] =
d58d46b5 169 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
85bffe6c 170 [RTL_GIGA_MAC_VER_02] =
d58d46b5 171 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
85bffe6c 172 [RTL_GIGA_MAC_VER_03] =
d58d46b5 173 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
85bffe6c 174 [RTL_GIGA_MAC_VER_04] =
d58d46b5 175 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
85bffe6c 176 [RTL_GIGA_MAC_VER_05] =
d58d46b5 177 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
85bffe6c 178 [RTL_GIGA_MAC_VER_06] =
d58d46b5 179 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
85bffe6c
FR
180 /* PCI-E devices. */
181 [RTL_GIGA_MAC_VER_07] =
d58d46b5 182 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
85bffe6c 183 [RTL_GIGA_MAC_VER_08] =
d58d46b5 184 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
85bffe6c 185 [RTL_GIGA_MAC_VER_09] =
d58d46b5 186 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
85bffe6c 187 [RTL_GIGA_MAC_VER_10] =
d58d46b5 188 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
85bffe6c 189 [RTL_GIGA_MAC_VER_11] =
d58d46b5 190 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
85bffe6c 191 [RTL_GIGA_MAC_VER_12] =
d58d46b5 192 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
85bffe6c 193 [RTL_GIGA_MAC_VER_13] =
d58d46b5 194 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
85bffe6c 195 [RTL_GIGA_MAC_VER_14] =
d58d46b5 196 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
85bffe6c 197 [RTL_GIGA_MAC_VER_15] =
d58d46b5 198 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
85bffe6c 199 [RTL_GIGA_MAC_VER_16] =
d58d46b5 200 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
85bffe6c 201 [RTL_GIGA_MAC_VER_17] =
d58d46b5 202 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
85bffe6c 203 [RTL_GIGA_MAC_VER_18] =
d58d46b5 204 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
85bffe6c 205 [RTL_GIGA_MAC_VER_19] =
d58d46b5 206 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
85bffe6c 207 [RTL_GIGA_MAC_VER_20] =
d58d46b5 208 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
85bffe6c 209 [RTL_GIGA_MAC_VER_21] =
d58d46b5 210 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
85bffe6c 211 [RTL_GIGA_MAC_VER_22] =
d58d46b5 212 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
85bffe6c 213 [RTL_GIGA_MAC_VER_23] =
d58d46b5 214 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
85bffe6c 215 [RTL_GIGA_MAC_VER_24] =
d58d46b5 216 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
85bffe6c 217 [RTL_GIGA_MAC_VER_25] =
d58d46b5
FR
218 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
219 JUMBO_9K, false),
85bffe6c 220 [RTL_GIGA_MAC_VER_26] =
d58d46b5
FR
221 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
222 JUMBO_9K, false),
85bffe6c 223 [RTL_GIGA_MAC_VER_27] =
d58d46b5 224 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
85bffe6c 225 [RTL_GIGA_MAC_VER_28] =
d58d46b5 226 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
85bffe6c 227 [RTL_GIGA_MAC_VER_29] =
d58d46b5
FR
228 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
229 JUMBO_1K, true),
85bffe6c 230 [RTL_GIGA_MAC_VER_30] =
d58d46b5
FR
231 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
232 JUMBO_1K, true),
85bffe6c 233 [RTL_GIGA_MAC_VER_31] =
d58d46b5 234 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
85bffe6c 235 [RTL_GIGA_MAC_VER_32] =
d58d46b5
FR
236 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
237 JUMBO_9K, false),
85bffe6c 238 [RTL_GIGA_MAC_VER_33] =
d58d46b5
FR
239 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
240 JUMBO_9K, false),
70090424 241 [RTL_GIGA_MAC_VER_34] =
d58d46b5
FR
242 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
243 JUMBO_9K, false),
c2218925 244 [RTL_GIGA_MAC_VER_35] =
d58d46b5
FR
245 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
246 JUMBO_9K, false),
c2218925 247 [RTL_GIGA_MAC_VER_36] =
d58d46b5
FR
248 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
249 JUMBO_9K, false),
7e18dca1
HW
250 [RTL_GIGA_MAC_VER_37] =
251 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
252 JUMBO_1K, true),
953a12cc 253};
85bffe6c 254#undef _R
953a12cc 255
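/*
 * Illustrative sketch, not part of the original file: the detected
 * mac_version is used directly as an index into rtl_chip_infos[], so the
 * chip name, firmware blob and jumbo limits are one array lookup away.
 * The hypothetical helper below shows that lookup for the jumbo limit
 * (valid detected versions only, not RTL_GIGA_MAC_NONE).
 */
static inline u16 example_jumbo_max(enum mac_version version)
{
	return rtl_chip_infos[version].jumbo_max;
}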
bcf0bf90
FR
256enum cfg_version {
257 RTL_CFG_0 = 0x00,
258 RTL_CFG_1,
259 RTL_CFG_2
260};
261
a3aa1884 262static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
bcf0bf90 263 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
d2eed8cf 264 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
d81bf551 265 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
07ce4064 266 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
bcf0bf90
FR
267 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
268 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
93a3aa25 269 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
bc1660b5 270 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
bcf0bf90
FR
271 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
272 { PCI_VENDOR_ID_LINKSYS, 0x1032,
273 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
11d2e282
CM
274 { 0x0001, 0x8168,
275 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
1da177e4
LT
276 {0,},
277};
278
279MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
280
6f0333b8 281static int rx_buf_sz = 16383;
4300e8c7 282static int use_dac;
b57b7e5a
SH
283static struct {
284 u32 msg_enable;
285} debug = { -1 };
1da177e4 286
07d3f51f
FR
287enum rtl_registers {
288 MAC0 = 0, /* Ethernet hardware address. */
773d2021 289 MAC4 = 4,
07d3f51f
FR
290 MAR0 = 8, /* Multicast filter. */
291 CounterAddrLow = 0x10,
292 CounterAddrHigh = 0x14,
293 TxDescStartAddrLow = 0x20,
294 TxDescStartAddrHigh = 0x24,
295 TxHDescStartAddrLow = 0x28,
296 TxHDescStartAddrHigh = 0x2c,
297 FLASH = 0x30,
298 ERSR = 0x36,
299 ChipCmd = 0x37,
300 TxPoll = 0x38,
301 IntrMask = 0x3c,
302 IntrStatus = 0x3e,
4f6b00e5 303
07d3f51f 304 TxConfig = 0x40,
4f6b00e5
HW
305#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
306#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
2b7b4318 307
4f6b00e5
HW
308 RxConfig = 0x44,
309#define RX128_INT_EN (1 << 15) /* 8111c and later */
310#define RX_MULTI_EN (1 << 14) /* 8111c only */
311#define RXCFG_FIFO_SHIFT 13
312 /* No threshold before first PCI xfer */
313#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
314#define RXCFG_DMA_SHIFT 8
315 /* Unlimited maximum PCI burst. */
316#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
2b7b4318 317
07d3f51f
FR
318 RxMissed = 0x4c,
319 Cfg9346 = 0x50,
320 Config0 = 0x51,
321 Config1 = 0x52,
322 Config2 = 0x53,
d387b427
FR
323#define PME_SIGNAL (1 << 5) /* 8168c and later */
324
07d3f51f
FR
325 Config3 = 0x54,
326 Config4 = 0x55,
327 Config5 = 0x56,
328 MultiIntr = 0x5c,
329 PHYAR = 0x60,
07d3f51f
FR
330 PHYstatus = 0x6c,
331 RxMaxSize = 0xda,
332 CPlusCmd = 0xe0,
333 IntrMitigate = 0xe2,
334 RxDescAddrLow = 0xe4,
335 RxDescAddrHigh = 0xe8,
f0298f81 336 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
337
338#define NoEarlyTx 0x3f /* Max value : no early transmit. */
339
340 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
341
342#define TxPacketMax (8064 >> 7)
3090bd9a 343#define EarlySize 0x27
f0298f81 344
07d3f51f
FR
345 FuncEvent = 0xf0,
346 FuncEventMask = 0xf4,
347 FuncPresetState = 0xf8,
348 FuncForceEvent = 0xfc,
1da177e4
LT
349};
350
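/*
 * Illustrative sketch, not part of the original file: a baseline RxConfig
 * value is built by OR-ing the fields defined above inside rtl_registers;
 * unlimited DMA burst plus the "no threshold" FIFO setting gives
 * (7 << 13) | (7 << 8) = 0xe700.  The hypothetical helper below only
 * restates that composition.
 */
static inline u32 example_rxcfg_base(void)
{
	return RX_FIFO_THRESH | RX_DMA_BURST;
}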
f162a5d1
FR
351enum rtl8110_registers {
352 TBICSR = 0x64,
353 TBI_ANAR = 0x68,
354 TBI_LPAR = 0x6a,
355};
356
357enum rtl8168_8101_registers {
358 CSIDR = 0x64,
359 CSIAR = 0x68,
360#define CSIAR_FLAG 0x80000000
361#define CSIAR_WRITE_CMD 0x80000000
362#define CSIAR_BYTE_ENABLE 0x0f
363#define CSIAR_BYTE_ENABLE_SHIFT 12
364#define CSIAR_ADDR_MASK 0x0fff
7e18dca1
HW
365#define CSIAR_FUNC_CARD 0x00000000
366#define CSIAR_FUNC_SDIO 0x00010000
367#define CSIAR_FUNC_NIC 0x00020000
065c27c1 368 PMCH = 0x6f,
f162a5d1
FR
369 EPHYAR = 0x80,
370#define EPHYAR_FLAG 0x80000000
371#define EPHYAR_WRITE_CMD 0x80000000
372#define EPHYAR_REG_MASK 0x1f
373#define EPHYAR_REG_SHIFT 16
374#define EPHYAR_DATA_MASK 0xffff
5a5e4443 375 DLLPR = 0xd0,
4f6b00e5 376#define PFM_EN (1 << 6)
f162a5d1
FR
377 DBG_REG = 0xd1,
378#define FIX_NAK_1 (1 << 4)
379#define FIX_NAK_2 (1 << 3)
5a5e4443
HW
380 TWSI = 0xd2,
381 MCU = 0xd3,
4f6b00e5 382#define NOW_IS_OOB (1 << 7)
5a5e4443
HW
383#define EN_NDP (1 << 3)
384#define EN_OOB_RESET (1 << 2)
daf9df6d 385 EFUSEAR = 0xdc,
386#define EFUSEAR_FLAG 0x80000000
387#define EFUSEAR_WRITE_CMD 0x80000000
388#define EFUSEAR_READ_CMD 0x00000000
389#define EFUSEAR_REG_MASK 0x03ff
390#define EFUSEAR_REG_SHIFT 8
391#define EFUSEAR_DATA_MASK 0xff
f162a5d1
FR
392};
393
c0e45c1c 394enum rtl8168_registers {
4f6b00e5
HW
395 LED_FREQ = 0x1a,
396 EEE_LED = 0x1b,
b646d900 397 ERIDR = 0x70,
398 ERIAR = 0x74,
399#define ERIAR_FLAG 0x80000000
400#define ERIAR_WRITE_CMD 0x80000000
401#define ERIAR_READ_CMD 0x00000000
402#define ERIAR_ADDR_BYTE_ALIGN 4
b646d900 403#define ERIAR_TYPE_SHIFT 16
4f6b00e5
HW
404#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
405#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
406#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
407#define ERIAR_MASK_SHIFT 12
408#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
409#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
410#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
c0e45c1c 411 EPHY_RXER_NUM = 0x7c,
412 OCPDR = 0xb0, /* OCP GPHY access */
413#define OCPDR_WRITE_CMD 0x80000000
414#define OCPDR_READ_CMD 0x00000000
415#define OCPDR_REG_MASK 0x7f
416#define OCPDR_GPHY_REG_SHIFT 16
417#define OCPDR_DATA_MASK 0xffff
418 OCPAR = 0xb4,
419#define OCPAR_FLAG 0x80000000
420#define OCPAR_GPHY_WRITE_CMD 0x8000f060
421#define OCPAR_GPHY_READ_CMD 0x0000f060
01dc7fec 422 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
423 MISC = 0xf0, /* 8168e only. */
cecb5fd7 424#define TXPLA_RST (1 << 29)
4f6b00e5 425#define PWM_EN (1 << 22)
c0e45c1c 426};
427
07d3f51f 428enum rtl_register_content {
1da177e4 429 /* InterruptStatusBits */
07d3f51f
FR
430 SYSErr = 0x8000,
431 PCSTimeout = 0x4000,
432 SWInt = 0x0100,
433 TxDescUnavail = 0x0080,
434 RxFIFOOver = 0x0040,
435 LinkChg = 0x0020,
436 RxOverflow = 0x0010,
437 TxErr = 0x0008,
438 TxOK = 0x0004,
439 RxErr = 0x0002,
440 RxOK = 0x0001,
1da177e4
LT
441
442 /* RxStatusDesc */
e03f33af 443 RxBOVF = (1 << 24),
9dccf611
FR
444 RxFOVF = (1 << 23),
445 RxRWT = (1 << 22),
446 RxRES = (1 << 21),
447 RxRUNT = (1 << 20),
448 RxCRC = (1 << 19),
1da177e4
LT
449
450 /* ChipCmdBits */
4f6b00e5 451 StopReq = 0x80,
07d3f51f
FR
452 CmdReset = 0x10,
453 CmdRxEnb = 0x08,
454 CmdTxEnb = 0x04,
455 RxBufEmpty = 0x01,
1da177e4 456
275391a4
FR
457 /* TXPoll register p.5 */
458 HPQ = 0x80, /* Poll cmd on the high prio queue */
459 NPQ = 0x40, /* Poll cmd on the low prio queue */
460 FSWInt = 0x01, /* Forced software interrupt */
461
1da177e4 462 /* Cfg9346Bits */
07d3f51f
FR
463 Cfg9346_Lock = 0x00,
464 Cfg9346_Unlock = 0xc0,
1da177e4
LT
465
466 /* rx_mode_bits */
07d3f51f
FR
467 AcceptErr = 0x20,
468 AcceptRunt = 0x10,
469 AcceptBroadcast = 0x08,
470 AcceptMulticast = 0x04,
471 AcceptMyPhys = 0x02,
472 AcceptAllPhys = 0x01,
1687b566 473#define RX_CONFIG_ACCEPT_MASK 0x3f
1da177e4 474
1da177e4
LT
475 /* TxConfigBits */
476 TxInterFrameGapShift = 24,
 477 TxDMAShift = 8, /* DMA burst value (0-7) is shifted by this many bits */
478
5d06a99f 479 /* Config1 register p.24 */
f162a5d1
FR
480 LEDS1 = (1 << 7),
481 LEDS0 = (1 << 6),
f162a5d1
FR
482 Speed_down = (1 << 4),
483 MEMMAP = (1 << 3),
484 IOMAP = (1 << 2),
485 VPD = (1 << 1),
5d06a99f
FR
486 PMEnable = (1 << 0), /* Power Management Enable */
487
6dccd16b 488 /* Config2 register p. 25 */
2ca6cf06 489 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
6dccd16b
FR
490 PCI_Clock_66MHz = 0x01,
491 PCI_Clock_33MHz = 0x00,
492
61a4dcc2
FR
493 /* Config3 register p.25 */
494 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
495 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
d58d46b5 496 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
f162a5d1 497 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
61a4dcc2 498
d58d46b5
FR
499 /* Config4 register */
500 Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
501
5d06a99f 502 /* Config5 register p.27 */
61a4dcc2
FR
503 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
504 MWF = (1 << 5), /* Accept Multicast wakeup frame */
505 UWF = (1 << 4), /* Accept Unicast wakeup frame */
cecb5fd7 506 Spi_en = (1 << 3),
61a4dcc2 507 LanWake = (1 << 1), /* LanWake enable/disable */
5d06a99f
FR
508 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
509
1da177e4
LT
510 /* TBICSR p.28 */
511 TBIReset = 0x80000000,
512 TBILoopback = 0x40000000,
513 TBINwEnable = 0x20000000,
514 TBINwRestart = 0x10000000,
515 TBILinkOk = 0x02000000,
516 TBINwComplete = 0x01000000,
517
518 /* CPlusCmd p.31 */
f162a5d1
FR
519 EnableBist = (1 << 15), // 8168 8101
520 Mac_dbgo_oe = (1 << 14), // 8168 8101
521 Normal_mode = (1 << 13), // unused
522 Force_half_dup = (1 << 12), // 8168 8101
523 Force_rxflow_en = (1 << 11), // 8168 8101
524 Force_txflow_en = (1 << 10), // 8168 8101
525 Cxpl_dbg_sel = (1 << 9), // 8168 8101
526 ASF = (1 << 8), // 8168 8101
527 PktCntrDisable = (1 << 7), // 8168 8101
528 Mac_dbgo_sel = 0x001c, // 8168
1da177e4
LT
529 RxVlan = (1 << 6),
530 RxChkSum = (1 << 5),
531 PCIDAC = (1 << 4),
532 PCIMulRW = (1 << 3),
0e485150
FR
533 INTT_0 = 0x0000, // 8168
534 INTT_1 = 0x0001, // 8168
535 INTT_2 = 0x0002, // 8168
536 INTT_3 = 0x0003, // 8168
1da177e4
LT
537
538 /* rtl8169_PHYstatus */
07d3f51f
FR
539 TBI_Enable = 0x80,
540 TxFlowCtrl = 0x40,
541 RxFlowCtrl = 0x20,
542 _1000bpsF = 0x10,
543 _100bps = 0x08,
544 _10bps = 0x04,
545 LinkStatus = 0x02,
546 FullDup = 0x01,
1da177e4 547
1da177e4 548 /* _TBICSRBit */
07d3f51f 549 TBILinkOK = 0x02000000,
d4a3a0fc
SH
550
551 /* DumpCounterCommand */
07d3f51f 552 CounterDump = 0x8,
1da177e4
LT
553};
554
2b7b4318
FR
555enum rtl_desc_bit {
556 /* First doubleword. */
1da177e4
LT
557 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
558 RingEnd = (1 << 30), /* End of descriptor ring */
559 FirstFrag = (1 << 29), /* First segment of a packet */
560 LastFrag = (1 << 28), /* Final segment of a packet */
2b7b4318
FR
561};
562
563/* Generic case. */
564enum rtl_tx_desc_bit {
565 /* First doubleword. */
566 TD_LSO = (1 << 27), /* Large Send Offload */
567#define TD_MSS_MAX 0x07ffu /* MSS value */
1da177e4 568
2b7b4318
FR
569 /* Second doubleword. */
570 TxVlanTag = (1 << 17), /* Add VLAN tag */
571};
572
573/* 8169, 8168b and 810x except 8102e. */
574enum rtl_tx_desc_bit_0 {
575 /* First doubleword. */
576#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
577 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
578 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
579 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
580};
581
582/* 8102e, 8168c and beyond. */
583enum rtl_tx_desc_bit_1 {
584 /* Second doubleword. */
585#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
586 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
587 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
588 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
589};
1da177e4 590
2b7b4318
FR
591static const struct rtl_tx_desc_info {
592 struct {
593 u32 udp;
594 u32 tcp;
595 } checksum;
596 u16 mss_shift;
597 u16 opts_offset;
598} tx_desc_info [] = {
599 [RTL_TD_0] = {
600 .checksum = {
601 .udp = TD0_IP_CS | TD0_UDP_CS,
602 .tcp = TD0_IP_CS | TD0_TCP_CS
603 },
604 .mss_shift = TD0_MSS_SHIFT,
605 .opts_offset = 0
606 },
607 [RTL_TD_1] = {
608 .checksum = {
609 .udp = TD1_IP_CS | TD1_UDP_CS,
610 .tcp = TD1_IP_CS | TD1_TCP_CS
611 },
612 .mss_shift = TD1_MSS_SHIFT,
613 .opts_offset = 1
614 }
615};
616
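/*
 * Illustrative sketch, not part of the original file: tx_desc_info[]
 * abstracts the two descriptor layouts so checksum/TSO setup can be
 * written once against tp->txd_version.  The hypothetical helper below
 * shows the lookup for a TCP checksum request.
 */
static inline u32 example_tcp_csum_bits(enum rtl_tx_desc_version version)
{
	return tx_desc_info[version].checksum.tcp;
}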
617enum rtl_rx_desc_bit {
1da177e4
LT
618 /* Rx private */
619 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
620 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
621
622#define RxProtoUDP (PID1)
623#define RxProtoTCP (PID0)
624#define RxProtoIP (PID1 | PID0)
625#define RxProtoMask RxProtoIP
626
627 IPFail = (1 << 16), /* IP checksum failed */
628 UDPFail = (1 << 15), /* UDP/IP checksum failed */
629 TCPFail = (1 << 14), /* TCP/IP checksum failed */
630 RxVlanTag = (1 << 16), /* VLAN tag available */
631};
632
633#define RsvdMask 0x3fffc000
634
635struct TxDesc {
6cccd6e7
REB
636 __le32 opts1;
637 __le32 opts2;
638 __le64 addr;
1da177e4
LT
639};
640
641struct RxDesc {
6cccd6e7
REB
642 __le32 opts1;
643 __le32 opts2;
644 __le64 addr;
1da177e4
LT
645};
646
647struct ring_info {
648 struct sk_buff *skb;
649 u32 len;
650 u8 __pad[sizeof(void *) - sizeof(u32)];
651};
652
f23e7fda 653enum features {
ccdffb9a
FR
654 RTL_FEATURE_WOL = (1 << 0),
655 RTL_FEATURE_MSI = (1 << 1),
656 RTL_FEATURE_GMII = (1 << 2),
f23e7fda
FR
657};
658
355423d0
IV
659struct rtl8169_counters {
660 __le64 tx_packets;
661 __le64 rx_packets;
662 __le64 tx_errors;
663 __le32 rx_errors;
664 __le16 rx_missed;
665 __le16 align_errors;
666 __le32 tx_one_collision;
667 __le32 tx_multi_collision;
668 __le64 rx_unicast;
669 __le64 rx_broadcast;
670 __le32 rx_multicast;
671 __le16 tx_aborted;
672 __le16 tx_underun;
673};
674
da78dbff 675enum rtl_flag {
6c4a70c5 676 RTL_FLAG_TASK_ENABLED,
da78dbff
FR
677 RTL_FLAG_TASK_SLOW_PENDING,
678 RTL_FLAG_TASK_RESET_PENDING,
679 RTL_FLAG_TASK_PHY_PENDING,
680 RTL_FLAG_MAX
681};
682
8027aa24
JW
683struct rtl8169_stats {
684 u64 packets;
685 u64 bytes;
686 struct u64_stats_sync syncp;
687};
688
1da177e4
LT
689struct rtl8169_private {
690 void __iomem *mmio_addr; /* memory map physical address */
cecb5fd7 691 struct pci_dev *pci_dev;
c4028958 692 struct net_device *dev;
bea3348e 693 struct napi_struct napi;
b57b7e5a 694 u32 msg_enable;
2b7b4318
FR
695 u16 txd_version;
696 u16 mac_version;
1da177e4
LT
697 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
 698 u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
699 u32 dirty_rx;
700 u32 dirty_tx;
8027aa24
JW
701 struct rtl8169_stats rx_stats;
702 struct rtl8169_stats tx_stats;
1da177e4
LT
703 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
704 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
705 dma_addr_t TxPhyAddr;
706 dma_addr_t RxPhyAddr;
6f0333b8 707 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
1da177e4 708 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
1da177e4
LT
709 struct timer_list timer;
710 u16 cp_cmd;
da78dbff
FR
711
712 u16 event_slow;
c0e45c1c 713
714 struct mdio_ops {
715 void (*write)(void __iomem *, int, int);
716 int (*read)(void __iomem *, int);
717 } mdio_ops;
718
065c27c1 719 struct pll_power_ops {
720 void (*down)(struct rtl8169_private *);
721 void (*up)(struct rtl8169_private *);
722 } pll_power_ops;
723
d58d46b5
FR
724 struct jumbo_ops {
725 void (*enable)(struct rtl8169_private *);
726 void (*disable)(struct rtl8169_private *);
727 } jumbo_ops;
728
beb1fe18
HW
729 struct csi_ops {
730 void (*write)(void __iomem *, int, int);
731 u32 (*read)(void __iomem *, int);
732 } csi_ops;
733
54405cde 734 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
ccdffb9a 735 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
4da19633 736 void (*phy_reset_enable)(struct rtl8169_private *tp);
07ce4064 737 void (*hw_start)(struct net_device *);
4da19633 738 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
1da177e4 739 unsigned int (*link_ok)(void __iomem *);
8b4ab28d 740 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
4422bcd4
FR
741
742 struct {
da78dbff
FR
743 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
744 struct mutex mutex;
4422bcd4
FR
745 struct work_struct work;
746 } wk;
747
f23e7fda 748 unsigned features;
ccdffb9a
FR
749
750 struct mii_if_info mii;
355423d0 751 struct rtl8169_counters counters;
e1759441 752 u32 saved_wolopts;
e03f33af 753 u32 opts1_mask;
f1e02ed1 754
b6ffd97f
FR
755 struct rtl_fw {
756 const struct firmware *fw;
1c361efb
FR
757
758#define RTL_VER_SIZE 32
759
760 char version[RTL_VER_SIZE];
761
762 struct rtl_fw_phy_action {
763 __le32 *code;
764 size_t size;
765 } phy_action;
b6ffd97f 766 } *rtl_fw;
497888cf 767#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
1da177e4
LT
768};
769
979b6c13 770MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
1da177e4 771MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
1da177e4 772module_param(use_dac, int, 0);
4300e8c7 773MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
b57b7e5a
SH
774module_param_named(debug, debug.msg_enable, int, 0);
775MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
1da177e4
LT
776MODULE_LICENSE("GPL");
777MODULE_VERSION(RTL8169_VERSION);
bca03d5f 778MODULE_FIRMWARE(FIRMWARE_8168D_1);
779MODULE_FIRMWARE(FIRMWARE_8168D_2);
01dc7fec 780MODULE_FIRMWARE(FIRMWARE_8168E_1);
781MODULE_FIRMWARE(FIRMWARE_8168E_2);
bbb8af75 782MODULE_FIRMWARE(FIRMWARE_8168E_3);
5a5e4443 783MODULE_FIRMWARE(FIRMWARE_8105E_1);
c2218925
HW
784MODULE_FIRMWARE(FIRMWARE_8168F_1);
785MODULE_FIRMWARE(FIRMWARE_8168F_2);
7e18dca1 786MODULE_FIRMWARE(FIRMWARE_8402_1);
1da177e4 787
da78dbff
FR
788static void rtl_lock_work(struct rtl8169_private *tp)
789{
790 mutex_lock(&tp->wk.mutex);
791}
792
793static void rtl_unlock_work(struct rtl8169_private *tp)
794{
795 mutex_unlock(&tp->wk.mutex);
796}
797
d58d46b5
FR
798static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
799{
800 int cap = pci_pcie_cap(pdev);
801
802 if (cap) {
803 u16 ctl;
804
805 pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
806 ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
807 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
808 }
809}
810
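/*
 * Illustrative sketch, not part of the original file: 'force' lands in
 * the Max_Read_Request_Size field of PCI_EXP_DEVCTL (masked by
 * PCI_EXP_DEVCTL_READRQ), so callers are expected to pass an already
 * shifted value.  Assuming the usual 0x5 encoding for a 4096 byte read
 * request, a hypothetical caller would look like this.
 */
static inline void example_set_readrq_4k(struct pci_dev *pdev)
{
	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
}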
b646d900 811static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
812{
813 void __iomem *ioaddr = tp->mmio_addr;
814 int i;
815
816 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
817 for (i = 0; i < 20; i++) {
818 udelay(100);
819 if (RTL_R32(OCPAR) & OCPAR_FLAG)
820 break;
821 }
822 return RTL_R32(OCPDR);
823}
824
825static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
826{
827 void __iomem *ioaddr = tp->mmio_addr;
828 int i;
829
830 RTL_W32(OCPDR, data);
831 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
832 for (i = 0; i < 20; i++) {
833 udelay(100);
834 if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0)
835 break;
836 }
837}
838
fac5b3ca 839static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
b646d900 840{
fac5b3ca 841 void __iomem *ioaddr = tp->mmio_addr;
b646d900 842 int i;
843
844 RTL_W8(ERIDR, cmd);
845 RTL_W32(ERIAR, 0x800010e8);
846 msleep(2);
847 for (i = 0; i < 5; i++) {
848 udelay(100);
1e4e82ba 849 if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
b646d900 850 break;
851 }
852
fac5b3ca 853 ocp_write(tp, 0x1, 0x30, 0x00000001);
b646d900 854}
855
856#define OOB_CMD_RESET 0x00
857#define OOB_CMD_DRIVER_START 0x05
858#define OOB_CMD_DRIVER_STOP 0x06
859
cecb5fd7
FR
860static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
861{
862 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
863}
864
b646d900 865static void rtl8168_driver_start(struct rtl8169_private *tp)
866{
cecb5fd7 867 u16 reg;
b646d900 868 int i;
869
870 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
871
cecb5fd7 872 reg = rtl8168_get_ocp_reg(tp);
4804b3b3 873
b646d900 874 for (i = 0; i < 10; i++) {
875 msleep(10);
4804b3b3 876 if (ocp_read(tp, 0x0f, reg) & 0x00000800)
b646d900 877 break;
878 }
879}
880
881static void rtl8168_driver_stop(struct rtl8169_private *tp)
882{
cecb5fd7 883 u16 reg;
b646d900 884 int i;
885
886 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
887
cecb5fd7 888 reg = rtl8168_get_ocp_reg(tp);
4804b3b3 889
b646d900 890 for (i = 0; i < 10; i++) {
891 msleep(10);
4804b3b3 892 if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
b646d900 893 break;
894 }
895}
896
4804b3b3 897static int r8168dp_check_dash(struct rtl8169_private *tp)
898{
cecb5fd7 899 u16 reg = rtl8168_get_ocp_reg(tp);
4804b3b3 900
cecb5fd7 901 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
4804b3b3 902}
b646d900 903
4da19633 904static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
1da177e4
LT
905{
906 int i;
907
a6baf3af 908 RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff));
1da177e4 909
2371408c 910 for (i = 20; i > 0; i--) {
07d3f51f
FR
911 /*
912 * Check if the RTL8169 has completed writing to the specified
913 * MII register.
914 */
5b0384f4 915 if (!(RTL_R32(PHYAR) & 0x80000000))
1da177e4 916 break;
2371408c 917 udelay(25);
1da177e4 918 }
024a07ba 919 /*
81a95f04
TT
920 * According to hardware specs a 20us delay is required after write
921 * complete indication, but before sending next command.
024a07ba 922 */
81a95f04 923 udelay(20);
1da177e4
LT
924}
925
4da19633 926static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
1da177e4
LT
927{
928 int i, value = -1;
929
a6baf3af 930 RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16);
1da177e4 931
2371408c 932 for (i = 20; i > 0; i--) {
07d3f51f
FR
933 /*
934 * Check if the RTL8169 has completed retrieving data from
935 * the specified MII register.
936 */
1da177e4 937 if (RTL_R32(PHYAR) & 0x80000000) {
a6baf3af 938 value = RTL_R32(PHYAR) & 0xffff;
1da177e4
LT
939 break;
940 }
2371408c 941 udelay(25);
1da177e4 942 }
81a95f04
TT
943 /*
944 * According to hardware specs a 20us delay is required after read
945 * complete indication, but before sending next command.
946 */
947 udelay(20);
948
1da177e4
LT
949 return value;
950}
951
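/*
 * Illustrative sketch, not part of the original file: both accessors
 * above poll bit 31 of PHYAR as a busy flag (it clears once a write has
 * completed and is set once read data is valid) and then honour the
 * additional 20us turnaround required by the hardware.  A hypothetical
 * caller reading the PHY's BMCR (MII register 0) looks like this.
 */
static inline int example_read_bmcr(void __iomem *ioaddr)
{
	return r8169_mdio_read(ioaddr, 0);	/* 0 == MII_BMCR */
}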
c0e45c1c 952static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data)
953{
954 int i;
955
956 RTL_W32(OCPDR, data |
957 ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
958 RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
959 RTL_W32(EPHY_RXER_NUM, 0);
960
961 for (i = 0; i < 100; i++) {
962 mdelay(1);
963 if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
964 break;
965 }
966}
967
968static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
969{
970 r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD |
971 (value & OCPDR_DATA_MASK));
972}
973
974static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr)
975{
976 int i;
977
978 r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD);
979
980 mdelay(1);
981 RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
982 RTL_W32(EPHY_RXER_NUM, 0);
983
984 for (i = 0; i < 100; i++) {
985 mdelay(1);
986 if (RTL_R32(OCPAR) & OCPAR_FLAG)
987 break;
988 }
989
990 return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
991}
992
e6de30d6 993#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
994
995static void r8168dp_2_mdio_start(void __iomem *ioaddr)
996{
997 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
998}
999
1000static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1001{
1002 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
1003}
1004
1005static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
1006{
1007 r8168dp_2_mdio_start(ioaddr);
1008
1009 r8169_mdio_write(ioaddr, reg_addr, value);
1010
1011 r8168dp_2_mdio_stop(ioaddr);
1012}
1013
1014static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
1015{
1016 int value;
1017
1018 r8168dp_2_mdio_start(ioaddr);
1019
1020 value = r8169_mdio_read(ioaddr, reg_addr);
1021
1022 r8168dp_2_mdio_stop(ioaddr);
1023
1024 return value;
1025}
1026
4da19633 1027static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
dacf8154 1028{
c0e45c1c 1029 tp->mdio_ops.write(tp->mmio_addr, location, val);
dacf8154
FR
1030}
1031
4da19633 1032static int rtl_readphy(struct rtl8169_private *tp, int location)
1033{
c0e45c1c 1034 return tp->mdio_ops.read(tp->mmio_addr, location);
4da19633 1035}
1036
1037static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
1038{
1039 rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
1040}
1041
1042static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
daf9df6d 1043{
1044 int val;
1045
4da19633 1046 val = rtl_readphy(tp, reg_addr);
1047 rtl_writephy(tp, reg_addr, (val | p) & ~m);
daf9df6d 1048}
1049
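/*
 * Illustrative sketch, not part of the original file: rtl_w1w0_phy() is a
 * read-modify-write helper, bits set in 'p' are turned on and bits set in
 * 'm' are cleared.  The register and bit values in the hypothetical call
 * below are made up purely to show the calling pattern.
 */
static inline void example_phy_rmw(struct rtl8169_private *tp)
{
	rtl_w1w0_phy(tp, 0x0d, 0x0002, 0x0001);	/* set bit 1, clear bit 0 */
}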
ccdffb9a
FR
1050static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
1051 int val)
1052{
1053 struct rtl8169_private *tp = netdev_priv(dev);
ccdffb9a 1054
4da19633 1055 rtl_writephy(tp, location, val);
ccdffb9a
FR
1056}
1057
1058static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
1059{
1060 struct rtl8169_private *tp = netdev_priv(dev);
ccdffb9a 1061
4da19633 1062 return rtl_readphy(tp, location);
ccdffb9a
FR
1063}
1064
dacf8154
FR
1065static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
1066{
1067 unsigned int i;
1068
1069 RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1070 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1071
1072 for (i = 0; i < 100; i++) {
1073 if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
1074 break;
1075 udelay(10);
1076 }
1077}
1078
1079static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
1080{
1081 u16 value = 0xffff;
1082 unsigned int i;
1083
1084 RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1085
1086 for (i = 0; i < 100; i++) {
1087 if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
1088 value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
1089 break;
1090 }
1091 udelay(10);
1092 }
1093
1094 return value;
1095}
1096
133ac40a
HW
1097static
1098void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
1099{
1100 unsigned int i;
1101
1102 BUG_ON((addr & 3) || (mask == 0));
1103 RTL_W32(ERIDR, val);
1104 RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
1105
1106 for (i = 0; i < 100; i++) {
1107 if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
1108 break;
1109 udelay(100);
1110 }
1111}
1112
1113static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type)
1114{
1115 u32 value = ~0x00;
1116 unsigned int i;
1117
1118 RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
1119
1120 for (i = 0; i < 100; i++) {
1121 if (RTL_R32(ERIAR) & ERIAR_FLAG) {
1122 value = RTL_R32(ERIDR);
1123 break;
1124 }
1125 udelay(100);
1126 }
1127
1128 return value;
1129}
1130
1131static void
1132rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type)
1133{
1134 u32 val;
1135
1136 val = rtl_eri_read(ioaddr, addr, type);
1137 rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type);
1138}
1139
c28aa385 1140struct exgmac_reg {
1141 u16 addr;
1142 u16 mask;
1143 u32 val;
1144};
1145
1146static void rtl_write_exgmac_batch(void __iomem *ioaddr,
1147 const struct exgmac_reg *r, int len)
1148{
1149 while (len-- > 0) {
1150 rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1151 r++;
1152 }
1153}
1154
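/*
 * Illustrative sketch, not part of the original file: chip-specific
 * fixups can be expressed as a table of ERI writes and replayed with
 * rtl_write_exgmac_batch().  The addresses and values below are made up
 * purely to show the shape of such a table.
 */
static inline void example_exgmac_fixup(void __iomem *ioaddr)
{
	static const struct exgmac_reg e[] = {
		{ 0x00e0, ERIAR_MASK_1111, 0x00000000 },
		{ 0x00dc, ERIAR_MASK_0001, 0x00000001 },
	};

	rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
}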
daf9df6d 1155static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
1156{
1157 u8 value = 0xff;
1158 unsigned int i;
1159
1160 RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1161
1162 for (i = 0; i < 300; i++) {
1163 if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
1164 value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
1165 break;
1166 }
1167 udelay(100);
1168 }
1169
1170 return value;
1171}
1172
9085cdfa
FR
1173static u16 rtl_get_events(struct rtl8169_private *tp)
1174{
1175 void __iomem *ioaddr = tp->mmio_addr;
1176
1177 return RTL_R16(IntrStatus);
1178}
1179
1180static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1181{
1182 void __iomem *ioaddr = tp->mmio_addr;
1183
1184 RTL_W16(IntrStatus, bits);
1185 mmiowb();
1186}
1187
1188static void rtl_irq_disable(struct rtl8169_private *tp)
1189{
1190 void __iomem *ioaddr = tp->mmio_addr;
1191
1192 RTL_W16(IntrMask, 0);
1193 mmiowb();
1194}
1195
3e990ff5
FR
1196static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
1197{
1198 void __iomem *ioaddr = tp->mmio_addr;
1199
1200 RTL_W16(IntrMask, bits);
1201}
1202
da78dbff
FR
1203#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1204#define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1205#define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1206
1207static void rtl_irq_enable_all(struct rtl8169_private *tp)
1208{
1209 rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
1210}
1211
811fd301 1212static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1da177e4 1213{
811fd301 1214 void __iomem *ioaddr = tp->mmio_addr;
1da177e4 1215
9085cdfa 1216 rtl_irq_disable(tp);
da78dbff 1217 rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
811fd301 1218 RTL_R8(ChipCmd);
1da177e4
LT
1219}
1220
4da19633 1221static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
1da177e4 1222{
4da19633 1223 void __iomem *ioaddr = tp->mmio_addr;
1224
1da177e4
LT
1225 return RTL_R32(TBICSR) & TBIReset;
1226}
1227
4da19633 1228static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
1da177e4 1229{
4da19633 1230 return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
1da177e4
LT
1231}
1232
1233static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
1234{
1235 return RTL_R32(TBICSR) & TBILinkOk;
1236}
1237
1238static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
1239{
1240 return RTL_R8(PHYstatus) & LinkStatus;
1241}
1242
4da19633 1243static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
1da177e4 1244{
4da19633 1245 void __iomem *ioaddr = tp->mmio_addr;
1246
1da177e4
LT
1247 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
1248}
1249
4da19633 1250static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1da177e4
LT
1251{
1252 unsigned int val;
1253
4da19633 1254 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1255 rtl_writephy(tp, MII_BMCR, val & 0xffff);
1da177e4
LT
1256}
1257
70090424
HW
1258static void rtl_link_chg_patch(struct rtl8169_private *tp)
1259{
1260 void __iomem *ioaddr = tp->mmio_addr;
1261 struct net_device *dev = tp->dev;
1262
1263 if (!netif_running(dev))
1264 return;
1265
1266 if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
1267 if (RTL_R8(PHYstatus) & _1000bpsF) {
1268 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
1269 0x00000011, ERIAR_EXGMAC);
1270 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
1271 0x00000005, ERIAR_EXGMAC);
1272 } else if (RTL_R8(PHYstatus) & _100bps) {
1273 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
1274 0x0000001f, ERIAR_EXGMAC);
1275 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
1276 0x00000005, ERIAR_EXGMAC);
1277 } else {
1278 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
1279 0x0000001f, ERIAR_EXGMAC);
1280 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
1281 0x0000003f, ERIAR_EXGMAC);
1282 }
1283 /* Reset packet filter */
1284 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
1285 ERIAR_EXGMAC);
1286 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
1287 ERIAR_EXGMAC);
c2218925
HW
1288 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1289 tp->mac_version == RTL_GIGA_MAC_VER_36) {
1290 if (RTL_R8(PHYstatus) & _1000bpsF) {
1291 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
1292 0x00000011, ERIAR_EXGMAC);
1293 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
1294 0x00000005, ERIAR_EXGMAC);
1295 } else {
1296 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
1297 0x0000001f, ERIAR_EXGMAC);
1298 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
1299 0x0000003f, ERIAR_EXGMAC);
1300 }
7e18dca1
HW
1301 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1302 if (RTL_R8(PHYstatus) & _10bps) {
1303 rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
1304 0x4d02, ERIAR_EXGMAC);
1305 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_0011,
1306 0x0060, ERIAR_EXGMAC);
1307 } else {
1308 rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
1309 0x0000, ERIAR_EXGMAC);
1310 }
70090424
HW
1311 }
1312}
1313
e4fbce74 1314static void __rtl8169_check_link_status(struct net_device *dev,
cecb5fd7
FR
1315 struct rtl8169_private *tp,
1316 void __iomem *ioaddr, bool pm)
1da177e4 1317{
1da177e4 1318 if (tp->link_ok(ioaddr)) {
70090424 1319 rtl_link_chg_patch(tp);
e1759441 1320 /* This is to cancel a scheduled suspend if there's one. */
e4fbce74
RW
1321 if (pm)
1322 pm_request_resume(&tp->pci_dev->dev);
1da177e4 1323 netif_carrier_on(dev);
1519e57f
FR
1324 if (net_ratelimit())
1325 netif_info(tp, ifup, dev, "link up\n");
b57b7e5a 1326 } else {
1da177e4 1327 netif_carrier_off(dev);
bf82c189 1328 netif_info(tp, ifdown, dev, "link down\n");
e4fbce74 1329 if (pm)
10953db8 1330 pm_schedule_suspend(&tp->pci_dev->dev, 5000);
b57b7e5a 1331 }
1da177e4
LT
1332}
1333
e4fbce74
RW
1334static void rtl8169_check_link_status(struct net_device *dev,
1335 struct rtl8169_private *tp,
1336 void __iomem *ioaddr)
1337{
1338 __rtl8169_check_link_status(dev, tp, ioaddr, false);
1339}
1340
e1759441
RW
1341#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1342
1343static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
61a4dcc2 1344{
61a4dcc2
FR
1345 void __iomem *ioaddr = tp->mmio_addr;
1346 u8 options;
e1759441 1347 u32 wolopts = 0;
61a4dcc2
FR
1348
1349 options = RTL_R8(Config1);
1350 if (!(options & PMEnable))
e1759441 1351 return 0;
61a4dcc2
FR
1352
1353 options = RTL_R8(Config3);
1354 if (options & LinkUp)
e1759441 1355 wolopts |= WAKE_PHY;
61a4dcc2 1356 if (options & MagicPacket)
e1759441 1357 wolopts |= WAKE_MAGIC;
61a4dcc2
FR
1358
1359 options = RTL_R8(Config5);
1360 if (options & UWF)
e1759441 1361 wolopts |= WAKE_UCAST;
61a4dcc2 1362 if (options & BWF)
e1759441 1363 wolopts |= WAKE_BCAST;
61a4dcc2 1364 if (options & MWF)
e1759441 1365 wolopts |= WAKE_MCAST;
61a4dcc2 1366
e1759441 1367 return wolopts;
61a4dcc2
FR
1368}
1369
e1759441 1370static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
61a4dcc2
FR
1371{
1372 struct rtl8169_private *tp = netdev_priv(dev);
e1759441 1373
da78dbff 1374 rtl_lock_work(tp);
e1759441
RW
1375
1376 wol->supported = WAKE_ANY;
1377 wol->wolopts = __rtl8169_get_wol(tp);
1378
da78dbff 1379 rtl_unlock_work(tp);
e1759441
RW
1380}
1381
1382static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1383{
61a4dcc2 1384 void __iomem *ioaddr = tp->mmio_addr;
07d3f51f 1385 unsigned int i;
350f7596 1386 static const struct {
61a4dcc2
FR
1387 u32 opt;
1388 u16 reg;
1389 u8 mask;
1390 } cfg[] = {
61a4dcc2
FR
1391 { WAKE_PHY, Config3, LinkUp },
1392 { WAKE_MAGIC, Config3, MagicPacket },
1393 { WAKE_UCAST, Config5, UWF },
1394 { WAKE_BCAST, Config5, BWF },
1395 { WAKE_MCAST, Config5, MWF },
1396 { WAKE_ANY, Config5, LanWake }
1397 };
851e6022 1398 u8 options;
61a4dcc2 1399
61a4dcc2
FR
1400 RTL_W8(Cfg9346, Cfg9346_Unlock);
1401
1402 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
851e6022 1403 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
e1759441 1404 if (wolopts & cfg[i].opt)
61a4dcc2
FR
1405 options |= cfg[i].mask;
1406 RTL_W8(cfg[i].reg, options);
1407 }
1408
851e6022
FR
1409 switch (tp->mac_version) {
1410 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
1411 options = RTL_R8(Config1) & ~PMEnable;
1412 if (wolopts)
1413 options |= PMEnable;
1414 RTL_W8(Config1, options);
1415 break;
1416 default:
d387b427
FR
1417 options = RTL_R8(Config2) & ~PME_SIGNAL;
1418 if (wolopts)
1419 options |= PME_SIGNAL;
1420 RTL_W8(Config2, options);
851e6022
FR
1421 break;
1422 }
1423
61a4dcc2 1424 RTL_W8(Cfg9346, Cfg9346_Lock);
e1759441
RW
1425}
1426
1427static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1428{
1429 struct rtl8169_private *tp = netdev_priv(dev);
1430
da78dbff 1431 rtl_lock_work(tp);
61a4dcc2 1432
f23e7fda
FR
1433 if (wol->wolopts)
1434 tp->features |= RTL_FEATURE_WOL;
1435 else
1436 tp->features &= ~RTL_FEATURE_WOL;
e1759441 1437 __rtl8169_set_wol(tp, wol->wolopts);
da78dbff
FR
1438
1439 rtl_unlock_work(tp);
61a4dcc2 1440
ea80907f 1441 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
1442
61a4dcc2
FR
1443 return 0;
1444}
1445
31bd204f
FR
1446static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
1447{
85bffe6c 1448 return rtl_chip_infos[tp->mac_version].fw_name;
31bd204f
FR
1449}
1450
1da177e4
LT
1451static void rtl8169_get_drvinfo(struct net_device *dev,
1452 struct ethtool_drvinfo *info)
1453{
1454 struct rtl8169_private *tp = netdev_priv(dev);
b6ffd97f 1455 struct rtl_fw *rtl_fw = tp->rtl_fw;
1da177e4 1456
68aad78c
RJ
1457 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1458 strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
1459 strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
1c361efb 1460 BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
8ac72d16
RJ
1461 if (!IS_ERR_OR_NULL(rtl_fw))
1462 strlcpy(info->fw_version, rtl_fw->version,
1463 sizeof(info->fw_version));
1da177e4
LT
1464}
1465
1466static int rtl8169_get_regs_len(struct net_device *dev)
1467{
1468 return R8169_REGS_SIZE;
1469}
1470
1471static int rtl8169_set_speed_tbi(struct net_device *dev,
54405cde 1472 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1da177e4
LT
1473{
1474 struct rtl8169_private *tp = netdev_priv(dev);
1475 void __iomem *ioaddr = tp->mmio_addr;
1476 int ret = 0;
1477 u32 reg;
1478
1479 reg = RTL_R32(TBICSR);
1480 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1481 (duplex == DUPLEX_FULL)) {
1482 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1483 } else if (autoneg == AUTONEG_ENABLE)
1484 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1485 else {
bf82c189
JP
1486 netif_warn(tp, link, dev,
1487 "incorrect speed setting refused in TBI mode\n");
1da177e4
LT
1488 ret = -EOPNOTSUPP;
1489 }
1490
1491 return ret;
1492}
1493
1494static int rtl8169_set_speed_xmii(struct net_device *dev,
54405cde 1495 u8 autoneg, u16 speed, u8 duplex, u32 adv)
1da177e4
LT
1496{
1497 struct rtl8169_private *tp = netdev_priv(dev);
3577aa1b 1498 int giga_ctrl, bmcr;
54405cde 1499 int rc = -EINVAL;
1da177e4 1500
716b50a3 1501 rtl_writephy(tp, 0x1f, 0x0000);
1da177e4
LT
1502
1503 if (autoneg == AUTONEG_ENABLE) {
3577aa1b 1504 int auto_nego;
1505
4da19633 1506 auto_nego = rtl_readphy(tp, MII_ADVERTISE);
54405cde
ON
1507 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
1508 ADVERTISE_100HALF | ADVERTISE_100FULL);
1509
1510 if (adv & ADVERTISED_10baseT_Half)
1511 auto_nego |= ADVERTISE_10HALF;
1512 if (adv & ADVERTISED_10baseT_Full)
1513 auto_nego |= ADVERTISE_10FULL;
1514 if (adv & ADVERTISED_100baseT_Half)
1515 auto_nego |= ADVERTISE_100HALF;
1516 if (adv & ADVERTISED_100baseT_Full)
1517 auto_nego |= ADVERTISE_100FULL;
1518
3577aa1b 1519 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1da177e4 1520
4da19633 1521 giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
3577aa1b 1522 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
bcf0bf90 1523
3577aa1b 1524 /* The 8100e/8101e/8102e do Fast Ethernet only. */
826e6cbd 1525 if (tp->mii.supports_gmii) {
54405cde
ON
1526 if (adv & ADVERTISED_1000baseT_Half)
1527 giga_ctrl |= ADVERTISE_1000HALF;
1528 if (adv & ADVERTISED_1000baseT_Full)
1529 giga_ctrl |= ADVERTISE_1000FULL;
1530 } else if (adv & (ADVERTISED_1000baseT_Half |
1531 ADVERTISED_1000baseT_Full)) {
bf82c189
JP
1532 netif_info(tp, link, dev,
1533 "PHY does not support 1000Mbps\n");
54405cde 1534 goto out;
bcf0bf90 1535 }
1da177e4 1536
3577aa1b 1537 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
1538
4da19633 1539 rtl_writephy(tp, MII_ADVERTISE, auto_nego);
1540 rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
3577aa1b 1541 } else {
1542 giga_ctrl = 0;
1543
1544 if (speed == SPEED_10)
1545 bmcr = 0;
1546 else if (speed == SPEED_100)
1547 bmcr = BMCR_SPEED100;
1548 else
54405cde 1549 goto out;
3577aa1b 1550
1551 if (duplex == DUPLEX_FULL)
1552 bmcr |= BMCR_FULLDPLX;
2584fbc3
RS
1553 }
1554
4da19633 1555 rtl_writephy(tp, MII_BMCR, bmcr);
3577aa1b 1556
cecb5fd7
FR
1557 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
1558 tp->mac_version == RTL_GIGA_MAC_VER_03) {
3577aa1b 1559 if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
4da19633 1560 rtl_writephy(tp, 0x17, 0x2138);
1561 rtl_writephy(tp, 0x0e, 0x0260);
3577aa1b 1562 } else {
4da19633 1563 rtl_writephy(tp, 0x17, 0x2108);
1564 rtl_writephy(tp, 0x0e, 0x0000);
3577aa1b 1565 }
1566 }
1567
54405cde
ON
1568 rc = 0;
1569out:
1570 return rc;
1da177e4
LT
1571}
1572
1573static int rtl8169_set_speed(struct net_device *dev,
54405cde 1574 u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1da177e4
LT
1575{
1576 struct rtl8169_private *tp = netdev_priv(dev);
1577 int ret;
1578
54405cde 1579 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
4876cc1e
FR
1580 if (ret < 0)
1581 goto out;
1da177e4 1582
4876cc1e
FR
1583 if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
1584 (advertising & ADVERTISED_1000baseT_Full)) {
1da177e4 1585 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
4876cc1e
FR
1586 }
1587out:
1da177e4
LT
1588 return ret;
1589}
1590
1591static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1592{
1593 struct rtl8169_private *tp = netdev_priv(dev);
1da177e4
LT
1594 int ret;
1595
4876cc1e
FR
1596 del_timer_sync(&tp->timer);
1597
da78dbff 1598 rtl_lock_work(tp);
cecb5fd7 1599 ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
25db0338 1600 cmd->duplex, cmd->advertising);
da78dbff 1601 rtl_unlock_work(tp);
5b0384f4 1602
1da177e4
LT
1603 return ret;
1604}
1605
c8f44aff
MM
1606static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1607 netdev_features_t features)
1da177e4 1608{
d58d46b5
FR
1609 struct rtl8169_private *tp = netdev_priv(dev);
1610
2b7b4318 1611 if (dev->mtu > TD_MSS_MAX)
350fb32a 1612 features &= ~NETIF_F_ALL_TSO;
1da177e4 1613
d58d46b5
FR
1614 if (dev->mtu > JUMBO_1K &&
1615 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1616 features &= ~NETIF_F_IP_CSUM;
1617
350fb32a 1618 return features;
1da177e4
LT
1619}
1620
da78dbff
FR
1621static void __rtl8169_set_features(struct net_device *dev,
1622 netdev_features_t features)
1da177e4
LT
1623{
1624 struct rtl8169_private *tp = netdev_priv(dev);
6bbe021d 1625 netdev_features_t changed = features ^ dev->features;
da78dbff 1626 void __iomem *ioaddr = tp->mmio_addr;
1da177e4 1627
6bbe021d
BG
1628 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1629 return;
1da177e4 1630
6bbe021d
BG
1631 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1632 if (features & NETIF_F_RXCSUM)
1633 tp->cp_cmd |= RxChkSum;
1634 else
1635 tp->cp_cmd &= ~RxChkSum;
350fb32a 1636
6bbe021d
BG
1637 if (dev->features & NETIF_F_HW_VLAN_RX)
1638 tp->cp_cmd |= RxVlan;
1639 else
1640 tp->cp_cmd &= ~RxVlan;
1641
1642 RTL_W16(CPlusCmd, tp->cp_cmd);
1643 RTL_R16(CPlusCmd);
1644 }
1645 if (changed & NETIF_F_RXALL) {
1646 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1647 if (features & NETIF_F_RXALL)
1648 tmp |= (AcceptErr | AcceptRunt);
1649 RTL_W32(RxConfig, tmp);
1650 }
da78dbff 1651}
1da177e4 1652
da78dbff
FR
1653static int rtl8169_set_features(struct net_device *dev,
1654 netdev_features_t features)
1655{
1656 struct rtl8169_private *tp = netdev_priv(dev);
1657
1658 rtl_lock_work(tp);
1659 __rtl8169_set_features(dev, features);
1660 rtl_unlock_work(tp);
1da177e4
LT
1661
1662 return 0;
1663}
1664
da78dbff 1665
1da177e4
LT
1666static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1667 struct sk_buff *skb)
1668{
eab6d18d 1669 return (vlan_tx_tag_present(skb)) ?
1da177e4
LT
1670 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1671}
1672
7a8fc77b 1673static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1da177e4
LT
1674{
1675 u32 opts2 = le32_to_cpu(desc->opts2);
1da177e4 1676
7a8fc77b
FR
1677 if (opts2 & RxVlanTag)
1678 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
2edae08e 1679
1da177e4 1680 desc->opts2 = 0;
1da177e4
LT
1681}
1682
ccdffb9a 1683static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1da177e4
LT
1684{
1685 struct rtl8169_private *tp = netdev_priv(dev);
1686 void __iomem *ioaddr = tp->mmio_addr;
1687 u32 status;
1688
1689 cmd->supported =
1690 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
1691 cmd->port = PORT_FIBRE;
1692 cmd->transceiver = XCVR_INTERNAL;
1693
1694 status = RTL_R32(TBICSR);
1695 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
1696 cmd->autoneg = !!(status & TBINwEnable);
1697
70739497 1698 ethtool_cmd_speed_set(cmd, SPEED_1000);
1da177e4 1699 cmd->duplex = DUPLEX_FULL; /* Always set */
ccdffb9a
FR
1700
1701 return 0;
1da177e4
LT
1702}
1703
ccdffb9a 1704static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
1da177e4
LT
1705{
1706 struct rtl8169_private *tp = netdev_priv(dev);
ccdffb9a
FR
1707
1708 return mii_ethtool_gset(&tp->mii, cmd);
1da177e4
LT
1709}
1710
1711static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1712{
1713 struct rtl8169_private *tp = netdev_priv(dev);
ccdffb9a 1714 int rc;
1da177e4 1715
da78dbff 1716 rtl_lock_work(tp);
ccdffb9a 1717 rc = tp->get_settings(dev, cmd);
da78dbff 1718 rtl_unlock_work(tp);
1da177e4 1719
ccdffb9a 1720 return rc;
1da177e4
LT
1721}
1722
1723static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1724 void *p)
1725{
5b0384f4 1726 struct rtl8169_private *tp = netdev_priv(dev);
1da177e4 1727
5b0384f4
FR
1728 if (regs->len > R8169_REGS_SIZE)
1729 regs->len = R8169_REGS_SIZE;
1da177e4 1730
da78dbff 1731 rtl_lock_work(tp);
5b0384f4 1732 memcpy_fromio(p, tp->mmio_addr, regs->len);
da78dbff 1733 rtl_unlock_work(tp);
1da177e4
LT
1734}
1735
b57b7e5a
SH
1736static u32 rtl8169_get_msglevel(struct net_device *dev)
1737{
1738 struct rtl8169_private *tp = netdev_priv(dev);
1739
1740 return tp->msg_enable;
1741}
1742
1743static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1744{
1745 struct rtl8169_private *tp = netdev_priv(dev);
1746
1747 tp->msg_enable = value;
1748}
1749
d4a3a0fc
SH
1750static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1751 "tx_packets",
1752 "rx_packets",
1753 "tx_errors",
1754 "rx_errors",
1755 "rx_missed",
1756 "align_errors",
1757 "tx_single_collisions",
1758 "tx_multi_collisions",
1759 "unicast",
1760 "broadcast",
1761 "multicast",
1762 "tx_aborted",
1763 "tx_underrun",
1764};
1765
b9f2c044 1766static int rtl8169_get_sset_count(struct net_device *dev, int sset)
d4a3a0fc 1767{
b9f2c044
JG
1768 switch (sset) {
1769 case ETH_SS_STATS:
1770 return ARRAY_SIZE(rtl8169_gstrings);
1771 default:
1772 return -EOPNOTSUPP;
1773 }
d4a3a0fc
SH
1774}
1775
355423d0 1776static void rtl8169_update_counters(struct net_device *dev)
d4a3a0fc
SH
1777{
1778 struct rtl8169_private *tp = netdev_priv(dev);
1779 void __iomem *ioaddr = tp->mmio_addr;
cecb5fd7 1780 struct device *d = &tp->pci_dev->dev;
d4a3a0fc
SH
1781 struct rtl8169_counters *counters;
1782 dma_addr_t paddr;
1783 u32 cmd;
355423d0 1784 int wait = 1000;
d4a3a0fc 1785
355423d0
IV
1786 /*
1787 * Some chips are unable to dump tally counters when the receiver
1788 * is disabled.
1789 */
1790 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1791 return;
d4a3a0fc 1792
48addcc9 1793 counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
d4a3a0fc
SH
1794 if (!counters)
1795 return;
1796
1797 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
284901a9 1798 cmd = (u64)paddr & DMA_BIT_MASK(32);
d4a3a0fc
SH
1799 RTL_W32(CounterAddrLow, cmd);
1800 RTL_W32(CounterAddrLow, cmd | CounterDump);
1801
355423d0
IV
1802 while (wait--) {
1803 if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
355423d0 1804 memcpy(&tp->counters, counters, sizeof(*counters));
d4a3a0fc 1805 break;
355423d0
IV
1806 }
1807 udelay(10);
d4a3a0fc
SH
1808 }
1809
1810 RTL_W32(CounterAddrLow, 0);
1811 RTL_W32(CounterAddrHigh, 0);
1812
48addcc9 1813 dma_free_coherent(d, sizeof(*counters), counters, paddr);
d4a3a0fc
SH
1814}
1815
355423d0
IV
1816static void rtl8169_get_ethtool_stats(struct net_device *dev,
1817 struct ethtool_stats *stats, u64 *data)
1818{
1819 struct rtl8169_private *tp = netdev_priv(dev);
1820
1821 ASSERT_RTNL();
1822
1823 rtl8169_update_counters(dev);
1824
1825 data[0] = le64_to_cpu(tp->counters.tx_packets);
1826 data[1] = le64_to_cpu(tp->counters.rx_packets);
1827 data[2] = le64_to_cpu(tp->counters.tx_errors);
1828 data[3] = le32_to_cpu(tp->counters.rx_errors);
1829 data[4] = le16_to_cpu(tp->counters.rx_missed);
1830 data[5] = le16_to_cpu(tp->counters.align_errors);
1831 data[6] = le32_to_cpu(tp->counters.tx_one_collision);
1832 data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
1833 data[8] = le64_to_cpu(tp->counters.rx_unicast);
1834 data[9] = le64_to_cpu(tp->counters.rx_broadcast);
1835 data[10] = le32_to_cpu(tp->counters.rx_multicast);
1836 data[11] = le16_to_cpu(tp->counters.tx_aborted);
1837 data[12] = le16_to_cpu(tp->counters.tx_underun);
1838}
1839
d4a3a0fc
SH
1840static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1841{
1842 switch (stringset) {
1843 case ETH_SS_STATS:
1844 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1845 break;
1846 }
1847}
1848
7282d491 1849static const struct ethtool_ops rtl8169_ethtool_ops = {
1da177e4
LT
1850 .get_drvinfo = rtl8169_get_drvinfo,
1851 .get_regs_len = rtl8169_get_regs_len,
1852 .get_link = ethtool_op_get_link,
1853 .get_settings = rtl8169_get_settings,
1854 .set_settings = rtl8169_set_settings,
b57b7e5a
SH
1855 .get_msglevel = rtl8169_get_msglevel,
1856 .set_msglevel = rtl8169_set_msglevel,
1da177e4 1857 .get_regs = rtl8169_get_regs,
61a4dcc2
FR
1858 .get_wol = rtl8169_get_wol,
1859 .set_wol = rtl8169_set_wol,
d4a3a0fc 1860 .get_strings = rtl8169_get_strings,
b9f2c044 1861 .get_sset_count = rtl8169_get_sset_count,
d4a3a0fc 1862 .get_ethtool_stats = rtl8169_get_ethtool_stats,
e1593bb1 1863 .get_ts_info = ethtool_op_get_ts_info,
1da177e4
LT
1864};
1865
07d3f51f 1866static void rtl8169_get_mac_version(struct rtl8169_private *tp,
5d320a20 1867 struct net_device *dev, u8 default_version)
1da177e4 1868{
5d320a20 1869 void __iomem *ioaddr = tp->mmio_addr;
0e485150
FR
1870 /*
1871 * The driver currently handles the 8168Bf and the 8168Be identically
1872 * but they can be identified more specifically through the test below
1873 * if needed:
1874 *
1875 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
0127215c
FR
1876 *
1877 * Same thing for the 8101Eb and the 8101Ec:
1878 *
1879 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
0e485150 1880 */
3744100e 1881 static const struct rtl_mac_info {
1da177e4 1882 u32 mask;
e3cf0cc0 1883 u32 val;
1da177e4
LT
1884 int mac_version;
1885 } mac_info[] = {
c2218925
HW
1886 /* 8168F family. */
1887 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
1888 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
1889
01dc7fec 1890 /* 8168E family. */
70090424 1891 { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
01dc7fec 1892 { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
1893 { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
1894 { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
1895
5b538df9 1896 /* 8168D family. */
daf9df6d 1897 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
1898 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
daf9df6d 1899 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
5b538df9 1900
e6de30d6 1901 /* 8168DP family. */
1902 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
1903 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
4804b3b3 1904 { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
e6de30d6 1905
ef808d50 1906 /* 8168C family. */
17c99297 1907 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
ef3386f0 1908 { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
ef808d50 1909 { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
7f3e3d3a 1910 { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
e3cf0cc0
FR
1911 { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
1912 { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
197ff761 1913 { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
6fb07058 1914 { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 },
ef808d50 1915 { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
e3cf0cc0
FR
1916
1917 /* 8168B family. */
1918 { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
1919 { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
1920 { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
1921 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1922
1923 /* 8101 family. */
7e18dca1 1924 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
36a0e6c2 1925 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
5a5e4443
HW
1926 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
1927 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
1928 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
2857ffb7
FR
1929 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
1930 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
1931 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
1932 { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
1933 { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
1934 { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
e3cf0cc0 1935 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
2857ffb7 1936 { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
e3cf0cc0 1937 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
2857ffb7
FR
1938 { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
1939 { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
e3cf0cc0
FR
1940 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
1941 /* FIXME: where did these entries come from? -- FR */
1942 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
1943 { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
1944
1945 /* 8110 family. */
1946 { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
1947 { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
1948 { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
1949 { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
1950 { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
1951 { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
1952
f21b75e9
JD
1953 /* Catch-all */
1954 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
3744100e
FR
1955 };
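/*
 * The table above is scanned in order, so the more specific masks must
 * precede the looser family-wide entries.  The all-zero catch-all at the
 * end matches any TxConfig value and yields RTL_GIGA_MAC_NONE, which is
 * then replaced by the caller-supplied default_version below.
 */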
1956 const struct rtl_mac_info *p = mac_info;
1da177e4
LT
1957 u32 reg;
1958
e3cf0cc0
FR
1959 reg = RTL_R32(TxConfig);
1960 while ((reg & p->mask) != p->val)
1da177e4
LT
1961 p++;
1962 tp->mac_version = p->mac_version;
5d320a20
FR
1963
1964 if (tp->mac_version == RTL_GIGA_MAC_NONE) {
1965 netif_notice(tp, probe, dev,
1966 "unknown MAC, using family default\n");
1967 tp->mac_version = default_version;
1968 }
1da177e4
LT
1969}
1970
1971static void rtl8169_print_mac_version(struct rtl8169_private *tp)
1972{
bcf0bf90 1973 dprintk("mac_version = 0x%02x\n", tp->mac_version);
1da177e4
LT
1974}
1975
867763c1
FR
1976struct phy_reg {
1977 u16 reg;
1978 u16 val;
1979};
1980
4da19633 1981static void rtl_writephy_batch(struct rtl8169_private *tp,
1982 const struct phy_reg *regs, int len)
867763c1
FR
1983{
1984 while (len-- > 0) {
4da19633 1985 rtl_writephy(tp, regs->reg, regs->val);
867763c1
FR
1986 regs++;
1987 }
1988}
1989
bca03d5f 1990#define PHY_READ 0x00000000
1991#define PHY_DATA_OR 0x10000000
1992#define PHY_DATA_AND 0x20000000
1993#define PHY_BJMPN 0x30000000
1994#define PHY_READ_EFUSE 0x40000000
1995#define PHY_READ_MAC_BYTE 0x50000000
1996#define PHY_WRITE_MAC_BYTE 0x60000000
1997#define PHY_CLEAR_READCOUNT 0x70000000
1998#define PHY_WRITE 0x80000000
1999#define PHY_READCOUNT_EQ_SKIP 0x90000000
2000#define PHY_COMP_EQ_SKIPN 0xa0000000
2001#define PHY_COMP_NEQ_SKIPN 0xb0000000
2002#define PHY_WRITE_PREVIOUS 0xc0000000
2003#define PHY_SKIPN 0xd0000000
2004#define PHY_DELAY_MS 0xe0000000
2005#define PHY_WRITE_ERI_WORD 0xf0000000
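/*
 * Firmware actions are packed into 32-bit words: bits 31-28 select one of
 * the opcodes above, bits 27-16 carry a register number or relative offset
 * (regno) and bits 15-0 carry immediate data.  For example, a word such as
 * 0x80020100 would decode as PHY_WRITE, reg 0x02, data 0x0100 (purely an
 * illustrative value, not taken from any particular firmware image).
 */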
2006
960aee6c
HW
2007struct fw_info {
2008 u32 magic;
2009 char version[RTL_VER_SIZE];
2010 __le32 fw_start;
2011 __le32 fw_len;
2012 u8 chksum;
2013} __packed;
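/*
 * Header of the structured firmware format: when the leading magic word is
 * zero, rtl_fw_format_ok() below checks that the bytes of the whole file
 * sum to zero and locates the PHY action words via fw_start/fw_len;
 * otherwise the file is treated as a raw array of action words.
 */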
2014
1c361efb
FR
2015#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
2016
2017static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
bca03d5f 2018{
b6ffd97f 2019 const struct firmware *fw = rtl_fw->fw;
960aee6c 2020 struct fw_info *fw_info = (struct fw_info *)fw->data;
1c361efb
FR
2021 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2022 char *version = rtl_fw->version;
2023 bool rc = false;
2024
2025 if (fw->size < FW_OPCODE_SIZE)
2026 goto out;
960aee6c
HW
2027
2028 if (!fw_info->magic) {
2029 size_t i, size, start;
2030 u8 checksum = 0;
2031
2032 if (fw->size < sizeof(*fw_info))
2033 goto out;
2034
2035 for (i = 0; i < fw->size; i++)
2036 checksum += fw->data[i];
2037 if (checksum != 0)
2038 goto out;
2039
2040 start = le32_to_cpu(fw_info->fw_start);
2041 if (start > fw->size)
2042 goto out;
2043
2044 size = le32_to_cpu(fw_info->fw_len);
2045 if (size > (fw->size - start) / FW_OPCODE_SIZE)
2046 goto out;
2047
2048 memcpy(version, fw_info->version, RTL_VER_SIZE);
2049
2050 pa->code = (__le32 *)(fw->data + start);
2051 pa->size = size;
2052 } else {
1c361efb
FR
2053 if (fw->size % FW_OPCODE_SIZE)
2054 goto out;
2055
2056 strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
2057
2058 pa->code = (__le32 *)fw->data;
2059 pa->size = fw->size / FW_OPCODE_SIZE;
2060 }
2061 version[RTL_VER_SIZE - 1] = 0;
2062
2063 rc = true;
2064out:
2065 return rc;
2066}
2067
fd112f2e
FR
2068static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2069 struct rtl_fw_phy_action *pa)
1c361efb 2070{
fd112f2e 2071 bool rc = false;
1c361efb 2072 size_t index;
bca03d5f 2073
1c361efb
FR
2074 for (index = 0; index < pa->size; index++) {
2075 u32 action = le32_to_cpu(pa->code[index]);
42b82dc1 2076 u32 regno = (action & 0x0fff0000) >> 16;
bca03d5f 2077
42b82dc1 2078 switch (action & 0xf0000000) {
2079 case PHY_READ:
2080 case PHY_DATA_OR:
2081 case PHY_DATA_AND:
2082 case PHY_READ_EFUSE:
2083 case PHY_CLEAR_READCOUNT:
2084 case PHY_WRITE:
2085 case PHY_WRITE_PREVIOUS:
2086 case PHY_DELAY_MS:
2087 break;
2088
2089 case PHY_BJMPN:
2090 if (regno > index) {
fd112f2e 2091 netif_err(tp, ifup, tp->dev,
cecb5fd7 2092 "Out of range of firmware\n");
fd112f2e 2093 goto out;
42b82dc1 2094 }
2095 break;
2096 case PHY_READCOUNT_EQ_SKIP:
1c361efb 2097 if (index + 2 >= pa->size) {
fd112f2e 2098 netif_err(tp, ifup, tp->dev,
cecb5fd7 2099 "Out of range of firmware\n");
fd112f2e 2100 goto out;
42b82dc1 2101 }
2102 break;
2103 case PHY_COMP_EQ_SKIPN:
2104 case PHY_COMP_NEQ_SKIPN:
2105 case PHY_SKIPN:
1c361efb 2106 if (index + 1 + regno >= pa->size) {
fd112f2e 2107 netif_err(tp, ifup, tp->dev,
cecb5fd7 2108 "Out of range of firmware\n");
fd112f2e 2109 goto out;
42b82dc1 2110 }
bca03d5f 2111 break;
2112
42b82dc1 2113 case PHY_READ_MAC_BYTE:
2114 case PHY_WRITE_MAC_BYTE:
2115 case PHY_WRITE_ERI_WORD:
2116 default:
fd112f2e 2117 netif_err(tp, ifup, tp->dev,
42b82dc1 2118 "Invalid action 0x%08x\n", action);
fd112f2e 2119 goto out;
bca03d5f 2120 }
2121 }
fd112f2e
FR
2122 rc = true;
2123out:
2124 return rc;
2125}
bca03d5f 2126
fd112f2e
FR
2127static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2128{
2129 struct net_device *dev = tp->dev;
2130 int rc = -EINVAL;
2131
2132 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2133 netif_err(tp, ifup, dev, "invalid firmware\n");
2134 goto out;
2135 }
2136
2137 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
2138 rc = 0;
2139out:
2140 return rc;
2141}
2142
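/*
 * Interpreter for the action words validated above: 'predata' holds the
 * result of the last PHY_READ/PHY_READ_EFUSE (as modified by PHY_DATA_OR
 * and PHY_DATA_AND), while 'count' counts PHY_READ operations since the
 * last PHY_CLEAR_READCOUNT and is what PHY_READCOUNT_EQ_SKIP compares
 * against its immediate data when deciding whether to skip the next word.
 */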
2143static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2144{
2145 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2146 u32 predata, count;
2147 size_t index;
2148
2149 predata = count = 0;
42b82dc1 2150
1c361efb
FR
2151 for (index = 0; index < pa->size; ) {
2152 u32 action = le32_to_cpu(pa->code[index]);
bca03d5f 2153 u32 data = action & 0x0000ffff;
42b82dc1 2154 u32 regno = (action & 0x0fff0000) >> 16;
2155
2156 if (!action)
2157 break;
bca03d5f 2158
2159 switch (action & 0xf0000000) {
42b82dc1 2160 case PHY_READ:
2161 predata = rtl_readphy(tp, regno);
2162 count++;
2163 index++;
2164 break;
2165 case PHY_DATA_OR:
2166 predata |= data;
2167 index++;
2168 break;
2169 case PHY_DATA_AND:
2170 predata &= data;
2171 index++;
2172 break;
2173 case PHY_BJMPN:
2174 index -= regno;
2175 break;
2176 case PHY_READ_EFUSE:
2177 predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
2178 index++;
2179 break;
2180 case PHY_CLEAR_READCOUNT:
2181 count = 0;
2182 index++;
2183 break;
bca03d5f 2184 case PHY_WRITE:
42b82dc1 2185 rtl_writephy(tp, regno, data);
2186 index++;
2187 break;
2188 case PHY_READCOUNT_EQ_SKIP:
cecb5fd7 2189 index += (count == data) ? 2 : 1;
bca03d5f 2190 break;
42b82dc1 2191 case PHY_COMP_EQ_SKIPN:
2192 if (predata == data)
2193 index += regno;
2194 index++;
2195 break;
2196 case PHY_COMP_NEQ_SKIPN:
2197 if (predata != data)
2198 index += regno;
2199 index++;
2200 break;
2201 case PHY_WRITE_PREVIOUS:
2202 rtl_writephy(tp, regno, predata);
2203 index++;
2204 break;
2205 case PHY_SKIPN:
2206 index += regno + 1;
2207 break;
2208 case PHY_DELAY_MS:
2209 mdelay(data);
2210 index++;
2211 break;
2212
2213 case PHY_READ_MAC_BYTE:
2214 case PHY_WRITE_MAC_BYTE:
2215 case PHY_WRITE_ERI_WORD:
bca03d5f 2216 default:
2217 BUG();
2218 }
2219 }
2220}
2221
f1e02ed1 2222static void rtl_release_firmware(struct rtl8169_private *tp)
2223{
b6ffd97f
FR
2224 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2225 release_firmware(tp->rtl_fw->fw);
2226 kfree(tp->rtl_fw);
2227 }
2228 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
f1e02ed1 2229}
2230
953a12cc 2231static void rtl_apply_firmware(struct rtl8169_private *tp)
f1e02ed1 2232{
b6ffd97f 2233 struct rtl_fw *rtl_fw = tp->rtl_fw;
f1e02ed1 2234
2235 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
b6ffd97f
FR
2236 if (!IS_ERR_OR_NULL(rtl_fw))
2237 rtl_phy_write_fw(tp, rtl_fw);
953a12cc
FR
2238}
2239
2240static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2241{
2242 if (rtl_readphy(tp, reg) != val)
2243 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2244 else
2245 rtl_apply_firmware(tp);
f1e02ed1 2246}
2247
4da19633 2248static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
1da177e4 2249{
350f7596 2250 static const struct phy_reg phy_reg_init[] = {
0b9b571d 2251 { 0x1f, 0x0001 },
2252 { 0x06, 0x006e },
2253 { 0x08, 0x0708 },
2254 { 0x15, 0x4000 },
2255 { 0x18, 0x65c7 },
1da177e4 2256
0b9b571d 2257 { 0x1f, 0x0001 },
2258 { 0x03, 0x00a1 },
2259 { 0x02, 0x0008 },
2260 { 0x01, 0x0120 },
2261 { 0x00, 0x1000 },
2262 { 0x04, 0x0800 },
2263 { 0x04, 0x0000 },
1da177e4 2264
0b9b571d 2265 { 0x03, 0xff41 },
2266 { 0x02, 0xdf60 },
2267 { 0x01, 0x0140 },
2268 { 0x00, 0x0077 },
2269 { 0x04, 0x7800 },
2270 { 0x04, 0x7000 },
2271
2272 { 0x03, 0x802f },
2273 { 0x02, 0x4f02 },
2274 { 0x01, 0x0409 },
2275 { 0x00, 0xf0f9 },
2276 { 0x04, 0x9800 },
2277 { 0x04, 0x9000 },
2278
2279 { 0x03, 0xdf01 },
2280 { 0x02, 0xdf20 },
2281 { 0x01, 0xff95 },
2282 { 0x00, 0xba00 },
2283 { 0x04, 0xa800 },
2284 { 0x04, 0xa000 },
2285
2286 { 0x03, 0xff41 },
2287 { 0x02, 0xdf20 },
2288 { 0x01, 0x0140 },
2289 { 0x00, 0x00bb },
2290 { 0x04, 0xb800 },
2291 { 0x04, 0xb000 },
2292
2293 { 0x03, 0xdf41 },
2294 { 0x02, 0xdc60 },
2295 { 0x01, 0x6340 },
2296 { 0x00, 0x007d },
2297 { 0x04, 0xd800 },
2298 { 0x04, 0xd000 },
2299
2300 { 0x03, 0xdf01 },
2301 { 0x02, 0xdf20 },
2302 { 0x01, 0x100a },
2303 { 0x00, 0xa0ff },
2304 { 0x04, 0xf800 },
2305 { 0x04, 0xf000 },
2306
2307 { 0x1f, 0x0000 },
2308 { 0x0b, 0x0000 },
2309 { 0x00, 0x9200 }
2310 };
1da177e4 2311
4da19633 2312 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1da177e4
LT
2313}
2314
4da19633 2315static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
5615d9f1 2316{
350f7596 2317 static const struct phy_reg phy_reg_init[] = {
a441d7b6
FR
2318 { 0x1f, 0x0002 },
2319 { 0x01, 0x90d0 },
2320 { 0x1f, 0x0000 }
2321 };
2322
4da19633 2323 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
5615d9f1
FR
2324}
2325
4da19633 2326static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2e955856 2327{
2328 struct pci_dev *pdev = tp->pci_dev;
2e955856 2329
ccbae55e
SS
2330 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2331 (pdev->subsystem_device != 0xe000))
2e955856 2332 return;
2333
4da19633 2334 rtl_writephy(tp, 0x1f, 0x0001);
2335 rtl_writephy(tp, 0x10, 0xf01b);
2336 rtl_writephy(tp, 0x1f, 0x0000);
2e955856 2337}
2338
4da19633 2339static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
2e955856 2340{
350f7596 2341 static const struct phy_reg phy_reg_init[] = {
2e955856 2342 { 0x1f, 0x0001 },
2343 { 0x04, 0x0000 },
2344 { 0x03, 0x00a1 },
2345 { 0x02, 0x0008 },
2346 { 0x01, 0x0120 },
2347 { 0x00, 0x1000 },
2348 { 0x04, 0x0800 },
2349 { 0x04, 0x9000 },
2350 { 0x03, 0x802f },
2351 { 0x02, 0x4f02 },
2352 { 0x01, 0x0409 },
2353 { 0x00, 0xf099 },
2354 { 0x04, 0x9800 },
2355 { 0x04, 0xa000 },
2356 { 0x03, 0xdf01 },
2357 { 0x02, 0xdf20 },
2358 { 0x01, 0xff95 },
2359 { 0x00, 0xba00 },
2360 { 0x04, 0xa800 },
2361 { 0x04, 0xf000 },
2362 { 0x03, 0xdf01 },
2363 { 0x02, 0xdf20 },
2364 { 0x01, 0x101a },
2365 { 0x00, 0xa0ff },
2366 { 0x04, 0xf800 },
2367 { 0x04, 0x0000 },
2368 { 0x1f, 0x0000 },
2369
2370 { 0x1f, 0x0001 },
2371 { 0x10, 0xf41b },
2372 { 0x14, 0xfb54 },
2373 { 0x18, 0xf5c7 },
2374 { 0x1f, 0x0000 },
2375
2376 { 0x1f, 0x0001 },
2377 { 0x17, 0x0cc0 },
2378 { 0x1f, 0x0000 }
2379 };
2380
4da19633 2381 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2e955856 2382
4da19633 2383 rtl8169scd_hw_phy_config_quirk(tp);
2e955856 2384}
2385
4da19633 2386static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
8c7006aa 2387{
350f7596 2388 static const struct phy_reg phy_reg_init[] = {
8c7006aa 2389 { 0x1f, 0x0001 },
2390 { 0x04, 0x0000 },
2391 { 0x03, 0x00a1 },
2392 { 0x02, 0x0008 },
2393 { 0x01, 0x0120 },
2394 { 0x00, 0x1000 },
2395 { 0x04, 0x0800 },
2396 { 0x04, 0x9000 },
2397 { 0x03, 0x802f },
2398 { 0x02, 0x4f02 },
2399 { 0x01, 0x0409 },
2400 { 0x00, 0xf099 },
2401 { 0x04, 0x9800 },
2402 { 0x04, 0xa000 },
2403 { 0x03, 0xdf01 },
2404 { 0x02, 0xdf20 },
2405 { 0x01, 0xff95 },
2406 { 0x00, 0xba00 },
2407 { 0x04, 0xa800 },
2408 { 0x04, 0xf000 },
2409 { 0x03, 0xdf01 },
2410 { 0x02, 0xdf20 },
2411 { 0x01, 0x101a },
2412 { 0x00, 0xa0ff },
2413 { 0x04, 0xf800 },
2414 { 0x04, 0x0000 },
2415 { 0x1f, 0x0000 },
2416
2417 { 0x1f, 0x0001 },
2418 { 0x0b, 0x8480 },
2419 { 0x1f, 0x0000 },
2420
2421 { 0x1f, 0x0001 },
2422 { 0x18, 0x67c7 },
2423 { 0x04, 0x2000 },
2424 { 0x03, 0x002f },
2425 { 0x02, 0x4360 },
2426 { 0x01, 0x0109 },
2427 { 0x00, 0x3022 },
2428 { 0x04, 0x2800 },
2429 { 0x1f, 0x0000 },
2430
2431 { 0x1f, 0x0001 },
2432 { 0x17, 0x0cc0 },
2433 { 0x1f, 0x0000 }
2434 };
2435
4da19633 2436 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
8c7006aa 2437}
2438
4da19633 2439static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
236b8082 2440{
350f7596 2441 static const struct phy_reg phy_reg_init[] = {
236b8082
FR
2442 { 0x10, 0xf41b },
2443 { 0x1f, 0x0000 }
2444 };
2445
4da19633 2446 rtl_writephy(tp, 0x1f, 0x0001);
2447 rtl_patchphy(tp, 0x16, 1 << 0);
236b8082 2448
4da19633 2449 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
236b8082
FR
2450}
2451
4da19633 2452static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
236b8082 2453{
350f7596 2454 static const struct phy_reg phy_reg_init[] = {
236b8082
FR
2455 { 0x1f, 0x0001 },
2456 { 0x10, 0xf41b },
2457 { 0x1f, 0x0000 }
2458 };
2459
4da19633 2460 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
236b8082
FR
2461}
2462
4da19633 2463static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
867763c1 2464{
350f7596 2465 static const struct phy_reg phy_reg_init[] = {
867763c1
FR
2466 { 0x1f, 0x0000 },
2467 { 0x1d, 0x0f00 },
2468 { 0x1f, 0x0002 },
2469 { 0x0c, 0x1ec8 },
2470 { 0x1f, 0x0000 }
2471 };
2472
4da19633 2473 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
867763c1
FR
2474}
2475
4da19633 2476static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
ef3386f0 2477{
350f7596 2478 static const struct phy_reg phy_reg_init[] = {
ef3386f0
FR
2479 { 0x1f, 0x0001 },
2480 { 0x1d, 0x3d98 },
2481 { 0x1f, 0x0000 }
2482 };
2483
4da19633 2484 rtl_writephy(tp, 0x1f, 0x0000);
2485 rtl_patchphy(tp, 0x14, 1 << 5);
2486 rtl_patchphy(tp, 0x0d, 1 << 5);
ef3386f0 2487
4da19633 2488 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
ef3386f0
FR
2489}
2490
4da19633 2491static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
867763c1 2492{
350f7596 2493 static const struct phy_reg phy_reg_init[] = {
a3f80671
FR
2494 { 0x1f, 0x0001 },
2495 { 0x12, 0x2300 },
867763c1
FR
2496 { 0x1f, 0x0002 },
2497 { 0x00, 0x88d4 },
2498 { 0x01, 0x82b1 },
2499 { 0x03, 0x7002 },
2500 { 0x08, 0x9e30 },
2501 { 0x09, 0x01f0 },
2502 { 0x0a, 0x5500 },
2503 { 0x0c, 0x00c8 },
2504 { 0x1f, 0x0003 },
2505 { 0x12, 0xc096 },
2506 { 0x16, 0x000a },
f50d4275
FR
2507 { 0x1f, 0x0000 },
2508 { 0x1f, 0x0000 },
2509 { 0x09, 0x2000 },
2510 { 0x09, 0x0000 }
867763c1
FR
2511 };
2512
4da19633 2513 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
f50d4275 2514
4da19633 2515 rtl_patchphy(tp, 0x14, 1 << 5);
2516 rtl_patchphy(tp, 0x0d, 1 << 5);
2517 rtl_writephy(tp, 0x1f, 0x0000);
867763c1
FR
2518}
2519
4da19633 2520static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
7da97ec9 2521{
350f7596 2522 static const struct phy_reg phy_reg_init[] = {
f50d4275 2523 { 0x1f, 0x0001 },
7da97ec9 2524 { 0x12, 0x2300 },
f50d4275
FR
2525 { 0x03, 0x802f },
2526 { 0x02, 0x4f02 },
2527 { 0x01, 0x0409 },
2528 { 0x00, 0xf099 },
2529 { 0x04, 0x9800 },
2530 { 0x04, 0x9000 },
2531 { 0x1d, 0x3d98 },
7da97ec9
FR
2532 { 0x1f, 0x0002 },
2533 { 0x0c, 0x7eb8 },
f50d4275
FR
2534 { 0x06, 0x0761 },
2535 { 0x1f, 0x0003 },
2536 { 0x16, 0x0f0a },
7da97ec9
FR
2537 { 0x1f, 0x0000 }
2538 };
2539
4da19633 2540 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
f50d4275 2541
4da19633 2542 rtl_patchphy(tp, 0x16, 1 << 0);
2543 rtl_patchphy(tp, 0x14, 1 << 5);
2544 rtl_patchphy(tp, 0x0d, 1 << 5);
2545 rtl_writephy(tp, 0x1f, 0x0000);
7da97ec9
FR
2546}
2547
4da19633 2548static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
197ff761 2549{
350f7596 2550 static const struct phy_reg phy_reg_init[] = {
197ff761
FR
2551 { 0x1f, 0x0001 },
2552 { 0x12, 0x2300 },
2553 { 0x1d, 0x3d98 },
2554 { 0x1f, 0x0002 },
2555 { 0x0c, 0x7eb8 },
2556 { 0x06, 0x5461 },
2557 { 0x1f, 0x0003 },
2558 { 0x16, 0x0f0a },
2559 { 0x1f, 0x0000 }
2560 };
2561
4da19633 2562 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
197ff761 2563
4da19633 2564 rtl_patchphy(tp, 0x16, 1 << 0);
2565 rtl_patchphy(tp, 0x14, 1 << 5);
2566 rtl_patchphy(tp, 0x0d, 1 << 5);
2567 rtl_writephy(tp, 0x1f, 0x0000);
197ff761
FR
2568}
2569
4da19633 2570static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
6fb07058 2571{
4da19633 2572 rtl8168c_3_hw_phy_config(tp);
6fb07058
FR
2573}
2574
bca03d5f 2575static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
5b538df9 2576{
350f7596 2577 static const struct phy_reg phy_reg_init_0[] = {
bca03d5f 2578 /* Channel Estimation */
5b538df9 2579 { 0x1f, 0x0001 },
daf9df6d 2580 { 0x06, 0x4064 },
2581 { 0x07, 0x2863 },
2582 { 0x08, 0x059c },
2583 { 0x09, 0x26b4 },
2584 { 0x0a, 0x6a19 },
2585 { 0x0b, 0xdcc8 },
2586 { 0x10, 0xf06d },
2587 { 0x14, 0x7f68 },
2588 { 0x18, 0x7fd9 },
2589 { 0x1c, 0xf0ff },
2590 { 0x1d, 0x3d9c },
5b538df9 2591 { 0x1f, 0x0003 },
daf9df6d 2592 { 0x12, 0xf49f },
2593 { 0x13, 0x070b },
2594 { 0x1a, 0x05ad },
bca03d5f 2595 { 0x14, 0x94c0 },
2596
2597 /*
2598 * Tx Error Issue
cecb5fd7 2599 * Enhance line driver power
bca03d5f 2600 */
5b538df9 2601 { 0x1f, 0x0002 },
daf9df6d 2602 { 0x06, 0x5561 },
2603 { 0x1f, 0x0005 },
2604 { 0x05, 0x8332 },
bca03d5f 2605 { 0x06, 0x5561 },
2606
2607 /*
2608 * Cannot link at 1Gbps with a bad cable
2609 * Decrease SNR threshold from 21.07dB to 19.04dB
2610 */
2611 { 0x1f, 0x0001 },
2612 { 0x17, 0x0cc0 },
daf9df6d 2613
5b538df9 2614 { 0x1f, 0x0000 },
bca03d5f 2615 { 0x0d, 0xf880 }
daf9df6d 2616 };
bca03d5f 2617 void __iomem *ioaddr = tp->mmio_addr;
daf9df6d 2618
4da19633 2619 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
daf9df6d 2620
bca03d5f 2621 /*
2622 * Rx Error Issue
2623 * Fine Tune Switching regulator parameter
2624 */
4da19633 2625 rtl_writephy(tp, 0x1f, 0x0002);
2626 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
2627 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
daf9df6d 2628
daf9df6d 2629 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
350f7596 2630 static const struct phy_reg phy_reg_init[] = {
daf9df6d 2631 { 0x1f, 0x0002 },
2632 { 0x05, 0x669a },
2633 { 0x1f, 0x0005 },
2634 { 0x05, 0x8330 },
2635 { 0x06, 0x669a },
2636 { 0x1f, 0x0002 }
2637 };
2638 int val;
2639
4da19633 2640 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
daf9df6d 2641
4da19633 2642 val = rtl_readphy(tp, 0x0d);
daf9df6d 2643
2644 if ((val & 0x00ff) != 0x006c) {
350f7596 2645 static const u32 set[] = {
daf9df6d 2646 0x0065, 0x0066, 0x0067, 0x0068,
2647 0x0069, 0x006a, 0x006b, 0x006c
2648 };
2649 int i;
2650
4da19633 2651 rtl_writephy(tp, 0x1f, 0x0002);
daf9df6d 2652
2653 val &= 0xff00;
2654 for (i = 0; i < ARRAY_SIZE(set); i++)
4da19633 2655 rtl_writephy(tp, 0x0d, val | set[i]);
daf9df6d 2656 }
2657 } else {
350f7596 2658 static const struct phy_reg phy_reg_init[] = {
daf9df6d 2659 { 0x1f, 0x0002 },
2660 { 0x05, 0x6662 },
2661 { 0x1f, 0x0005 },
2662 { 0x05, 0x8330 },
2663 { 0x06, 0x6662 }
2664 };
2665
4da19633 2666 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
daf9df6d 2667 }
2668
bca03d5f 2669 /* RSET couple improve */
4da19633 2670 rtl_writephy(tp, 0x1f, 0x0002);
2671 rtl_patchphy(tp, 0x0d, 0x0300);
2672 rtl_patchphy(tp, 0x0f, 0x0010);
daf9df6d 2673
bca03d5f 2674 /* Fine tune PLL performance */
4da19633 2675 rtl_writephy(tp, 0x1f, 0x0002);
2676 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2677 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
daf9df6d 2678
4da19633 2679 rtl_writephy(tp, 0x1f, 0x0005);
2680 rtl_writephy(tp, 0x05, 0x001b);
953a12cc
FR
2681
2682 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
bca03d5f 2683
4da19633 2684 rtl_writephy(tp, 0x1f, 0x0000);
daf9df6d 2685}
2686
bca03d5f 2687static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
daf9df6d 2688{
350f7596 2689 static const struct phy_reg phy_reg_init_0[] = {
bca03d5f 2690 /* Channel Estimation */
daf9df6d 2691 { 0x1f, 0x0001 },
2692 { 0x06, 0x4064 },
2693 { 0x07, 0x2863 },
2694 { 0x08, 0x059c },
2695 { 0x09, 0x26b4 },
2696 { 0x0a, 0x6a19 },
2697 { 0x0b, 0xdcc8 },
2698 { 0x10, 0xf06d },
2699 { 0x14, 0x7f68 },
2700 { 0x18, 0x7fd9 },
2701 { 0x1c, 0xf0ff },
2702 { 0x1d, 0x3d9c },
2703 { 0x1f, 0x0003 },
2704 { 0x12, 0xf49f },
2705 { 0x13, 0x070b },
2706 { 0x1a, 0x05ad },
2707 { 0x14, 0x94c0 },
2708
bca03d5f 2709 /*
2710 * Tx Error Issue
cecb5fd7 2711 * Enhance line driver power
bca03d5f 2712 */
daf9df6d 2713 { 0x1f, 0x0002 },
2714 { 0x06, 0x5561 },
2715 { 0x1f, 0x0005 },
2716 { 0x05, 0x8332 },
bca03d5f 2717 { 0x06, 0x5561 },
2718
2719 /*
2720 * Cannot link at 1Gbps with a bad cable
2721 * Decrease SNR threshold from 21.07dB to 19.04dB
2722 */
2723 { 0x1f, 0x0001 },
2724 { 0x17, 0x0cc0 },
daf9df6d 2725
2726 { 0x1f, 0x0000 },
bca03d5f 2727 { 0x0d, 0xf880 }
5b538df9 2728 };
bca03d5f 2729 void __iomem *ioaddr = tp->mmio_addr;
5b538df9 2730
4da19633 2731 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
5b538df9 2732
daf9df6d 2733 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
350f7596 2734 static const struct phy_reg phy_reg_init[] = {
daf9df6d 2735 { 0x1f, 0x0002 },
2736 { 0x05, 0x669a },
5b538df9 2737 { 0x1f, 0x0005 },
daf9df6d 2738 { 0x05, 0x8330 },
2739 { 0x06, 0x669a },
2740
2741 { 0x1f, 0x0002 }
2742 };
2743 int val;
2744
4da19633 2745 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
daf9df6d 2746
4da19633 2747 val = rtl_readphy(tp, 0x0d);
daf9df6d 2748 if ((val & 0x00ff) != 0x006c) {
b6bc7650 2749 static const u32 set[] = {
daf9df6d 2750 0x0065, 0x0066, 0x0067, 0x0068,
2751 0x0069, 0x006a, 0x006b, 0x006c
2752 };
2753 int i;
2754
4da19633 2755 rtl_writephy(tp, 0x1f, 0x0002);
daf9df6d 2756
2757 val &= 0xff00;
2758 for (i = 0; i < ARRAY_SIZE(set); i++)
4da19633 2759 rtl_writephy(tp, 0x0d, val | set[i]);
daf9df6d 2760 }
2761 } else {
350f7596 2762 static const struct phy_reg phy_reg_init[] = {
daf9df6d 2763 { 0x1f, 0x0002 },
2764 { 0x05, 0x2642 },
5b538df9 2765 { 0x1f, 0x0005 },
daf9df6d 2766 { 0x05, 0x8330 },
2767 { 0x06, 0x2642 }
5b538df9
FR
2768 };
2769
4da19633 2770 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
5b538df9
FR
2771 }
2772
bca03d5f 2773 /* Fine tune PLL performance */
4da19633 2774 rtl_writephy(tp, 0x1f, 0x0002);
2775 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2776 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
daf9df6d 2777
bca03d5f 2778 /* Switching regulator Slew rate */
4da19633 2779 rtl_writephy(tp, 0x1f, 0x0002);
2780 rtl_patchphy(tp, 0x0f, 0x0017);
daf9df6d 2781
4da19633 2782 rtl_writephy(tp, 0x1f, 0x0005);
2783 rtl_writephy(tp, 0x05, 0x001b);
953a12cc
FR
2784
2785 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
bca03d5f 2786
4da19633 2787 rtl_writephy(tp, 0x1f, 0x0000);
daf9df6d 2788}
2789
4da19633 2790static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
daf9df6d 2791{
350f7596 2792 static const struct phy_reg phy_reg_init[] = {
daf9df6d 2793 { 0x1f, 0x0002 },
2794 { 0x10, 0x0008 },
2795 { 0x0d, 0x006c },
2796
2797 { 0x1f, 0x0000 },
2798 { 0x0d, 0xf880 },
2799
2800 { 0x1f, 0x0001 },
2801 { 0x17, 0x0cc0 },
2802
2803 { 0x1f, 0x0001 },
2804 { 0x0b, 0xa4d8 },
2805 { 0x09, 0x281c },
2806 { 0x07, 0x2883 },
2807 { 0x0a, 0x6b35 },
2808 { 0x1d, 0x3da4 },
2809 { 0x1c, 0xeffd },
2810 { 0x14, 0x7f52 },
2811 { 0x18, 0x7fc6 },
2812 { 0x08, 0x0601 },
2813 { 0x06, 0x4063 },
2814 { 0x10, 0xf074 },
2815 { 0x1f, 0x0003 },
2816 { 0x13, 0x0789 },
2817 { 0x12, 0xf4bd },
2818 { 0x1a, 0x04fd },
2819 { 0x14, 0x84b0 },
2820 { 0x1f, 0x0000 },
2821 { 0x00, 0x9200 },
2822
2823 { 0x1f, 0x0005 },
2824 { 0x01, 0x0340 },
2825 { 0x1f, 0x0001 },
2826 { 0x04, 0x4000 },
2827 { 0x03, 0x1d21 },
2828 { 0x02, 0x0c32 },
2829 { 0x01, 0x0200 },
2830 { 0x00, 0x5554 },
2831 { 0x04, 0x4800 },
2832 { 0x04, 0x4000 },
2833 { 0x04, 0xf000 },
2834 { 0x03, 0xdf01 },
2835 { 0x02, 0xdf20 },
2836 { 0x01, 0x101a },
2837 { 0x00, 0xa0ff },
2838 { 0x04, 0xf800 },
2839 { 0x04, 0xf000 },
2840 { 0x1f, 0x0000 },
2841
2842 { 0x1f, 0x0007 },
2843 { 0x1e, 0x0023 },
2844 { 0x16, 0x0000 },
2845 { 0x1f, 0x0000 }
2846 };
2847
4da19633 2848 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
5b538df9
FR
2849}
2850
e6de30d6 2851static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
2852{
2853 static const struct phy_reg phy_reg_init[] = {
2854 { 0x1f, 0x0001 },
2855 { 0x17, 0x0cc0 },
2856
2857 { 0x1f, 0x0007 },
2858 { 0x1e, 0x002d },
2859 { 0x18, 0x0040 },
2860 { 0x1f, 0x0000 }
2861 };
2862
2863 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2864 rtl_patchphy(tp, 0x0d, 1 << 5);
2865}
2866
70090424 2867static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
01dc7fec 2868{
2869 static const struct phy_reg phy_reg_init[] = {
2870 /* Enable Delay cap */
2871 { 0x1f, 0x0005 },
2872 { 0x05, 0x8b80 },
2873 { 0x06, 0xc896 },
2874 { 0x1f, 0x0000 },
2875
2876 /* Channel estimation fine tune */
2877 { 0x1f, 0x0001 },
2878 { 0x0b, 0x6c20 },
2879 { 0x07, 0x2872 },
2880 { 0x1c, 0xefff },
2881 { 0x1f, 0x0003 },
2882 { 0x14, 0x6420 },
2883 { 0x1f, 0x0000 },
2884
2885 /* Update PFM & 10M TX idle timer */
2886 { 0x1f, 0x0007 },
2887 { 0x1e, 0x002f },
2888 { 0x15, 0x1919 },
2889 { 0x1f, 0x0000 },
2890
2891 { 0x1f, 0x0007 },
2892 { 0x1e, 0x00ac },
2893 { 0x18, 0x0006 },
2894 { 0x1f, 0x0000 }
2895 };
2896
15ecd039
FR
2897 rtl_apply_firmware(tp);
2898
01dc7fec 2899 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2900
2901 /* DCO enable for 10M IDLE Power */
2902 rtl_writephy(tp, 0x1f, 0x0007);
2903 rtl_writephy(tp, 0x1e, 0x0023);
2904 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
2905 rtl_writephy(tp, 0x1f, 0x0000);
2906
2907 /* For impedance matching */
2908 rtl_writephy(tp, 0x1f, 0x0002);
2909 rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
cecb5fd7 2910 rtl_writephy(tp, 0x1f, 0x0000);
01dc7fec 2911
2912 /* PHY auto speed down */
2913 rtl_writephy(tp, 0x1f, 0x0007);
2914 rtl_writephy(tp, 0x1e, 0x002d);
2915 rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
2916 rtl_writephy(tp, 0x1f, 0x0000);
2917 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
2918
2919 rtl_writephy(tp, 0x1f, 0x0005);
2920 rtl_writephy(tp, 0x05, 0x8b86);
2921 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
2922 rtl_writephy(tp, 0x1f, 0x0000);
2923
2924 rtl_writephy(tp, 0x1f, 0x0005);
2925 rtl_writephy(tp, 0x05, 0x8b85);
2926 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
2927 rtl_writephy(tp, 0x1f, 0x0007);
2928 rtl_writephy(tp, 0x1e, 0x0020);
2929 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
2930 rtl_writephy(tp, 0x1f, 0x0006);
2931 rtl_writephy(tp, 0x00, 0x5a00);
2932 rtl_writephy(tp, 0x1f, 0x0000);
2933 rtl_writephy(tp, 0x0d, 0x0007);
2934 rtl_writephy(tp, 0x0e, 0x003c);
2935 rtl_writephy(tp, 0x0d, 0x4007);
2936 rtl_writephy(tp, 0x0e, 0x0000);
2937 rtl_writephy(tp, 0x0d, 0x0000);
2938}
2939
70090424
HW
2940static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
2941{
2942 static const struct phy_reg phy_reg_init[] = {
2943 /* Enable Delay cap */
2944 { 0x1f, 0x0004 },
2945 { 0x1f, 0x0007 },
2946 { 0x1e, 0x00ac },
2947 { 0x18, 0x0006 },
2948 { 0x1f, 0x0002 },
2949 { 0x1f, 0x0000 },
2950 { 0x1f, 0x0000 },
2951
2952 /* Channel estimation fine tune */
2953 { 0x1f, 0x0003 },
2954 { 0x09, 0xa20f },
2955 { 0x1f, 0x0000 },
2956 { 0x1f, 0x0000 },
2957
2958 /* Green Setting */
2959 { 0x1f, 0x0005 },
2960 { 0x05, 0x8b5b },
2961 { 0x06, 0x9222 },
2962 { 0x05, 0x8b6d },
2963 { 0x06, 0x8000 },
2964 { 0x05, 0x8b76 },
2965 { 0x06, 0x8000 },
2966 { 0x1f, 0x0000 }
2967 };
2968
2969 rtl_apply_firmware(tp);
2970
2971 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2972
2973 /* For 4-corner performance improve */
2974 rtl_writephy(tp, 0x1f, 0x0005);
2975 rtl_writephy(tp, 0x05, 0x8b80);
2976 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
2977 rtl_writephy(tp, 0x1f, 0x0000);
2978
2979 /* PHY auto speed down */
2980 rtl_writephy(tp, 0x1f, 0x0004);
2981 rtl_writephy(tp, 0x1f, 0x0007);
2982 rtl_writephy(tp, 0x1e, 0x002d);
2983 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
2984 rtl_writephy(tp, 0x1f, 0x0002);
2985 rtl_writephy(tp, 0x1f, 0x0000);
2986 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
2987
2988 /* Improve 10M EEE waveform */
2989 rtl_writephy(tp, 0x1f, 0x0005);
2990 rtl_writephy(tp, 0x05, 0x8b86);
2991 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
2992 rtl_writephy(tp, 0x1f, 0x0000);
2993
2994 /* Improve 2-pair detection performance */
2995 rtl_writephy(tp, 0x1f, 0x0005);
2996 rtl_writephy(tp, 0x05, 0x8b85);
2997 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
2998 rtl_writephy(tp, 0x1f, 0x0000);
2999
3000 /* EEE setting */
3001 rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003,
3002 ERIAR_EXGMAC);
3003 rtl_writephy(tp, 0x1f, 0x0005);
3004 rtl_writephy(tp, 0x05, 0x8b85);
3005 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3006 rtl_writephy(tp, 0x1f, 0x0004);
3007 rtl_writephy(tp, 0x1f, 0x0007);
3008 rtl_writephy(tp, 0x1e, 0x0020);
1b23a3e3 3009 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
70090424
HW
3010 rtl_writephy(tp, 0x1f, 0x0002);
3011 rtl_writephy(tp, 0x1f, 0x0000);
3012 rtl_writephy(tp, 0x0d, 0x0007);
3013 rtl_writephy(tp, 0x0e, 0x003c);
3014 rtl_writephy(tp, 0x0d, 0x4007);
3015 rtl_writephy(tp, 0x0e, 0x0000);
3016 rtl_writephy(tp, 0x0d, 0x0000);
3017
3018 /* Green feature */
3019 rtl_writephy(tp, 0x1f, 0x0003);
3020 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3021 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3022 rtl_writephy(tp, 0x1f, 0x0000);
3023}
3024
c2218925
HW
3025static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3026{
3027 static const struct phy_reg phy_reg_init[] = {
3028 /* Channel estimation fine tune */
3029 { 0x1f, 0x0003 },
3030 { 0x09, 0xa20f },
3031 { 0x1f, 0x0000 },
3032
3033 /* Modify green table for giga & fnet */
3034 { 0x1f, 0x0005 },
3035 { 0x05, 0x8b55 },
3036 { 0x06, 0x0000 },
3037 { 0x05, 0x8b5e },
3038 { 0x06, 0x0000 },
3039 { 0x05, 0x8b67 },
3040 { 0x06, 0x0000 },
3041 { 0x05, 0x8b70 },
3042 { 0x06, 0x0000 },
3043 { 0x1f, 0x0000 },
3044 { 0x1f, 0x0007 },
3045 { 0x1e, 0x0078 },
3046 { 0x17, 0x0000 },
3047 { 0x19, 0x00fb },
3048 { 0x1f, 0x0000 },
3049
3050 /* Modify green table for 10M */
3051 { 0x1f, 0x0005 },
3052 { 0x05, 0x8b79 },
3053 { 0x06, 0xaa00 },
3054 { 0x1f, 0x0000 },
3055
3056 /* Disable high-impedance detection (RTCT) */
3057 { 0x1f, 0x0003 },
3058 { 0x01, 0x328a },
3059 { 0x1f, 0x0000 }
3060 };
3061
3062 rtl_apply_firmware(tp);
3063
3064 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3065
3066 /* For 4-corner performance improve */
3067 rtl_writephy(tp, 0x1f, 0x0005);
3068 rtl_writephy(tp, 0x05, 0x8b80);
3069 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3070 rtl_writephy(tp, 0x1f, 0x0000);
3071
3072 /* PHY auto speed down */
3073 rtl_writephy(tp, 0x1f, 0x0007);
3074 rtl_writephy(tp, 0x1e, 0x002d);
3075 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3076 rtl_writephy(tp, 0x1f, 0x0000);
3077 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3078
3079 /* Improve 10M EEE waveform */
3080 rtl_writephy(tp, 0x1f, 0x0005);
3081 rtl_writephy(tp, 0x05, 0x8b86);
3082 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3083 rtl_writephy(tp, 0x1f, 0x0000);
3084
3085 /* Improve 2-pair detection performance */
3086 rtl_writephy(tp, 0x1f, 0x0005);
3087 rtl_writephy(tp, 0x05, 0x8b85);
3088 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3089 rtl_writephy(tp, 0x1f, 0x0000);
3090}
3091
3092static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3093{
3094 rtl_apply_firmware(tp);
3095
3096 /* For 4-corner performance improve */
3097 rtl_writephy(tp, 0x1f, 0x0005);
3098 rtl_writephy(tp, 0x05, 0x8b80);
3099 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3100 rtl_writephy(tp, 0x1f, 0x0000);
3101
3102 /* PHY auto speed down */
3103 rtl_writephy(tp, 0x1f, 0x0007);
3104 rtl_writephy(tp, 0x1e, 0x002d);
3105 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3106 rtl_writephy(tp, 0x1f, 0x0000);
3107 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3108
3109 /* Improve 10M EEE waveform */
3110 rtl_writephy(tp, 0x1f, 0x0005);
3111 rtl_writephy(tp, 0x05, 0x8b86);
3112 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3113 rtl_writephy(tp, 0x1f, 0x0000);
3114}
3115
4da19633 3116static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
2857ffb7 3117{
350f7596 3118 static const struct phy_reg phy_reg_init[] = {
2857ffb7
FR
3119 { 0x1f, 0x0003 },
3120 { 0x08, 0x441d },
3121 { 0x01, 0x9100 },
3122 { 0x1f, 0x0000 }
3123 };
3124
4da19633 3125 rtl_writephy(tp, 0x1f, 0x0000);
3126 rtl_patchphy(tp, 0x11, 1 << 12);
3127 rtl_patchphy(tp, 0x19, 1 << 13);
3128 rtl_patchphy(tp, 0x10, 1 << 15);
2857ffb7 3129
4da19633 3130 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2857ffb7
FR
3131}
3132
5a5e4443
HW
3133static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3134{
3135 static const struct phy_reg phy_reg_init[] = {
3136 { 0x1f, 0x0005 },
3137 { 0x1a, 0x0000 },
3138 { 0x1f, 0x0000 },
3139
3140 { 0x1f, 0x0004 },
3141 { 0x1c, 0x0000 },
3142 { 0x1f, 0x0000 },
3143
3144 { 0x1f, 0x0001 },
3145 { 0x15, 0x7701 },
3146 { 0x1f, 0x0000 }
3147 };
3148
3149 /* Disable ALDPS before ram code */
3150 rtl_writephy(tp, 0x1f, 0x0000);
3151 rtl_writephy(tp, 0x18, 0x0310);
3152 msleep(100);
3153
953a12cc 3154 rtl_apply_firmware(tp);
5a5e4443
HW
3155
3156 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3157}
3158
7e18dca1
HW
3159static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3160{
3161 void __iomem *ioaddr = tp->mmio_addr;
3162
3163 /* Disable ALDPS before setting firmware */
3164 rtl_writephy(tp, 0x1f, 0x0000);
3165 rtl_writephy(tp, 0x18, 0x0310);
3166 msleep(20);
3167
3168 rtl_apply_firmware(tp);
3169
3170 /* EEE setting */
3171 rtl_eri_write(ioaddr, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3172 rtl_writephy(tp, 0x1f, 0x0004);
3173 rtl_writephy(tp, 0x10, 0x401f);
3174 rtl_writephy(tp, 0x19, 0x7030);
3175 rtl_writephy(tp, 0x1f, 0x0000);
3176}
3177
5615d9f1
FR
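/* Dispatch to the per-chip PHY setup routine selected by tp->mac_version. */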
3178static void rtl_hw_phy_config(struct net_device *dev)
3179{
3180 struct rtl8169_private *tp = netdev_priv(dev);
5615d9f1
FR
3181
3182 rtl8169_print_mac_version(tp);
3183
3184 switch (tp->mac_version) {
3185 case RTL_GIGA_MAC_VER_01:
3186 break;
3187 case RTL_GIGA_MAC_VER_02:
3188 case RTL_GIGA_MAC_VER_03:
4da19633 3189 rtl8169s_hw_phy_config(tp);
5615d9f1
FR
3190 break;
3191 case RTL_GIGA_MAC_VER_04:
4da19633 3192 rtl8169sb_hw_phy_config(tp);
5615d9f1 3193 break;
2e955856 3194 case RTL_GIGA_MAC_VER_05:
4da19633 3195 rtl8169scd_hw_phy_config(tp);
2e955856 3196 break;
8c7006aa 3197 case RTL_GIGA_MAC_VER_06:
4da19633 3198 rtl8169sce_hw_phy_config(tp);
8c7006aa 3199 break;
2857ffb7
FR
3200 case RTL_GIGA_MAC_VER_07:
3201 case RTL_GIGA_MAC_VER_08:
3202 case RTL_GIGA_MAC_VER_09:
4da19633 3203 rtl8102e_hw_phy_config(tp);
2857ffb7 3204 break;
236b8082 3205 case RTL_GIGA_MAC_VER_11:
4da19633 3206 rtl8168bb_hw_phy_config(tp);
236b8082
FR
3207 break;
3208 case RTL_GIGA_MAC_VER_12:
4da19633 3209 rtl8168bef_hw_phy_config(tp);
236b8082
FR
3210 break;
3211 case RTL_GIGA_MAC_VER_17:
4da19633 3212 rtl8168bef_hw_phy_config(tp);
236b8082 3213 break;
867763c1 3214 case RTL_GIGA_MAC_VER_18:
4da19633 3215 rtl8168cp_1_hw_phy_config(tp);
867763c1
FR
3216 break;
3217 case RTL_GIGA_MAC_VER_19:
4da19633 3218 rtl8168c_1_hw_phy_config(tp);
867763c1 3219 break;
7da97ec9 3220 case RTL_GIGA_MAC_VER_20:
4da19633 3221 rtl8168c_2_hw_phy_config(tp);
7da97ec9 3222 break;
197ff761 3223 case RTL_GIGA_MAC_VER_21:
4da19633 3224 rtl8168c_3_hw_phy_config(tp);
197ff761 3225 break;
6fb07058 3226 case RTL_GIGA_MAC_VER_22:
4da19633 3227 rtl8168c_4_hw_phy_config(tp);
6fb07058 3228 break;
ef3386f0 3229 case RTL_GIGA_MAC_VER_23:
7f3e3d3a 3230 case RTL_GIGA_MAC_VER_24:
4da19633 3231 rtl8168cp_2_hw_phy_config(tp);
ef3386f0 3232 break;
5b538df9 3233 case RTL_GIGA_MAC_VER_25:
bca03d5f 3234 rtl8168d_1_hw_phy_config(tp);
daf9df6d 3235 break;
3236 case RTL_GIGA_MAC_VER_26:
bca03d5f 3237 rtl8168d_2_hw_phy_config(tp);
daf9df6d 3238 break;
3239 case RTL_GIGA_MAC_VER_27:
4da19633 3240 rtl8168d_3_hw_phy_config(tp);
5b538df9 3241 break;
e6de30d6 3242 case RTL_GIGA_MAC_VER_28:
3243 rtl8168d_4_hw_phy_config(tp);
3244 break;
5a5e4443
HW
3245 case RTL_GIGA_MAC_VER_29:
3246 case RTL_GIGA_MAC_VER_30:
3247 rtl8105e_hw_phy_config(tp);
3248 break;
cecb5fd7
FR
3249 case RTL_GIGA_MAC_VER_31:
3250 /* None. */
3251 break;
01dc7fec 3252 case RTL_GIGA_MAC_VER_32:
01dc7fec 3253 case RTL_GIGA_MAC_VER_33:
70090424
HW
3254 rtl8168e_1_hw_phy_config(tp);
3255 break;
3256 case RTL_GIGA_MAC_VER_34:
3257 rtl8168e_2_hw_phy_config(tp);
01dc7fec 3258 break;
c2218925
HW
3259 case RTL_GIGA_MAC_VER_35:
3260 rtl8168f_1_hw_phy_config(tp);
3261 break;
3262 case RTL_GIGA_MAC_VER_36:
3263 rtl8168f_2_hw_phy_config(tp);
3264 break;
ef3386f0 3265
7e18dca1
HW
3266 case RTL_GIGA_MAC_VER_37:
3267 rtl8402_hw_phy_config(tp);
3268 break;
3269
5615d9f1
FR
3270 default:
3271 break;
3272 }
3273}
3274
da78dbff 3275static void rtl_phy_work(struct rtl8169_private *tp)
1da177e4 3276{
1da177e4
LT
3277 struct timer_list *timer = &tp->timer;
3278 void __iomem *ioaddr = tp->mmio_addr;
3279 unsigned long timeout = RTL8169_PHY_TIMEOUT;
3280
bcf0bf90 3281 assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
1da177e4 3282
4da19633 3283 if (tp->phy_reset_pending(tp)) {
5b0384f4 3284 /*
1da177e4
LT
3285 * A busy loop could burn quite a few cycles on today's CPUs.
3286 * Let's delay the execution of the timer for a few ticks.
3287 */
3288 timeout = HZ/10;
3289 goto out_mod_timer;
3290 }
3291
3292 if (tp->link_ok(ioaddr))
da78dbff 3293 return;
1da177e4 3294
da78dbff 3295 netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
1da177e4 3296
4da19633 3297 tp->phy_reset_enable(tp);
1da177e4
LT
3298
3299out_mod_timer:
3300 mod_timer(timer, jiffies + timeout);
da78dbff
FR
3301}
3302
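/*
 * Each deferred task is a bit in tp->wk.flags, so repeated requests for
 * the same task coalesce into a single run of the work item.
 */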
3303static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3304{
da78dbff
FR
3305 if (!test_and_set_bit(flag, tp->wk.flags))
3306 schedule_work(&tp->wk.work);
da78dbff
FR
3307}
3308
3309static void rtl8169_phy_timer(unsigned long __opaque)
3310{
3311 struct net_device *dev = (struct net_device *)__opaque;
3312 struct rtl8169_private *tp = netdev_priv(dev);
3313
98ddf986 3314 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
1da177e4
LT
3315}
3316
1da177e4
LT
3317static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
3318 void __iomem *ioaddr)
3319{
3320 iounmap(ioaddr);
3321 pci_release_regions(pdev);
87aeec76 3322 pci_clear_mwi(pdev);
1da177e4
LT
3323 pci_disable_device(pdev);
3324 free_netdev(dev);
3325}
3326
bf793295
FR
3327static void rtl8169_phy_reset(struct net_device *dev,
3328 struct rtl8169_private *tp)
3329{
07d3f51f 3330 unsigned int i;
bf793295 3331
4da19633 3332 tp->phy_reset_enable(tp);
bf793295 3333 for (i = 0; i < 100; i++) {
4da19633 3334 if (!tp->phy_reset_pending(tp))
bf793295
FR
3335 return;
3336 msleep(1);
3337 }
bf82c189 3338 netif_err(tp, link, dev, "PHY reset failed\n");
bf793295
FR
3339}
3340
2544bfc0
FR
3341static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3342{
3343 void __iomem *ioaddr = tp->mmio_addr;
3344
3345 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3346 (RTL_R8(PHYstatus) & TBI_Enable);
3347}
3348
4ff96fa6
FR
3349static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
3350{
3351 void __iomem *ioaddr = tp->mmio_addr;
4ff96fa6 3352
5615d9f1 3353 rtl_hw_phy_config(dev);
4ff96fa6 3354
77332894
MS
3355 if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
3356 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3357 RTL_W8(0x82, 0x01);
3358 }
4ff96fa6 3359
6dccd16b
FR
3360 pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
3361
3362 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
3363 pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
4ff96fa6 3364
bcf0bf90 3365 if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
4ff96fa6
FR
3366 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3367 RTL_W8(0x82, 0x01);
3368 dprintk("Set PHY Reg 0x0bh = 0x00h\n");
4da19633 3369 rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
4ff96fa6
FR
3370 }
3371
bf793295
FR
3372 rtl8169_phy_reset(dev, tp);
3373
54405cde 3374 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
cecb5fd7
FR
3375 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3376 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
3377 (tp->mii.supports_gmii ?
3378 ADVERTISED_1000baseT_Half |
3379 ADVERTISED_1000baseT_Full : 0));
4ff96fa6 3380
2544bfc0 3381 if (rtl_tbi_enabled(tp))
bf82c189 3382 netif_info(tp, link, dev, "TBI auto-negotiating\n");
4ff96fa6
FR
3383}
3384
773d2021
FR
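/*
 * Program the station MAC address: the low four address bytes go to MAC0
 * and the remaining two to MAC4, under Cfg9346 unlock.  RTL_GIGA_MAC_VER_34
 * additionally mirrors the address into the ExGMAC registers.
 */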
3385static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
3386{
3387 void __iomem *ioaddr = tp->mmio_addr;
3388 u32 high;
3389 u32 low;
3390
3391 low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
3392 high = addr[4] | (addr[5] << 8);
3393
da78dbff 3394 rtl_lock_work(tp);
773d2021
FR
3395
3396 RTL_W8(Cfg9346, Cfg9346_Unlock);
908ba2bf 3397
773d2021 3398 RTL_W32(MAC4, high);
908ba2bf 3399 RTL_R32(MAC4);
3400
78f1cd02 3401 RTL_W32(MAC0, low);
908ba2bf 3402 RTL_R32(MAC0);
3403
c28aa385 3404 if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
3405 const struct exgmac_reg e[] = {
3406 { .addr = 0xe0, ERIAR_MASK_1111, .val = low },
3407 { .addr = 0xe4, ERIAR_MASK_1111, .val = high },
3408 { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
3409 { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
3410 low >> 16 },
3411 };
3412
3413 rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
3414 }
3415
773d2021
FR
3416 RTL_W8(Cfg9346, Cfg9346_Lock);
3417
da78dbff 3418 rtl_unlock_work(tp);
773d2021
FR
3419}
3420
3421static int rtl_set_mac_address(struct net_device *dev, void *p)
3422{
3423 struct rtl8169_private *tp = netdev_priv(dev);
3424 struct sockaddr *addr = p;
3425
3426 if (!is_valid_ether_addr(addr->sa_data))
3427 return -EADDRNOTAVAIL;
3428
3429 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3430
3431 rtl_rar_set(tp, dev->dev_addr);
3432
3433 return 0;
3434}
3435
5f787a1a
FR
3436static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3437{
3438 struct rtl8169_private *tp = netdev_priv(dev);
3439 struct mii_ioctl_data *data = if_mii(ifr);
3440
8b4ab28d
FR
3441 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
3442}
5f787a1a 3443
cecb5fd7
FR
3444static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3445 struct mii_ioctl_data *data, int cmd)
8b4ab28d 3446{
5f787a1a
FR
3447 switch (cmd) {
3448 case SIOCGMIIPHY:
3449 data->phy_id = 32; /* Internal PHY */
3450 return 0;
3451
3452 case SIOCGMIIREG:
4da19633 3453 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
5f787a1a
FR
3454 return 0;
3455
3456 case SIOCSMIIREG:
4da19633 3457 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
5f787a1a
FR
3458 return 0;
3459 }
3460 return -EOPNOTSUPP;
3461}
3462
8b4ab28d
FR
3463static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
3464{
3465 return -EOPNOTSUPP;
3466}
3467
fbac58fc
FR
3468static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3469{
3470 if (tp->features & RTL_FEATURE_MSI) {
3471 pci_disable_msi(pdev);
3472 tp->features &= ~RTL_FEATURE_MSI;
3473 }
3474}
3475
c0e45c1c 3476static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
3477{
3478 struct mdio_ops *ops = &tp->mdio_ops;
3479
3480 switch (tp->mac_version) {
3481 case RTL_GIGA_MAC_VER_27:
3482 ops->write = r8168dp_1_mdio_write;
3483 ops->read = r8168dp_1_mdio_read;
3484 break;
e6de30d6 3485 case RTL_GIGA_MAC_VER_28:
4804b3b3 3486 case RTL_GIGA_MAC_VER_31:
e6de30d6 3487 ops->write = r8168dp_2_mdio_write;
3488 ops->read = r8168dp_2_mdio_read;
3489 break;
c0e45c1c 3490 default:
3491 ops->write = r8169_mdio_write;
3492 ops->read = r8169_mdio_read;
3493 break;
3494 }
3495}
3496
649b3b8c 3497static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3498{
3499 void __iomem *ioaddr = tp->mmio_addr;
3500
3501 switch (tp->mac_version) {
3502 case RTL_GIGA_MAC_VER_29:
3503 case RTL_GIGA_MAC_VER_30:
3504 case RTL_GIGA_MAC_VER_32:
3505 case RTL_GIGA_MAC_VER_33:
3506 case RTL_GIGA_MAC_VER_34:
7e18dca1 3507 case RTL_GIGA_MAC_VER_37:
649b3b8c 3508 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3509 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
3510 break;
3511 default:
3512 break;
3513 }
3514}
3515
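/*
 * With any Wake-on-LAN source armed, the PHY is left powered (BMCR is
 * simply cleared) and rtl_wol_suspend_quirk() keeps the receiver accepting
 * frames on the chip versions that need it; returning true tells the
 * callers to skip the full PLL power-down.
 */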
3516static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
3517{
3518 if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
3519 return false;
3520
3521 rtl_writephy(tp, 0x1f, 0x0000);
3522 rtl_writephy(tp, MII_BMCR, 0x0000);
3523
3524 rtl_wol_suspend_quirk(tp);
3525
3526 return true;
3527}
3528
065c27c1 3529static void r810x_phy_power_down(struct rtl8169_private *tp)
3530{
3531 rtl_writephy(tp, 0x1f, 0x0000);
3532 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
3533}
3534
3535static void r810x_phy_power_up(struct rtl8169_private *tp)
3536{
3537 rtl_writephy(tp, 0x1f, 0x0000);
3538 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
3539}
3540
3541static void r810x_pll_power_down(struct rtl8169_private *tp)
3542{
0004299a
HW
3543 void __iomem *ioaddr = tp->mmio_addr;
3544
649b3b8c 3545 if (rtl_wol_pll_power_down(tp))
065c27c1 3546 return;
065c27c1 3547
3548 r810x_phy_power_down(tp);
0004299a
HW
3549
3550 switch (tp->mac_version) {
3551 case RTL_GIGA_MAC_VER_07:
3552 case RTL_GIGA_MAC_VER_08:
3553 case RTL_GIGA_MAC_VER_09:
3554 case RTL_GIGA_MAC_VER_10:
3555 case RTL_GIGA_MAC_VER_13:
3556 case RTL_GIGA_MAC_VER_16:
3557 break;
3558 default:
3559 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
3560 break;
3561 }
065c27c1 3562}
3563
3564static void r810x_pll_power_up(struct rtl8169_private *tp)
3565{
0004299a
HW
3566 void __iomem *ioaddr = tp->mmio_addr;
3567
065c27c1 3568 r810x_phy_power_up(tp);
0004299a
HW
3569
3570 switch (tp->mac_version) {
3571 case RTL_GIGA_MAC_VER_07:
3572 case RTL_GIGA_MAC_VER_08:
3573 case RTL_GIGA_MAC_VER_09:
3574 case RTL_GIGA_MAC_VER_10:
3575 case RTL_GIGA_MAC_VER_13:
3576 case RTL_GIGA_MAC_VER_16:
3577 break;
3578 default:
3579 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
3580 break;
3581 }
065c27c1 3582}
3583
3584static void r8168_phy_power_up(struct rtl8169_private *tp)
3585{
3586 rtl_writephy(tp, 0x1f, 0x0000);
01dc7fec 3587 switch (tp->mac_version) {
3588 case RTL_GIGA_MAC_VER_11:
3589 case RTL_GIGA_MAC_VER_12:
3590 case RTL_GIGA_MAC_VER_17:
3591 case RTL_GIGA_MAC_VER_18:
3592 case RTL_GIGA_MAC_VER_19:
3593 case RTL_GIGA_MAC_VER_20:
3594 case RTL_GIGA_MAC_VER_21:
3595 case RTL_GIGA_MAC_VER_22:
3596 case RTL_GIGA_MAC_VER_23:
3597 case RTL_GIGA_MAC_VER_24:
3598 case RTL_GIGA_MAC_VER_25:
3599 case RTL_GIGA_MAC_VER_26:
3600 case RTL_GIGA_MAC_VER_27:
3601 case RTL_GIGA_MAC_VER_28:
3602 case RTL_GIGA_MAC_VER_31:
3603 rtl_writephy(tp, 0x0e, 0x0000);
3604 break;
3605 default:
3606 break;
3607 }
065c27c1 3608 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
3609}
3610
3611static void r8168_phy_power_down(struct rtl8169_private *tp)
3612{
3613 rtl_writephy(tp, 0x1f, 0x0000);
01dc7fec 3614 switch (tp->mac_version) {
3615 case RTL_GIGA_MAC_VER_32:
3616 case RTL_GIGA_MAC_VER_33:
3617 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
3618 break;
3619
3620 case RTL_GIGA_MAC_VER_11:
3621 case RTL_GIGA_MAC_VER_12:
3622 case RTL_GIGA_MAC_VER_17:
3623 case RTL_GIGA_MAC_VER_18:
3624 case RTL_GIGA_MAC_VER_19:
3625 case RTL_GIGA_MAC_VER_20:
3626 case RTL_GIGA_MAC_VER_21:
3627 case RTL_GIGA_MAC_VER_22:
3628 case RTL_GIGA_MAC_VER_23:
3629 case RTL_GIGA_MAC_VER_24:
3630 case RTL_GIGA_MAC_VER_25:
3631 case RTL_GIGA_MAC_VER_26:
3632 case RTL_GIGA_MAC_VER_27:
3633 case RTL_GIGA_MAC_VER_28:
3634 case RTL_GIGA_MAC_VER_31:
3635 rtl_writephy(tp, 0x0e, 0x0200);
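		/* falls through to the common BMCR_PDOWN write below */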
3636 default:
3637 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
3638 break;
3639 }
065c27c1 3640}
3641
3642static void r8168_pll_power_down(struct rtl8169_private *tp)
3643{
3644 void __iomem *ioaddr = tp->mmio_addr;
3645
cecb5fd7
FR
3646 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3647 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
3648 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
4804b3b3 3649 r8168dp_check_dash(tp)) {
065c27c1 3650 return;
5d2e1957 3651 }
065c27c1 3652
cecb5fd7
FR
3653 if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
3654 tp->mac_version == RTL_GIGA_MAC_VER_24) &&
065c27c1 3655 (RTL_R16(CPlusCmd) & ASF)) {
3656 return;
3657 }
3658
01dc7fec 3659 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
3660 tp->mac_version == RTL_GIGA_MAC_VER_33)
3661 rtl_ephy_write(ioaddr, 0x19, 0xff64);
3662
649b3b8c 3663 if (rtl_wol_pll_power_down(tp))
065c27c1 3664 return;
065c27c1 3665
3666 r8168_phy_power_down(tp);
3667
3668 switch (tp->mac_version) {
3669 case RTL_GIGA_MAC_VER_25:
3670 case RTL_GIGA_MAC_VER_26:
5d2e1957
HW
3671 case RTL_GIGA_MAC_VER_27:
3672 case RTL_GIGA_MAC_VER_28:
4804b3b3 3673 case RTL_GIGA_MAC_VER_31:
01dc7fec 3674 case RTL_GIGA_MAC_VER_32:
3675 case RTL_GIGA_MAC_VER_33:
065c27c1 3676 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
3677 break;
3678 }
3679}
3680
3681static void r8168_pll_power_up(struct rtl8169_private *tp)
3682{
3683 void __iomem *ioaddr = tp->mmio_addr;
3684
065c27c1 3685 switch (tp->mac_version) {
3686 case RTL_GIGA_MAC_VER_25:
3687 case RTL_GIGA_MAC_VER_26:
5d2e1957
HW
3688 case RTL_GIGA_MAC_VER_27:
3689 case RTL_GIGA_MAC_VER_28:
4804b3b3 3690 case RTL_GIGA_MAC_VER_31:
01dc7fec 3691 case RTL_GIGA_MAC_VER_32:
3692 case RTL_GIGA_MAC_VER_33:
065c27c1 3693 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
3694 break;
3695 }
3696
3697 r8168_phy_power_up(tp);
3698}
3699
d58d46b5
FR
3700static void rtl_generic_op(struct rtl8169_private *tp,
3701 void (*op)(struct rtl8169_private *))
065c27c1 3702{
3703 if (op)
3704 op(tp);
3705}
3706
3707static void rtl_pll_power_down(struct rtl8169_private *tp)
3708{
d58d46b5 3709 rtl_generic_op(tp, tp->pll_power_ops.down);
065c27c1 3710}
3711
3712static void rtl_pll_power_up(struct rtl8169_private *tp)
3713{
d58d46b5 3714 rtl_generic_op(tp, tp->pll_power_ops.up);
065c27c1 3715}
3716
3717static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3718{
3719 struct pll_power_ops *ops = &tp->pll_power_ops;
3720
3721 switch (tp->mac_version) {
3722 case RTL_GIGA_MAC_VER_07:
3723 case RTL_GIGA_MAC_VER_08:
3724 case RTL_GIGA_MAC_VER_09:
3725 case RTL_GIGA_MAC_VER_10:
3726 case RTL_GIGA_MAC_VER_16:
5a5e4443
HW
3727 case RTL_GIGA_MAC_VER_29:
3728 case RTL_GIGA_MAC_VER_30:
7e18dca1 3729 case RTL_GIGA_MAC_VER_37:
065c27c1 3730 ops->down = r810x_pll_power_down;
3731 ops->up = r810x_pll_power_up;
3732 break;
3733
3734 case RTL_GIGA_MAC_VER_11:
3735 case RTL_GIGA_MAC_VER_12:
3736 case RTL_GIGA_MAC_VER_17:
3737 case RTL_GIGA_MAC_VER_18:
3738 case RTL_GIGA_MAC_VER_19:
3739 case RTL_GIGA_MAC_VER_20:
3740 case RTL_GIGA_MAC_VER_21:
3741 case RTL_GIGA_MAC_VER_22:
3742 case RTL_GIGA_MAC_VER_23:
3743 case RTL_GIGA_MAC_VER_24:
3744 case RTL_GIGA_MAC_VER_25:
3745 case RTL_GIGA_MAC_VER_26:
3746 case RTL_GIGA_MAC_VER_27:
e6de30d6 3747 case RTL_GIGA_MAC_VER_28:
4804b3b3 3748 case RTL_GIGA_MAC_VER_31:
01dc7fec 3749 case RTL_GIGA_MAC_VER_32:
3750 case RTL_GIGA_MAC_VER_33:
70090424 3751 case RTL_GIGA_MAC_VER_34:
c2218925
HW
3752 case RTL_GIGA_MAC_VER_35:
3753 case RTL_GIGA_MAC_VER_36:
065c27c1 3754 ops->down = r8168_pll_power_down;
3755 ops->up = r8168_pll_power_up;
3756 break;
3757
3758 default:
3759 ops->down = NULL;
3760 ops->up = NULL;
3761 break;
3762 }
3763}
3764
e542a226
HW
3765static void rtl_init_rxcfg(struct rtl8169_private *tp)
3766{
3767 void __iomem *ioaddr = tp->mmio_addr;
3768
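 /*
 * Program the chip-specific base RxConfig bits (FIFO threshold, DMA
 * burst); the Accept* bits are OR'ed in later by rtl_set_rx_mode().
 */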
3769 switch (tp->mac_version) {
3770 case RTL_GIGA_MAC_VER_01:
3771 case RTL_GIGA_MAC_VER_02:
3772 case RTL_GIGA_MAC_VER_03:
3773 case RTL_GIGA_MAC_VER_04:
3774 case RTL_GIGA_MAC_VER_05:
3775 case RTL_GIGA_MAC_VER_06:
3776 case RTL_GIGA_MAC_VER_10:
3777 case RTL_GIGA_MAC_VER_11:
3778 case RTL_GIGA_MAC_VER_12:
3779 case RTL_GIGA_MAC_VER_13:
3780 case RTL_GIGA_MAC_VER_14:
3781 case RTL_GIGA_MAC_VER_15:
3782 case RTL_GIGA_MAC_VER_16:
3783 case RTL_GIGA_MAC_VER_17:
3784 RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
3785 break;
3786 case RTL_GIGA_MAC_VER_18:
3787 case RTL_GIGA_MAC_VER_19:
3788 case RTL_GIGA_MAC_VER_20:
3789 case RTL_GIGA_MAC_VER_21:
3790 case RTL_GIGA_MAC_VER_22:
3791 case RTL_GIGA_MAC_VER_23:
3792 case RTL_GIGA_MAC_VER_24:
3793 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
3794 break;
3795 default:
3796 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
3797 break;
3798 }
3799}
3800
92fc43b4
HW
3801static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
3802{
3803 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
3804}
3805
d58d46b5
FR
3806static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
3807{
9c5028e9 3808 void __iomem *ioaddr = tp->mmio_addr;
3809
3810 RTL_W8(Cfg9346, Cfg9346_Unlock);
d58d46b5 3811 rtl_generic_op(tp, tp->jumbo_ops.enable);
9c5028e9 3812 RTL_W8(Cfg9346, Cfg9346_Lock);
d58d46b5
FR
3813}
3814
3815static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
3816{
9c5028e9 3817 void __iomem *ioaddr = tp->mmio_addr;
3818
3819 RTL_W8(Cfg9346, Cfg9346_Unlock);
d58d46b5 3820 rtl_generic_op(tp, tp->jumbo_ops.disable);
9c5028e9 3821 RTL_W8(Cfg9346, Cfg9346_Lock);
d58d46b5
FR
3822}
3823
3824static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
3825{
3826 void __iomem *ioaddr = tp->mmio_addr;
3827
3828 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
3829 RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
3830 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
3831}
3832
3833static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
3834{
3835 void __iomem *ioaddr = tp->mmio_addr;
3836
3837 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
3838 RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
3839 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
3840}
3841
3842static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
3843{
3844 void __iomem *ioaddr = tp->mmio_addr;
3845
3846 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
3847}
3848
3849static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
3850{
3851 void __iomem *ioaddr = tp->mmio_addr;
3852
3853 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
3854}
3855
3856static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
3857{
3858 void __iomem *ioaddr = tp->mmio_addr;
d58d46b5
FR
3859
3860 RTL_W8(MaxTxPacketSize, 0x3f);
3861 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
3862 RTL_W8(Config4, RTL_R8(Config4) | 0x01);
4512ff9f 3863 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
d58d46b5
FR
3864}
3865
3866static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
3867{
3868 void __iomem *ioaddr = tp->mmio_addr;
d58d46b5
FR
3869
3870 RTL_W8(MaxTxPacketSize, 0x0c);
3871 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
3872 RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
4512ff9f 3873 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
d58d46b5
FR
3874}
3875
3876static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
3877{
3878 rtl_tx_performance_tweak(tp->pci_dev,
3879 (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
3880}
3881
3882static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
3883{
3884 rtl_tx_performance_tweak(tp->pci_dev,
3885 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
3886}
3887
3888static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
3889{
3890 void __iomem *ioaddr = tp->mmio_addr;
3891
3892 r8168b_0_hw_jumbo_enable(tp);
3893
3894 RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
3895}
3896
3897static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
3898{
3899 void __iomem *ioaddr = tp->mmio_addr;
3900
3901 r8168b_0_hw_jumbo_disable(tp);
3902
3903 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
3904}
3905
3906static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
3907{
3908 struct jumbo_ops *ops = &tp->jumbo_ops;
3909
3910 switch (tp->mac_version) {
3911 case RTL_GIGA_MAC_VER_11:
3912 ops->disable = r8168b_0_hw_jumbo_disable;
3913 ops->enable = r8168b_0_hw_jumbo_enable;
3914 break;
3915 case RTL_GIGA_MAC_VER_12:
3916 case RTL_GIGA_MAC_VER_17:
3917 ops->disable = r8168b_1_hw_jumbo_disable;
3918 ops->enable = r8168b_1_hw_jumbo_enable;
3919 break;
3920 case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
3921 case RTL_GIGA_MAC_VER_19:
3922 case RTL_GIGA_MAC_VER_20:
3923 case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
3924 case RTL_GIGA_MAC_VER_22:
3925 case RTL_GIGA_MAC_VER_23:
3926 case RTL_GIGA_MAC_VER_24:
3927 case RTL_GIGA_MAC_VER_25:
3928 case RTL_GIGA_MAC_VER_26:
3929 ops->disable = r8168c_hw_jumbo_disable;
3930 ops->enable = r8168c_hw_jumbo_enable;
3931 break;
3932 case RTL_GIGA_MAC_VER_27:
3933 case RTL_GIGA_MAC_VER_28:
3934 ops->disable = r8168dp_hw_jumbo_disable;
3935 ops->enable = r8168dp_hw_jumbo_enable;
3936 break;
3937 case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
3938 case RTL_GIGA_MAC_VER_32:
3939 case RTL_GIGA_MAC_VER_33:
3940 case RTL_GIGA_MAC_VER_34:
3941 ops->disable = r8168e_hw_jumbo_disable;
3942 ops->enable = r8168e_hw_jumbo_enable;
3943 break;
3944
3945 /*
3946 * No action needed for jumbo frames with 8169.
3947 * No jumbo for 810x at all.
3948 */
3949 default:
3950 ops->disable = NULL;
3951 ops->enable = NULL;
3952 break;
3953 }
3954}
3955
6f43adc8
FR
3956static void rtl_hw_reset(struct rtl8169_private *tp)
3957{
3958 void __iomem *ioaddr = tp->mmio_addr;
3959 int i;
3960
3961 /* Soft reset the chip. */
3962 RTL_W8(ChipCmd, CmdReset);
3963
3964 /* Check that the chip has finished the reset. */
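 /* Up to 100 polls x 100 us: ~10 ms worst case. */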
3965 for (i = 0; i < 100; i++) {
3966 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
3967 break;
92fc43b4 3968 udelay(100);
6f43adc8
FR
3969 }
3970}
3971
b6ffd97f 3972static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
953a12cc 3973{
b6ffd97f
FR
3974 struct rtl_fw *rtl_fw;
3975 const char *name;
3976 int rc = -ENOMEM;
953a12cc 3977
b6ffd97f
FR
3978 name = rtl_lookup_firmware_name(tp);
3979 if (!name)
3980 goto out_no_firmware;
953a12cc 3981
b6ffd97f
FR
3982 rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
3983 if (!rtl_fw)
3984 goto err_warn;
31bd204f 3985
b6ffd97f
FR
3986 rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
3987 if (rc < 0)
3988 goto err_free;
3989
fd112f2e
FR
3990 rc = rtl_check_firmware(tp, rtl_fw);
3991 if (rc < 0)
3992 goto err_release_firmware;
3993
b6ffd97f
FR
3994 tp->rtl_fw = rtl_fw;
3995out:
3996 return;
3997
fd112f2e
FR
3998err_release_firmware:
3999 release_firmware(rtl_fw->fw);
b6ffd97f
FR
4000err_free:
4001 kfree(rtl_fw);
4002err_warn:
4003 netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
4004 name, rc);
4005out_no_firmware:
4006 tp->rtl_fw = NULL;
4007 goto out;
4008}
4009
4010static void rtl_request_firmware(struct rtl8169_private *tp)
4011{
4012 if (IS_ERR(tp->rtl_fw))
4013 rtl_request_uncached_firmware(tp);
953a12cc
FR
4014}
4015
92fc43b4
HW
4016static void rtl_rx_close(struct rtl8169_private *tp)
4017{
4018 void __iomem *ioaddr = tp->mmio_addr;
92fc43b4 4019
1687b566 4020 RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
92fc43b4
HW
4021}
4022
e6de30d6 4023static void rtl8169_hw_reset(struct rtl8169_private *tp)
1da177e4 4024{
e6de30d6 4025 void __iomem *ioaddr = tp->mmio_addr;
4026
1da177e4 4027 /* Disable interrupts */
811fd301 4028 rtl8169_irq_mask_and_ack(tp);
1da177e4 4029
92fc43b4
HW
4030 rtl_rx_close(tp);
4031
5d2e1957 4032 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4804b3b3 4033 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4034 tp->mac_version == RTL_GIGA_MAC_VER_31) {
e6de30d6 4035 while (RTL_R8(TxPoll) & NPQ)
4036 udelay(20);
c2218925
HW
4037 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
4038 tp->mac_version == RTL_GIGA_MAC_VER_35 ||
7e18dca1
HW
4039 tp->mac_version == RTL_GIGA_MAC_VER_36 ||
4040 tp->mac_version == RTL_GIGA_MAC_VER_37) {
c2b0c1e7 4041 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
70090424
HW
4042 while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
4043 udelay(100);
92fc43b4
HW
4044 } else {
4045 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4046 udelay(100);
e6de30d6 4047 }
4048
92fc43b4 4049 rtl_hw_reset(tp);
1da177e4
LT
4050}
4051
7f796d83 4052static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
9cb427b6
FR
4053{
4054 void __iomem *ioaddr = tp->mmio_addr;
9cb427b6
FR
4055
4056 /* Set DMA burst size and Interframe Gap Time */
4057 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
4058 (InterFrameGap << TxInterFrameGapShift));
4059}
4060
07ce4064 4061static void rtl_hw_start(struct net_device *dev)
1da177e4
LT
4062{
4063 struct rtl8169_private *tp = netdev_priv(dev);
1da177e4 4064
07ce4064
FR
4065 tp->hw_start(dev);
4066
da78dbff 4067 rtl_irq_enable_all(tp);
07ce4064
FR
4068}
4069
7f796d83
FR
4070static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
4071 void __iomem *ioaddr)
4072{
4073 /*
4074 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
4075 * register to be written before TxDescAddrLow to work.
4076 * Switching from MMIO to I/O access fixes the issue as well.
4077 */
4078 RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
284901a9 4079 RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
7f796d83 4080 RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
284901a9 4081 RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
7f796d83
FR
4082}
4083
4084static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
4085{
4086 u16 cmd;
4087
4088 cmd = RTL_R16(CPlusCmd);
4089 RTL_W16(CPlusCmd, cmd);
4090 return cmd;
4091}
4092
fdd7b4c3 4093static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
7f796d83
FR
4094{
4095 /* A low value hurts. Let's disable the filtering. */
207d6e87 4096 RTL_W16(RxMaxSize, rx_buf_sz + 1);
7f796d83
FR
4097}
4098
6dccd16b
FR
4099static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4100{
3744100e 4101 static const struct rtl_cfg2_info {
6dccd16b
FR
4102 u32 mac_version;
4103 u32 clk;
4104 u32 val;
4105 } cfg2_info [] = {
4106 { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4107 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4108 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4109 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
3744100e
FR
4110 };
4111 const struct rtl_cfg2_info *p = cfg2_info;
6dccd16b
FR
4112 unsigned int i;
4113 u32 clk;
4114
4115 clk = RTL_R8(Config2) & PCI_Clock_66MHz;
cadf1855 4116 for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
6dccd16b
FR
4117 if ((p->mac_version == mac_version) && (p->clk == clk)) {
4118 RTL_W32(0x7c, p->val);
4119 break;
4120 }
4121 }
4122}
4123
e6b763ea
FR
4124static void rtl_set_rx_mode(struct net_device *dev)
4125{
4126 struct rtl8169_private *tp = netdev_priv(dev);
4127 void __iomem *ioaddr = tp->mmio_addr;
4128 u32 mc_filter[2]; /* Multicast hash filter */
4129 int rx_mode;
4130 u32 tmp = 0;
4131
4132 if (dev->flags & IFF_PROMISC) {
4133 /* Unconditionally log net taps. */
4134 netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
4135 rx_mode =
4136 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
4137 AcceptAllPhys;
4138 mc_filter[1] = mc_filter[0] = 0xffffffff;
4139 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
4140 (dev->flags & IFF_ALLMULTI)) {
4141 /* Too many to filter perfectly -- accept all multicasts. */
4142 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4143 mc_filter[1] = mc_filter[0] = 0xffffffff;
4144 } else {
4145 struct netdev_hw_addr *ha;
4146
4147 rx_mode = AcceptBroadcast | AcceptMyPhys;
4148 mc_filter[1] = mc_filter[0] = 0;
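 /*
 * The top six CRC bits select one of the 64 hash bits spread over
 * the two 32-bit MAR dwords.
 */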
4149 netdev_for_each_mc_addr(ha, dev) {
4150 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
4151 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
4152 rx_mode |= AcceptMulticast;
4153 }
4154 }
4155
4156 if (dev->features & NETIF_F_RXALL)
4157 rx_mode |= (AcceptErr | AcceptRunt);
4158
4159 tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
4160
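 /*
 * Chips newer than RTL_GIGA_MAC_VER_06 take the two filter dwords
 * swapped and byte-reversed.
 */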
4161 if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
4162 u32 data = mc_filter[0];
4163
4164 mc_filter[0] = swab32(mc_filter[1]);
4165 mc_filter[1] = swab32(data);
4166 }
4167
4168 RTL_W32(MAR0 + 4, mc_filter[1]);
4169 RTL_W32(MAR0 + 0, mc_filter[0]);
4170
4171 RTL_W32(RxConfig, tmp);
4172}
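
/*
 * Illustrative sketch, not part of the original driver: the multicast
 * hash computed above, isolated for clarity. "rtl_mc_filter_bit" is a
 * hypothetical helper name; the top six bits of the Ethernet CRC select
 * one of the 64 filter bits spread over the MAR0..MAR7 registers.
 */
#if 0
static u8 rtl_mc_filter_bit(const u8 *addr)
{
	return ether_crc(ETH_ALEN, addr) >> 26;	/* bit index 0..63 */
}
#endif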
4173
07ce4064
FR
4174static void rtl_hw_start_8169(struct net_device *dev)
4175{
4176 struct rtl8169_private *tp = netdev_priv(dev);
4177 void __iomem *ioaddr = tp->mmio_addr;
4178 struct pci_dev *pdev = tp->pci_dev;
07ce4064 4179
9cb427b6
FR
4180 if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
4181 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
4182 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4183 }
4184
1da177e4 4185 RTL_W8(Cfg9346, Cfg9346_Unlock);
cecb5fd7
FR
4186 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4187 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4188 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4189 tp->mac_version == RTL_GIGA_MAC_VER_04)
9cb427b6
FR
4190 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4191
e542a226
HW
4192 rtl_init_rxcfg(tp);
4193
f0298f81 4194 RTL_W8(EarlyTxThres, NoEarlyTx);
1da177e4 4195
6f0333b8 4196 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
1da177e4 4197
cecb5fd7
FR
4198 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4199 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4200 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4201 tp->mac_version == RTL_GIGA_MAC_VER_04)
c946b304 4202 rtl_set_rx_tx_config_registers(tp);
1da177e4 4203
7f796d83 4204 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
1da177e4 4205
cecb5fd7
FR
4206 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4207 tp->mac_version == RTL_GIGA_MAC_VER_03) {
06fa7358 4208 dprintk("Set MAC Reg C+CR Offset 0xE0. "
1da177e4 4209 "Bit-3 and bit-14 MUST be 1\n");
bcf0bf90 4210 tp->cp_cmd |= (1 << 14);
1da177e4
LT
4211 }
4212
bcf0bf90
FR
4213 RTL_W16(CPlusCmd, tp->cp_cmd);
4214
6dccd16b
FR
4215 rtl8169_set_magic_reg(ioaddr, tp->mac_version);
4216
1da177e4
LT
4217 /*
4218 * Undocumented corner. Supposedly:
4219 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
4220 */
4221 RTL_W16(IntrMitigate, 0x0000);
4222
7f796d83 4223 rtl_set_rx_tx_desc_registers(tp, ioaddr);
9cb427b6 4224
cecb5fd7
FR
4225 if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
4226 tp->mac_version != RTL_GIGA_MAC_VER_02 &&
4227 tp->mac_version != RTL_GIGA_MAC_VER_03 &&
4228 tp->mac_version != RTL_GIGA_MAC_VER_04) {
c946b304
FR
4229 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4230 rtl_set_rx_tx_config_registers(tp);
4231 }
4232
1da177e4 4233 RTL_W8(Cfg9346, Cfg9346_Lock);
b518fa8e
FR
4234
4235 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
4236 RTL_R8(IntrMask);
1da177e4
LT
4237
4238 RTL_W32(RxMissed, 0);
4239
07ce4064 4240 rtl_set_rx_mode(dev);
1da177e4
LT
4241
4242 /* no early-rx interrupts */
4243 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
07ce4064 4244}
1da177e4 4245
beb1fe18
HW
4246static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4247{
4248 if (tp->csi_ops.write)
4249 tp->csi_ops.write(tp->mmio_addr, addr, value);
4250}
4251
4252static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4253{
4254 if (tp->csi_ops.read)
4255 return tp->csi_ops.read(tp->mmio_addr, addr);
4256 else
4257 return ~0;
4258}
4259
4260static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
dacf8154
FR
4261{
4262 u32 csi;
4263
beb1fe18
HW
4264 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4265 rtl_csi_write(tp, 0x070c, csi | bits);
4266}
4267
4268static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
4269{
4270 rtl_csi_access_enable(tp, 0x17000000);
650e8d5d 4271}
4272
beb1fe18 4273static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
e6de30d6 4274{
beb1fe18 4275 rtl_csi_access_enable(tp, 0x27000000);
e6de30d6 4276}
4277
beb1fe18 4278static void r8169_csi_write(void __iomem *ioaddr, int addr, int value)
650e8d5d 4279{
beb1fe18
HW
4280 unsigned int i;
4281
4282 RTL_W32(CSIDR, value);
4283 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4284 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4285
4286 for (i = 0; i < 100; i++) {
4287 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
4288 break;
4289 udelay(10);
4290 }
4291}
4292
4293static u32 r8169_csi_read(void __iomem *ioaddr, int addr)
4294{
4295 u32 value = ~0x00;
4296 unsigned int i;
4297
4298 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
4299 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4300
4301 for (i = 0; i < 100; i++) {
4302 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
4303 value = RTL_R32(CSIDR);
4304 break;
4305 }
4306 udelay(10);
4307 }
4308
4309 return value;
4310}
4311
7e18dca1
HW
4312static void r8402_csi_write(void __iomem *ioaddr, int addr, int value)
4313{
4314 unsigned int i;
4315
4316 RTL_W32(CSIDR, value);
4317 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4318 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
4319 CSIAR_FUNC_NIC);
4320
4321 for (i = 0; i < 100; i++) {
4322 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
4323 break;
4324 udelay(10);
4325 }
4326}
4327
4328static u32 r8402_csi_read(void __iomem *ioaddr, int addr)
4329{
4330 u32 value = ~0x00;
4331 unsigned int i;
4332
4333 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
4334 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4335
4336 for (i = 0; i < 100; i++) {
4337 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
4338 value = RTL_R32(CSIDR);
4339 break;
4340 }
4341 udelay(10);
4342 }
4343
4344 return value;
4345}
4346
beb1fe18
HW
4347static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
4348{
4349 struct csi_ops *ops = &tp->csi_ops;
4350
4351 switch (tp->mac_version) {
4352 case RTL_GIGA_MAC_VER_01:
4353 case RTL_GIGA_MAC_VER_02:
4354 case RTL_GIGA_MAC_VER_03:
4355 case RTL_GIGA_MAC_VER_04:
4356 case RTL_GIGA_MAC_VER_05:
4357 case RTL_GIGA_MAC_VER_06:
4358 case RTL_GIGA_MAC_VER_10:
4359 case RTL_GIGA_MAC_VER_11:
4360 case RTL_GIGA_MAC_VER_12:
4361 case RTL_GIGA_MAC_VER_13:
4362 case RTL_GIGA_MAC_VER_14:
4363 case RTL_GIGA_MAC_VER_15:
4364 case RTL_GIGA_MAC_VER_16:
4365 case RTL_GIGA_MAC_VER_17:
4366 ops->write = NULL;
4367 ops->read = NULL;
4368 break;
4369
7e18dca1
HW
4370 case RTL_GIGA_MAC_VER_37:
4371 ops->write = r8402_csi_write;
4372 ops->read = r8402_csi_read;
4373 break;
4374
beb1fe18
HW
4375 default:
4376 ops->write = r8169_csi_write;
4377 ops->read = r8169_csi_read;
4378 break;
4379 }
dacf8154
FR
4380}
4381
4382struct ephy_info {
4383 unsigned int offset;
4384 u16 mask;
4385 u16 bits;
4386};
4387
350f7596 4388static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
dacf8154
FR
4389{
4390 u16 w;
4391
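 /* Read-modify-write each entry: clear the 'mask' bits, then set 'bits'. */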
4392 while (len-- > 0) {
4393 w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
4394 rtl_ephy_write(ioaddr, e->offset, w);
4395 e++;
4396 }
4397}
4398
b726e493
FR
4399static void rtl_disable_clock_request(struct pci_dev *pdev)
4400{
e44daade 4401 int cap = pci_pcie_cap(pdev);
b726e493
FR
4402
4403 if (cap) {
4404 u16 ctl;
4405
4406 pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
4407 ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
4408 pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
4409 }
4410}
4411
e6de30d6 4412static void rtl_enable_clock_request(struct pci_dev *pdev)
4413{
e44daade 4414 int cap = pci_pcie_cap(pdev);
e6de30d6 4415
4416 if (cap) {
4417 u16 ctl;
4418
4419 pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
4420 ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
4421 pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
4422 }
4423}
4424
b726e493
FR
4425#define R8168_CPCMD_QUIRK_MASK (\
4426 EnableBist | \
4427 Mac_dbgo_oe | \
4428 Force_half_dup | \
4429 Force_rxflow_en | \
4430 Force_txflow_en | \
4431 Cxpl_dbg_sel | \
4432 ASF | \
4433 PktCntrDisable | \
4434 Mac_dbgo_sel)
4435
beb1fe18 4436static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
219a1e9d 4437{
beb1fe18
HW
4438 void __iomem *ioaddr = tp->mmio_addr;
4439 struct pci_dev *pdev = tp->pci_dev;
4440
b726e493
FR
4441 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4442
4443 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4444
2e68ae44
FR
4445 rtl_tx_performance_tweak(pdev,
4446 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
219a1e9d
FR
4447}
4448
beb1fe18 4449static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
219a1e9d 4450{
beb1fe18
HW
4451 void __iomem *ioaddr = tp->mmio_addr;
4452
4453 rtl_hw_start_8168bb(tp);
b726e493 4454
f0298f81 4455 RTL_W8(MaxTxPacketSize, TxPacketMax);
b726e493
FR
4456
4457 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
219a1e9d
FR
4458}
4459
beb1fe18 4460static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
219a1e9d 4461{
beb1fe18
HW
4462 void __iomem *ioaddr = tp->mmio_addr;
4463 struct pci_dev *pdev = tp->pci_dev;
4464
b726e493
FR
4465 RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
4466
4467 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4468
219a1e9d 4469 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
b726e493
FR
4470
4471 rtl_disable_clock_request(pdev);
4472
4473 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
219a1e9d
FR
4474}
4475
beb1fe18 4476static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
219a1e9d 4477{
beb1fe18 4478 void __iomem *ioaddr = tp->mmio_addr;
350f7596 4479 static const struct ephy_info e_info_8168cp[] = {
b726e493
FR
4480 { 0x01, 0, 0x0001 },
4481 { 0x02, 0x0800, 0x1000 },
4482 { 0x03, 0, 0x0042 },
4483 { 0x06, 0x0080, 0x0000 },
4484 { 0x07, 0, 0x2000 }
4485 };
4486
beb1fe18 4487 rtl_csi_access_enable_2(tp);
b726e493
FR
4488
4489 rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
4490
beb1fe18 4491 __rtl_hw_start_8168cp(tp);
219a1e9d
FR
4492}
4493
beb1fe18 4494static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
ef3386f0 4495{
beb1fe18
HW
4496 void __iomem *ioaddr = tp->mmio_addr;
4497 struct pci_dev *pdev = tp->pci_dev;
4498
4499 rtl_csi_access_enable_2(tp);
ef3386f0
FR
4500
4501 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4502
4503 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4504
4505 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4506}
4507
beb1fe18 4508static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
7f3e3d3a 4509{
beb1fe18
HW
4510 void __iomem *ioaddr = tp->mmio_addr;
4511 struct pci_dev *pdev = tp->pci_dev;
4512
4513 rtl_csi_access_enable_2(tp);
7f3e3d3a
FR
4514
4515 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4516
4517 /* Magic. */
4518 RTL_W8(DBG_REG, 0x20);
4519
f0298f81 4520 RTL_W8(MaxTxPacketSize, TxPacketMax);
7f3e3d3a
FR
4521
4522 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4523
4524 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4525}
4526
beb1fe18 4527static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
219a1e9d 4528{
beb1fe18 4529 void __iomem *ioaddr = tp->mmio_addr;
350f7596 4530 static const struct ephy_info e_info_8168c_1[] = {
b726e493
FR
4531 { 0x02, 0x0800, 0x1000 },
4532 { 0x03, 0, 0x0002 },
4533 { 0x06, 0x0080, 0x0000 }
4534 };
4535
beb1fe18 4536 rtl_csi_access_enable_2(tp);
b726e493
FR
4537
4538 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4539
4540 rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
4541
beb1fe18 4542 __rtl_hw_start_8168cp(tp);
219a1e9d
FR
4543}
4544
beb1fe18 4545static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
219a1e9d 4546{
beb1fe18 4547 void __iomem *ioaddr = tp->mmio_addr;
350f7596 4548 static const struct ephy_info e_info_8168c_2[] = {
b726e493
FR
4549 { 0x01, 0, 0x0001 },
4550 { 0x03, 0x0400, 0x0220 }
4551 };
4552
beb1fe18 4553 rtl_csi_access_enable_2(tp);
b726e493
FR
4554
4555 rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
4556
beb1fe18 4557 __rtl_hw_start_8168cp(tp);
219a1e9d
FR
4558}
4559
beb1fe18 4560static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
197ff761 4561{
beb1fe18 4562 rtl_hw_start_8168c_2(tp);
197ff761
FR
4563}
4564
beb1fe18 4565static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
6fb07058 4566{
beb1fe18 4567 rtl_csi_access_enable_2(tp);
6fb07058 4568
beb1fe18 4569 __rtl_hw_start_8168cp(tp);
6fb07058
FR
4570}
4571
beb1fe18 4572static void rtl_hw_start_8168d(struct rtl8169_private *tp)
5b538df9 4573{
beb1fe18
HW
4574 void __iomem *ioaddr = tp->mmio_addr;
4575 struct pci_dev *pdev = tp->pci_dev;
4576
4577 rtl_csi_access_enable_2(tp);
5b538df9
FR
4578
4579 rtl_disable_clock_request(pdev);
4580
f0298f81 4581 RTL_W8(MaxTxPacketSize, TxPacketMax);
5b538df9
FR
4582
4583 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4584
4585 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4586}
4587
beb1fe18 4588static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
4804b3b3 4589{
beb1fe18
HW
4590 void __iomem *ioaddr = tp->mmio_addr;
4591 struct pci_dev *pdev = tp->pci_dev;
4592
4593 rtl_csi_access_enable_1(tp);
4804b3b3 4594
4595 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4596
4597 RTL_W8(MaxTxPacketSize, TxPacketMax);
4598
4599 rtl_disable_clock_request(pdev);
4600}
4601
beb1fe18 4602static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
e6de30d6 4603{
beb1fe18
HW
4604 void __iomem *ioaddr = tp->mmio_addr;
4605 struct pci_dev *pdev = tp->pci_dev;
e6de30d6 4606 static const struct ephy_info e_info_8168d_4[] = {
4607 { 0x0b, ~0, 0x48 },
4608 { 0x19, 0x20, 0x50 },
4609 { 0x0c, ~0, 0x20 }
4610 };
4611 int i;
4612
beb1fe18 4613 rtl_csi_access_enable_1(tp);
e6de30d6 4614
4615 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4616
4617 RTL_W8(MaxTxPacketSize, TxPacketMax);
4618
4619 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4620 const struct ephy_info *e = e_info_8168d_4 + i;
4621 u16 w;
4622
4623 w = rtl_ephy_read(ioaddr, e->offset);
4624 rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits);
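 /*
 * Note: e->offset is read above, yet register 0x03 is written for
 * every entry; this looks unintentional.
 */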
4625 }
4626
4627 rtl_enable_clock_request(pdev);
4628}
4629
beb1fe18 4630static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
01dc7fec 4631{
beb1fe18
HW
4632 void __iomem *ioaddr = tp->mmio_addr;
4633 struct pci_dev *pdev = tp->pci_dev;
70090424 4634 static const struct ephy_info e_info_8168e_1[] = {
01dc7fec 4635 { 0x00, 0x0200, 0x0100 },
4636 { 0x00, 0x0000, 0x0004 },
4637 { 0x06, 0x0002, 0x0001 },
4638 { 0x06, 0x0000, 0x0030 },
4639 { 0x07, 0x0000, 0x2000 },
4640 { 0x00, 0x0000, 0x0020 },
4641 { 0x03, 0x5800, 0x2000 },
4642 { 0x03, 0x0000, 0x0001 },
4643 { 0x01, 0x0800, 0x1000 },
4644 { 0x07, 0x0000, 0x4000 },
4645 { 0x1e, 0x0000, 0x2000 },
4646 { 0x19, 0xffff, 0xfe6c },
4647 { 0x0a, 0x0000, 0x0040 }
4648 };
4649
beb1fe18 4650 rtl_csi_access_enable_2(tp);
01dc7fec 4651
70090424 4652 rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
01dc7fec 4653
4654 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4655
4656 RTL_W8(MaxTxPacketSize, TxPacketMax);
4657
4658 rtl_disable_clock_request(pdev);
4659
4660 /* Reset tx FIFO pointer */
cecb5fd7
FR
4661 RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
4662 RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
01dc7fec 4663
cecb5fd7 4664 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
01dc7fec 4665}
4666
beb1fe18 4667static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
70090424 4668{
beb1fe18
HW
4669 void __iomem *ioaddr = tp->mmio_addr;
4670 struct pci_dev *pdev = tp->pci_dev;
70090424
HW
4671 static const struct ephy_info e_info_8168e_2[] = {
4672 { 0x09, 0x0000, 0x0080 },
4673 { 0x19, 0x0000, 0x0224 }
4674 };
4675
beb1fe18 4676 rtl_csi_access_enable_1(tp);
70090424
HW
4677
4678 rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
4679
4680 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4681
4682 rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4683 rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4684 rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
4685 rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
4686 rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
4687 rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
4688 rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4689 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
4690 ERIAR_EXGMAC);
4691
3090bd9a 4692 RTL_W8(MaxTxPacketSize, EarlySize);
70090424
HW
4693
4694 rtl_disable_clock_request(pdev);
4695
4696 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
4697 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
4698
4699 /* Adjust EEE LED frequency */
4700 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
4701
4702 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
4703 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
4704 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
4705}
4706
beb1fe18 4707static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
c2218925 4708{
beb1fe18
HW
4709 void __iomem *ioaddr = tp->mmio_addr;
4710 struct pci_dev *pdev = tp->pci_dev;
c2218925
HW
4711 static const struct ephy_info e_info_8168f_1[] = {
4712 { 0x06, 0x00c0, 0x0020 },
4713 { 0x08, 0x0001, 0x0002 },
4714 { 0x09, 0x0000, 0x0080 },
4715 { 0x19, 0x0000, 0x0224 }
4716 };
4717
beb1fe18 4718 rtl_csi_access_enable_1(tp);
c2218925
HW
4719
4720 rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
4721
4722 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4723
4724 rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4725 rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4726 rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
4727 rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
4728 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
4729 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
4730 rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4731 rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4732 rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
4733 rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
4734 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
4735 ERIAR_EXGMAC);
4736
4737 RTL_W8(MaxTxPacketSize, EarlySize);
4738
4739 rtl_disable_clock_request(pdev);
4740
4741 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
4742 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
4743
4744 /* Adjust EEE LED frequency */
4745 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
4746
4747 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
4748 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
4749 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
4750}
4751
07ce4064
FR
4752static void rtl_hw_start_8168(struct net_device *dev)
4753{
2dd99530
FR
4754 struct rtl8169_private *tp = netdev_priv(dev);
4755 void __iomem *ioaddr = tp->mmio_addr;
4756
4757 RTL_W8(Cfg9346, Cfg9346_Unlock);
4758
f0298f81 4759 RTL_W8(MaxTxPacketSize, TxPacketMax);
2dd99530 4760
6f0333b8 4761 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
2dd99530 4762
0e485150 4763 tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
2dd99530
FR
4764
4765 RTL_W16(CPlusCmd, tp->cp_cmd);
4766
0e485150 4767 RTL_W16(IntrMitigate, 0x5151);
2dd99530 4768
0e485150 4769 /* Workaround for RxFIFO overflow. */
811fd301 4770 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
da78dbff
FR
4771 tp->event_slow |= RxFIFOOver | PCSTimeout;
4772 tp->event_slow &= ~RxOverflow;
0e485150
FR
4773 }
4774
4775 rtl_set_rx_tx_desc_registers(tp, ioaddr);
2dd99530 4776
b8363901
FR
4777 rtl_set_rx_mode(dev);
4778
4779 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
4780 (InterFrameGap << TxInterFrameGapShift));
2dd99530
FR
4781
4782 RTL_R8(IntrMask);
4783
219a1e9d
FR
4784 switch (tp->mac_version) {
4785 case RTL_GIGA_MAC_VER_11:
beb1fe18 4786 rtl_hw_start_8168bb(tp);
4804b3b3 4787 break;
219a1e9d
FR
4788
4789 case RTL_GIGA_MAC_VER_12:
4790 case RTL_GIGA_MAC_VER_17:
beb1fe18 4791 rtl_hw_start_8168bef(tp);
4804b3b3 4792 break;
219a1e9d
FR
4793
4794 case RTL_GIGA_MAC_VER_18:
beb1fe18 4795 rtl_hw_start_8168cp_1(tp);
4804b3b3 4796 break;
219a1e9d
FR
4797
4798 case RTL_GIGA_MAC_VER_19:
beb1fe18 4799 rtl_hw_start_8168c_1(tp);
4804b3b3 4800 break;
219a1e9d
FR
4801
4802 case RTL_GIGA_MAC_VER_20:
beb1fe18 4803 rtl_hw_start_8168c_2(tp);
4804b3b3 4804 break;
219a1e9d 4805
197ff761 4806 case RTL_GIGA_MAC_VER_21:
beb1fe18 4807 rtl_hw_start_8168c_3(tp);
4804b3b3 4808 break;
197ff761 4809
6fb07058 4810 case RTL_GIGA_MAC_VER_22:
beb1fe18 4811 rtl_hw_start_8168c_4(tp);
4804b3b3 4812 break;
6fb07058 4813
ef3386f0 4814 case RTL_GIGA_MAC_VER_23:
beb1fe18 4815 rtl_hw_start_8168cp_2(tp);
4804b3b3 4816 break;
ef3386f0 4817
7f3e3d3a 4818 case RTL_GIGA_MAC_VER_24:
beb1fe18 4819 rtl_hw_start_8168cp_3(tp);
4804b3b3 4820 break;
7f3e3d3a 4821
5b538df9 4822 case RTL_GIGA_MAC_VER_25:
daf9df6d 4823 case RTL_GIGA_MAC_VER_26:
4824 case RTL_GIGA_MAC_VER_27:
beb1fe18 4825 rtl_hw_start_8168d(tp);
4804b3b3 4826 break;
5b538df9 4827
e6de30d6 4828 case RTL_GIGA_MAC_VER_28:
beb1fe18 4829 rtl_hw_start_8168d_4(tp);
4804b3b3 4830 break;
cecb5fd7 4831
4804b3b3 4832 case RTL_GIGA_MAC_VER_31:
beb1fe18 4833 rtl_hw_start_8168dp(tp);
4804b3b3 4834 break;
4835
01dc7fec 4836 case RTL_GIGA_MAC_VER_32:
4837 case RTL_GIGA_MAC_VER_33:
beb1fe18 4838 rtl_hw_start_8168e_1(tp);
70090424
HW
4839 break;
4840 case RTL_GIGA_MAC_VER_34:
beb1fe18 4841 rtl_hw_start_8168e_2(tp);
01dc7fec 4842 break;
e6de30d6 4843
c2218925
HW
4844 case RTL_GIGA_MAC_VER_35:
4845 case RTL_GIGA_MAC_VER_36:
beb1fe18 4846 rtl_hw_start_8168f_1(tp);
c2218925
HW
4847 break;
4848
219a1e9d
FR
4849 default:
4850 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
4851 dev->name, tp->mac_version);
4804b3b3 4852 break;
219a1e9d 4853 }
2dd99530 4854
0e485150
FR
4855 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4856
b8363901
FR
4857 RTL_W8(Cfg9346, Cfg9346_Lock);
4858
2dd99530 4859 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
07ce4064 4860}
1da177e4 4861
2857ffb7
FR
4862#define R810X_CPCMD_QUIRK_MASK (\
4863 EnableBist | \
4864 Mac_dbgo_oe | \
4865 Force_half_dup | \
5edcc537 4866 Force_rxflow_en | \
2857ffb7
FR
4867 Force_txflow_en | \
4868 Cxpl_dbg_sel | \
4869 ASF | \
4870 PktCntrDisable | \
d24e9aaf 4871 Mac_dbgo_sel)
2857ffb7 4872
beb1fe18 4873static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
2857ffb7 4874{
beb1fe18
HW
4875 void __iomem *ioaddr = tp->mmio_addr;
4876 struct pci_dev *pdev = tp->pci_dev;
350f7596 4877 static const struct ephy_info e_info_8102e_1[] = {
2857ffb7
FR
4878 { 0x01, 0, 0x6e65 },
4879 { 0x02, 0, 0x091f },
4880 { 0x03, 0, 0xc2f9 },
4881 { 0x06, 0, 0xafb5 },
4882 { 0x07, 0, 0x0e00 },
4883 { 0x19, 0, 0xec80 },
4884 { 0x01, 0, 0x2e65 },
4885 { 0x01, 0, 0x6e65 }
4886 };
4887 u8 cfg1;
4888
beb1fe18 4889 rtl_csi_access_enable_2(tp);
2857ffb7
FR
4890
4891 RTL_W8(DBG_REG, FIX_NAK_1);
4892
4893 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4894
4895 RTL_W8(Config1,
4896 LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
4897 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4898
4899 cfg1 = RTL_R8(Config1);
4900 if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
4901 RTL_W8(Config1, cfg1 & ~LEDS0);
4902
2857ffb7
FR
4903 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
4904}
4905
beb1fe18 4906static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
2857ffb7 4907{
beb1fe18
HW
4908 void __iomem *ioaddr = tp->mmio_addr;
4909 struct pci_dev *pdev = tp->pci_dev;
4910
4911 rtl_csi_access_enable_2(tp);
2857ffb7
FR
4912
4913 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4914
4915 RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
4916 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
2857ffb7
FR
4917}
4918
beb1fe18 4919static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
2857ffb7 4920{
beb1fe18 4921 rtl_hw_start_8102e_2(tp);
2857ffb7 4922
beb1fe18 4923 rtl_ephy_write(tp->mmio_addr, 0x03, 0xc2f9);
2857ffb7
FR
4924}
4925
beb1fe18 4926static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5a5e4443 4927{
beb1fe18 4928 void __iomem *ioaddr = tp->mmio_addr;
5a5e4443
HW
4929 static const struct ephy_info e_info_8105e_1[] = {
4930 { 0x07, 0, 0x4000 },
4931 { 0x19, 0, 0x0200 },
4932 { 0x19, 0, 0x0020 },
4933 { 0x1e, 0, 0x2000 },
4934 { 0x03, 0, 0x0001 },
4935 { 0x19, 0, 0x0100 },
4936 { 0x19, 0, 0x0004 },
4937 { 0x0a, 0, 0x0020 }
4938 };
4939
cecb5fd7 4940 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5a5e4443
HW
4941 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
4942
cecb5fd7 4943 /* Disable Early Tally Counter */
5a5e4443
HW
4944 RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
4945
4946 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
4f6b00e5 4947 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5a5e4443
HW
4948
4949 rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
4950}
4951
beb1fe18 4952static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
5a5e4443 4953{
beb1fe18
HW
4954 void __iomem *ioaddr = tp->mmio_addr;
4955
4956 rtl_hw_start_8105e_1(tp);
5a5e4443
HW
4957 rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
4958}
4959
7e18dca1
HW
4960static void rtl_hw_start_8402(struct rtl8169_private *tp)
4961{
4962 void __iomem *ioaddr = tp->mmio_addr;
4963 static const struct ephy_info e_info_8402[] = {
4964 { 0x19, 0xffff, 0xff64 },
4965 { 0x1e, 0, 0x4000 }
4966 };
4967
4968 rtl_csi_access_enable_2(tp);
4969
4970 /* Force LAN exit from ASPM if Rx/Tx are not idle */
4971 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
4972
4973 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
4974 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
4975
4976 rtl_ephy_init(ioaddr, e_info_8402, ARRAY_SIZE(e_info_8402));
4977
4978 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
4979
4980 rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
4981 rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
4982 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
4983 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
4984 rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4985 rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4986 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00,
4987 ERIAR_EXGMAC);
4988}
4989
07ce4064
FR
4990static void rtl_hw_start_8101(struct net_device *dev)
4991{
cdf1a608
FR
4992 struct rtl8169_private *tp = netdev_priv(dev);
4993 void __iomem *ioaddr = tp->mmio_addr;
4994 struct pci_dev *pdev = tp->pci_dev;
4995
da78dbff
FR
4996 if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
4997 tp->event_slow &= ~RxFIFOOver;
811fd301 4998
cecb5fd7
FR
4999 if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
5000 tp->mac_version == RTL_GIGA_MAC_VER_16) {
e44daade 5001 int cap = pci_pcie_cap(pdev);
9c14ceaf
FR
5002
5003 if (cap) {
5004 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
5005 PCI_EXP_DEVCTL_NOSNOOP_EN);
5006 }
cdf1a608
FR
5007 }
5008
d24e9aaf
HW
5009 RTL_W8(Cfg9346, Cfg9346_Unlock);
5010
2857ffb7
FR
5011 switch (tp->mac_version) {
5012 case RTL_GIGA_MAC_VER_07:
beb1fe18 5013 rtl_hw_start_8102e_1(tp);
2857ffb7
FR
5014 break;
5015
5016 case RTL_GIGA_MAC_VER_08:
beb1fe18 5017 rtl_hw_start_8102e_3(tp);
2857ffb7
FR
5018 break;
5019
5020 case RTL_GIGA_MAC_VER_09:
beb1fe18 5021 rtl_hw_start_8102e_2(tp);
2857ffb7 5022 break;
5a5e4443
HW
5023
5024 case RTL_GIGA_MAC_VER_29:
beb1fe18 5025 rtl_hw_start_8105e_1(tp);
5a5e4443
HW
5026 break;
5027 case RTL_GIGA_MAC_VER_30:
beb1fe18 5028 rtl_hw_start_8105e_2(tp);
5a5e4443 5029 break;
7e18dca1
HW
5030
5031 case RTL_GIGA_MAC_VER_37:
5032 rtl_hw_start_8402(tp);
5033 break;
cdf1a608
FR
5034 }
5035
d24e9aaf 5036 RTL_W8(Cfg9346, Cfg9346_Lock);
cdf1a608 5037
f0298f81 5038 RTL_W8(MaxTxPacketSize, TxPacketMax);
cdf1a608 5039
6f0333b8 5040 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
cdf1a608 5041
d24e9aaf 5042 tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
cdf1a608
FR
5043 RTL_W16(CPlusCmd, tp->cp_cmd);
5044
5045 RTL_W16(IntrMitigate, 0x0000);
5046
5047 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5048
5049 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5050 rtl_set_rx_tx_config_registers(tp);
5051
cdf1a608
FR
5052 RTL_R8(IntrMask);
5053
cdf1a608
FR
5054 rtl_set_rx_mode(dev);
5055
5056 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
1da177e4
LT
5057}
5058
5059static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5060{
d58d46b5
FR
5061 struct rtl8169_private *tp = netdev_priv(dev);
5062
5063 if (new_mtu < ETH_ZLEN ||
5064 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
1da177e4
LT
5065 return -EINVAL;
5066
d58d46b5
FR
5067 if (new_mtu > ETH_DATA_LEN)
5068 rtl_hw_jumbo_enable(tp);
5069 else
5070 rtl_hw_jumbo_disable(tp);
5071
1da177e4 5072 dev->mtu = new_mtu;
350fb32a
MM
5073 netdev_update_features(dev);
5074
323bb685 5075 return 0;
1da177e4
LT
5076}
5077
5078static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
5079{
95e0918d 5080 desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
1da177e4
LT
5081 desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
5082}
5083
6f0333b8
ED
5084static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
5085 void **data_buff, struct RxDesc *desc)
1da177e4 5086{
48addcc9 5087 dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
231aee63 5088 DMA_FROM_DEVICE);
48addcc9 5089
6f0333b8
ED
5090 kfree(*data_buff);
5091 *data_buff = NULL;
1da177e4
LT
5092 rtl8169_make_unusable_by_asic(desc);
5093}
5094
5095static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
5096{
5097 u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
5098
5099 desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
5100}
5101
5102static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
5103 u32 rx_buf_sz)
5104{
5105 desc->addr = cpu_to_le64(mapping);
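 /* Make the address visible before rtl8169_mark_to_asic() sets DescOwn. */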
5106 wmb();
5107 rtl8169_mark_to_asic(desc, rx_buf_sz);
5108}
5109
6f0333b8
ED
5110static inline void *rtl8169_align(void *data)
5111{
5112 return (void *)ALIGN((long)data, 16);
5113}
5114
0ecbe1ca
SG
5115static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
5116 struct RxDesc *desc)
1da177e4 5117{
6f0333b8 5118 void *data;
1da177e4 5119 dma_addr_t mapping;
48addcc9 5120 struct device *d = &tp->pci_dev->dev;
0ecbe1ca 5121 struct net_device *dev = tp->dev;
6f0333b8 5122 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
1da177e4 5123
6f0333b8
ED
5124 data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
5125 if (!data)
5126 return NULL;
e9f63f30 5127
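 /*
 * Rx buffers are kept 16-byte aligned: if kmalloc's result is not
 * aligned, retry with 15 bytes of slack and align by hand.
 */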
6f0333b8
ED
5128 if (rtl8169_align(data) != data) {
5129 kfree(data);
5130 data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
5131 if (!data)
5132 return NULL;
5133 }
3eafe507 5134
48addcc9 5135 mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
231aee63 5136 DMA_FROM_DEVICE);
d827d86b
SG
5137 if (unlikely(dma_mapping_error(d, mapping))) {
5138 if (net_ratelimit())
5139 netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
3eafe507 5140 goto err_out;
d827d86b 5141 }
1da177e4
LT
5142
5143 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
6f0333b8 5144 return data;
3eafe507
SG
5145
5146err_out:
5147 kfree(data);
5148 return NULL;
1da177e4
LT
5149}
5150
5151static void rtl8169_rx_clear(struct rtl8169_private *tp)
5152{
07d3f51f 5153 unsigned int i;
1da177e4
LT
5154
5155 for (i = 0; i < NUM_RX_DESC; i++) {
6f0333b8
ED
5156 if (tp->Rx_databuff[i]) {
5157 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
1da177e4
LT
5158 tp->RxDescArray + i);
5159 }
5160 }
5161}
5162
0ecbe1ca 5163static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
1da177e4 5164{
0ecbe1ca
SG
5165 desc->opts1 |= cpu_to_le32(RingEnd);
5166}
5b0384f4 5167
0ecbe1ca
SG
5168static int rtl8169_rx_fill(struct rtl8169_private *tp)
5169{
5170 unsigned int i;
1da177e4 5171
0ecbe1ca
SG
5172 for (i = 0; i < NUM_RX_DESC; i++) {
5173 void *data;
4ae47c2d 5174
6f0333b8 5175 if (tp->Rx_databuff[i])
1da177e4 5176 continue;
bcf0bf90 5177
0ecbe1ca 5178 data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
6f0333b8
ED
5179 if (!data) {
5180 rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
0ecbe1ca 5181 goto err_out;
6f0333b8
ED
5182 }
5183 tp->Rx_databuff[i] = data;
1da177e4 5184 }
1da177e4 5185
0ecbe1ca
SG
5186 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
5187 return 0;
5188
5189err_out:
5190 rtl8169_rx_clear(tp);
5191 return -ENOMEM;
1da177e4
LT
5192}
5193
1da177e4
LT
5194static int rtl8169_init_ring(struct net_device *dev)
5195{
5196 struct rtl8169_private *tp = netdev_priv(dev);
5197
5198 rtl8169_init_ring_indexes(tp);
5199
5200 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
6f0333b8 5201 memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
1da177e4 5202
0ecbe1ca 5203 return rtl8169_rx_fill(tp);
1da177e4
LT
5204}
5205
48addcc9 5206static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
1da177e4
LT
5207 struct TxDesc *desc)
5208{
5209 unsigned int len = tx_skb->len;
5210
48addcc9
SG
5211 dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
5212
1da177e4
LT
5213 desc->opts1 = 0x00;
5214 desc->opts2 = 0x00;
5215 desc->addr = 0x00;
5216 tx_skb->len = 0;
5217}
5218
3eafe507
SG
5219static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
5220 unsigned int n)
1da177e4
LT
5221{
5222 unsigned int i;
5223
3eafe507
SG
5224 for (i = 0; i < n; i++) {
5225 unsigned int entry = (start + i) % NUM_TX_DESC;
1da177e4
LT
5226 struct ring_info *tx_skb = tp->tx_skb + entry;
5227 unsigned int len = tx_skb->len;
5228
5229 if (len) {
5230 struct sk_buff *skb = tx_skb->skb;
5231
48addcc9 5232 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
1da177e4
LT
5233 tp->TxDescArray + entry);
5234 if (skb) {
cac4b22f 5235 tp->dev->stats.tx_dropped++;
1da177e4
LT
5236 dev_kfree_skb(skb);
5237 tx_skb->skb = NULL;
5238 }
1da177e4
LT
5239 }
5240 }
3eafe507
SG
5241}
5242
5243static void rtl8169_tx_clear(struct rtl8169_private *tp)
5244{
5245 rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
1da177e4 5246 tp->cur_tx = tp->dirty_tx = 0;
036dafa2 5247 netdev_reset_queue(tp->dev);
1da177e4
LT
5248}
5249
4422bcd4 5250static void rtl_reset_work(struct rtl8169_private *tp)
1da177e4 5251{
c4028958 5252 struct net_device *dev = tp->dev;
56de414c 5253 int i;
1da177e4 5254
da78dbff
FR
5255 napi_disable(&tp->napi);
5256 netif_stop_queue(dev);
5257 synchronize_sched();
1da177e4 5258
c7c2c39b 5259 rtl8169_hw_reset(tp);
5260
56de414c
FR
5261 for (i = 0; i < NUM_RX_DESC; i++)
5262 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
5263
1da177e4 5264 rtl8169_tx_clear(tp);
c7c2c39b 5265 rtl8169_init_ring_indexes(tp);
1da177e4 5266
da78dbff 5267 napi_enable(&tp->napi);
56de414c
FR
5268 rtl_hw_start(dev);
5269 netif_wake_queue(dev);
5270 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
1da177e4
LT
5271}
5272
5273static void rtl8169_tx_timeout(struct net_device *dev)
5274{
da78dbff
FR
5275 struct rtl8169_private *tp = netdev_priv(dev);
5276
5277 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
1da177e4
LT
5278}
5279
5280static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
2b7b4318 5281 u32 *opts)
1da177e4
LT
5282{
5283 struct skb_shared_info *info = skb_shinfo(skb);
5284 unsigned int cur_frag, entry;
a6343afb 5285 struct TxDesc * uninitialized_var(txd);
48addcc9 5286 struct device *d = &tp->pci_dev->dev;
1da177e4
LT
5287
5288 entry = tp->cur_tx;
5289 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
9e903e08 5290 const skb_frag_t *frag = info->frags + cur_frag;
1da177e4
LT
5291 dma_addr_t mapping;
5292 u32 status, len;
5293 void *addr;
5294
5295 entry = (entry + 1) % NUM_TX_DESC;
5296
5297 txd = tp->TxDescArray + entry;
9e903e08 5298 len = skb_frag_size(frag);
929f6189 5299 addr = skb_frag_address(frag);
48addcc9 5300 mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
d827d86b
SG
5301 if (unlikely(dma_mapping_error(d, mapping))) {
5302 if (net_ratelimit())
5303 netif_err(tp, drv, tp->dev,
5304 "Failed to map TX fragments DMA!\n");
3eafe507 5305 goto err_out;
d827d86b 5306 }
1da177e4 5307
cecb5fd7 5308 /* Anti gcc 2.95.3 bugware (sic) */
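 /* RingEnd is set only on the ring's last descriptor (entry NUM_TX_DESC - 1). */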
2b7b4318
FR
5309 status = opts[0] | len |
5310 (RingEnd * !((entry + 1) % NUM_TX_DESC));
1da177e4
LT
5311
5312 txd->opts1 = cpu_to_le32(status);
2b7b4318 5313 txd->opts2 = cpu_to_le32(opts[1]);
1da177e4
LT
5314 txd->addr = cpu_to_le64(mapping);
5315
5316 tp->tx_skb[entry].len = len;
5317 }
5318
5319 if (cur_frag) {
5320 tp->tx_skb[entry].skb = skb;
5321 txd->opts1 |= cpu_to_le32(LastFrag);
5322 }
5323
5324 return cur_frag;
3eafe507
SG
5325
5326err_out:
5327 rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
5328 return -EIO;
1da177e4
LT
5329}
5330
2b7b4318
FR
5331static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
5332 struct sk_buff *skb, u32 *opts)
1da177e4 5333{
2b7b4318 5334 const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
350fb32a 5335 u32 mss = skb_shinfo(skb)->gso_size;
2b7b4318 5336 int offset = info->opts_offset;
350fb32a 5337
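 /*
 * opts_offset selects which opts word carries the LSO/checksum bits
 * for this descriptor version.
 */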
2b7b4318
FR
5338 if (mss) {
5339 opts[0] |= TD_LSO;
5340 opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
5341 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
eddc9ec5 5342 const struct iphdr *ip = ip_hdr(skb);
1da177e4
LT
5343
5344 if (ip->protocol == IPPROTO_TCP)
2b7b4318 5345 opts[offset] |= info->checksum.tcp;
1da177e4 5346 else if (ip->protocol == IPPROTO_UDP)
2b7b4318
FR
5347 opts[offset] |= info->checksum.udp;
5348 else
5349 WARN_ON_ONCE(1);
1da177e4 5350 }
1da177e4
LT
5351}
5352
61357325
SH
5353static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5354 struct net_device *dev)
1da177e4
LT
5355{
5356 struct rtl8169_private *tp = netdev_priv(dev);
3eafe507 5357 unsigned int entry = tp->cur_tx % NUM_TX_DESC;
1da177e4
LT
5358 struct TxDesc *txd = tp->TxDescArray + entry;
5359 void __iomem *ioaddr = tp->mmio_addr;
48addcc9 5360 struct device *d = &tp->pci_dev->dev;
1da177e4
LT
5361 dma_addr_t mapping;
5362 u32 status, len;
2b7b4318 5363 u32 opts[2];
3eafe507 5364 int frags;
5b0384f4 5365
1da177e4 5366 if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
bf82c189 5367 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
3eafe507 5368 goto err_stop_0;
1da177e4
LT
5369 }
5370
5371 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
3eafe507
SG
5372 goto err_stop_0;
5373
5374 len = skb_headlen(skb);
48addcc9 5375 mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
d827d86b
SG
5376 if (unlikely(dma_mapping_error(d, mapping))) {
5377 if (net_ratelimit())
5378 netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
3eafe507 5379 goto err_dma_0;
d827d86b 5380 }
3eafe507
SG
5381
5382 tp->tx_skb[entry].len = len;
5383 txd->addr = cpu_to_le64(mapping);
1da177e4 5384
2b7b4318
FR
5385 opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
5386 opts[0] = DescOwn;
1da177e4 5387
2b7b4318
FR
5388 rtl8169_tso_csum(tp, skb, opts);
5389
5390 frags = rtl8169_xmit_frags(tp, skb, opts);
3eafe507
SG
5391 if (frags < 0)
5392 goto err_dma_1;
5393 else if (frags)
2b7b4318 5394 opts[0] |= FirstFrag;
3eafe507 5395 else {
2b7b4318 5396 opts[0] |= FirstFrag | LastFrag;
1da177e4
LT
5397 tp->tx_skb[entry].skb = skb;
5398 }
5399
2b7b4318
FR
5400 txd->opts2 = cpu_to_le32(opts[1]);
5401
036dafa2
IM
5402 netdev_sent_queue(dev, skb->len);
5403
5047fb5d
RC
5404 skb_tx_timestamp(skb);
5405
1da177e4
LT
5406 wmb();
5407
cecb5fd7 5408 /* Anti gcc 2.95.3 bugware (sic) */
2b7b4318 5409 status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
1da177e4
LT
5410 txd->opts1 = cpu_to_le32(status);
5411
1da177e4
LT
5412 tp->cur_tx += frags + 1;
5413
4c020a96 5414 wmb();
1da177e4 5415
cecb5fd7 5416 RTL_W8(TxPoll, NPQ);
1da177e4 5417
da78dbff
FR
5418 mmiowb();
5419
1da177e4 5420 if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
ae1f23fb
FR
5421 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
5422 * not miss a ring update when it notices a stopped queue.
5423 */
5424 smp_wmb();
1da177e4 5425 netif_stop_queue(dev);
ae1f23fb
FR
5426 /* Sync with rtl_tx:
5427 * - publish queue status and cur_tx ring index (write barrier)
5428 * - refresh dirty_tx ring index (read barrier).
5429 * May the current thread have a pessimistic view of the ring
5430 * status and forget to wake up queue, a racing rtl_tx thread
5431 * can't.
5432 */
1e874e04 5433 smp_mb();
1da177e4
LT
5434 if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
5435 netif_wake_queue(dev);
5436 }
5437
61357325 5438 return NETDEV_TX_OK;
1da177e4 5439
3eafe507 5440err_dma_1:
48addcc9 5441 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
3eafe507
SG
5442err_dma_0:
5443 dev_kfree_skb(skb);
5444 dev->stats.tx_dropped++;
5445 return NETDEV_TX_OK;
5446
5447err_stop_0:
1da177e4 5448 netif_stop_queue(dev);
cebf8cc7 5449 dev->stats.tx_dropped++;
61357325 5450 return NETDEV_TX_BUSY;
1da177e4
LT
5451}
5452
5453static void rtl8169_pcierr_interrupt(struct net_device *dev)
5454{
5455 struct rtl8169_private *tp = netdev_priv(dev);
5456 struct pci_dev *pdev = tp->pci_dev;
1da177e4
LT
5457 u16 pci_status, pci_cmd;
5458
5459 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
5460 pci_read_config_word(pdev, PCI_STATUS, &pci_status);
5461
bf82c189
JP
5462 netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
5463 pci_cmd, pci_status);
1da177e4
LT
5464
5465 /*
5466 * The recovery sequence below admits a very elaborate explanation:
5467 * - it seems to work;
d03902b8
FR
5468 * - I did not see what else could be done;
5469 * - it makes iop3xx happy.
1da177e4
LT
5470 *
5471 * Feel free to adjust to your needs.
5472 */
a27993f3 5473 if (pdev->broken_parity_status)
d03902b8
FR
5474 pci_cmd &= ~PCI_COMMAND_PARITY;
5475 else
5476 pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
5477
5478 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
1da177e4
LT
5479
5480 pci_write_config_word(pdev, PCI_STATUS,
5481 pci_status & (PCI_STATUS_DETECTED_PARITY |
5482 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
5483 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
5484
5485 /* The infamous DAC f*ckup only happens at boot time */
5486 if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
e6de30d6 5487 void __iomem *ioaddr = tp->mmio_addr;
5488
bf82c189 5489 netif_info(tp, intr, dev, "disabling PCI DAC\n");
1da177e4
LT
5490 tp->cp_cmd &= ~PCIDAC;
5491 RTL_W16(CPlusCmd, tp->cp_cmd);
5492 dev->features &= ~NETIF_F_HIGHDMA;
1da177e4
LT
5493 }
5494
e6de30d6 5495 rtl8169_hw_reset(tp);
d03902b8 5496
98ddf986 5497 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
1da177e4
LT
5498}
5499
036dafa2
IM
5500struct rtl_txc {
5501 int packets;
5502 int bytes;
5503};
5504
da78dbff 5505static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
1da177e4 5506{
036dafa2 5507 struct rtl8169_stats *tx_stats = &tp->tx_stats;
1da177e4 5508 unsigned int dirty_tx, tx_left;
036dafa2 5509 struct rtl_txc txc = { 0, 0 };
1da177e4 5510
1da177e4
LT
5511 dirty_tx = tp->dirty_tx;
5512 smp_rmb();
5513 tx_left = tp->cur_tx - dirty_tx;
5514
5515 while (tx_left > 0) {
5516 unsigned int entry = dirty_tx % NUM_TX_DESC;
5517 struct ring_info *tx_skb = tp->tx_skb + entry;
1da177e4
LT
5518 u32 status;
5519
5520 rmb();
5521 status = le32_to_cpu(tp->TxDescArray[entry].opts1);
5522 if (status & DescOwn)
5523 break;
5524
48addcc9
SG
5525 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5526 tp->TxDescArray + entry);
1da177e4 5527 if (status & LastFrag) {
036dafa2
IM
5528 struct sk_buff *skb = tx_skb->skb;
5529
5530 txc.packets++;
5531 txc.bytes += skb->len;
5532 dev_kfree_skb(skb);
1da177e4
LT
5533 tx_skb->skb = NULL;
5534 }
5535 dirty_tx++;
5536 tx_left--;
5537 }
5538
036dafa2
IM
5539 u64_stats_update_begin(&tx_stats->syncp);
5540 tx_stats->packets += txc.packets;
5541 tx_stats->bytes += txc.bytes;
5542 u64_stats_update_end(&tx_stats->syncp);
5543
5544 netdev_completed_queue(dev, txc.packets, txc.bytes);
5545
1da177e4
LT
5546 if (tp->dirty_tx != dirty_tx) {
5547 tp->dirty_tx = dirty_tx;
ae1f23fb
FR
5548 /* Sync with rtl8169_start_xmit:
5549 * - publish dirty_tx ring index (write barrier)
5550 * - refresh cur_tx ring index and queue status (read barrier)
5551 * Even if the current thread misses the stopped queue condition,
5552 * a racing xmit thread can only see a correct view of the
5553 * ring status.
5554 */
1e874e04 5555 smp_mb();
1da177e4
LT
5556 if (netif_queue_stopped(dev) &&
5557 (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
5558 netif_wake_queue(dev);
5559 }
d78ae2dc
FR
5560 /*
5561 * 8168 hack: TxPoll requests are lost when the Tx packets are
5562 * too close. Let's kick an extra TxPoll request when a burst
5563 * of start_xmit activity is detected (if it is not detected,
5564 * it is slow enough). -- FR
5565 */
da78dbff
FR
5566 if (tp->cur_tx != dirty_tx) {
5567 void __iomem *ioaddr = tp->mmio_addr;
5568
d78ae2dc 5569 RTL_W8(TxPoll, NPQ);
da78dbff 5570 }
1da177e4
LT
5571 }
5572}
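/*
 * A sketch of the lockless statistics update done at the end of rtl_tx():
 * the writer brackets its 64-bit counter updates with u64_stats_update_begin()
 * and u64_stats_update_end() on the per-ring syncp (from
 * <linux/u64_stats_sync.h>) so that 32-bit readers can detect and retry a
 * torn read instead of taking a lock. struct my_ring_stats is a hypothetical
 * container mirroring rtl8169_stats.
 */
struct my_ring_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

static void my_stats_add(struct my_ring_stats *s, unsigned int pkts,
			 unsigned int bytes)
{
	u64_stats_update_begin(&s->syncp);
	s->packets += pkts;
	s->bytes += bytes;
	u64_stats_update_end(&s->syncp);
}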
5573
126fa4b9
FR
5574static inline int rtl8169_fragmented_frame(u32 status)
5575{
5576 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
5577}
5578
adea1ac7 5579static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
1da177e4 5580{
1da177e4
LT
5581 u32 status = opts1 & RxProtoMask;
5582
5583 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
d5d3ebe3 5584 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
1da177e4
LT
5585 skb->ip_summed = CHECKSUM_UNNECESSARY;
5586 else
bc8acf2c 5587 skb_checksum_none_assert(skb);
1da177e4
LT
5588}
5589
6f0333b8
ED
5590static struct sk_buff *rtl8169_try_rx_copy(void *data,
5591 struct rtl8169_private *tp,
5592 int pkt_size,
5593 dma_addr_t addr)
1da177e4 5594{
b449655f 5595 struct sk_buff *skb;
48addcc9 5596 struct device *d = &tp->pci_dev->dev;
b449655f 5597
6f0333b8 5598 data = rtl8169_align(data);
48addcc9 5599 dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
6f0333b8
ED
5600 prefetch(data);
5601 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
5602 if (skb)
5603 memcpy(skb->data, data, pkt_size);
48addcc9
SG
5604 dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
5605
6f0333b8 5606 return skb;
1da177e4
LT
5607}
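/*
 * A sketch of what happens to a buffer copied by the helper above once the
 * receive loop accepts it (my_rx_deliver is a hypothetical wrapper around
 * the same calls rtl_rx() uses): the payload length is accounted, the
 * protocol is derived from the Ethernet header, and the skb is handed to
 * GRO/the stack from NAPI context.
 */
static void my_rx_deliver(struct napi_struct *napi, struct net_device *dev,
			  struct sk_buff *skb, int pkt_size)
{
	skb_put(skb, pkt_size);
	skb->protocol = eth_type_trans(skb, dev);
	napi_gro_receive(napi, skb);
}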
5608
da78dbff 5609static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
1da177e4
LT
5610{
5611 unsigned int cur_rx, rx_left;
6f0333b8 5612 unsigned int count;
1da177e4 5613
1da177e4
LT
5614 cur_rx = tp->cur_rx;
5615 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
865c652d 5616 rx_left = min(rx_left, budget);
1da177e4 5617
4dcb7d33 5618 for (; rx_left > 0; rx_left--, cur_rx++) {
1da177e4 5619 unsigned int entry = cur_rx % NUM_RX_DESC;
126fa4b9 5620 struct RxDesc *desc = tp->RxDescArray + entry;
1da177e4
LT
5621 u32 status;
5622
5623 rmb();
e03f33af 5624 status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
1da177e4
LT
5625
5626 if (status & DescOwn)
5627 break;
4dcb7d33 5628 if (unlikely(status & RxRES)) {
bf82c189
JP
5629 netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
5630 status);
cebf8cc7 5631 dev->stats.rx_errors++;
1da177e4 5632 if (status & (RxRWT | RxRUNT))
cebf8cc7 5633 dev->stats.rx_length_errors++;
1da177e4 5634 if (status & RxCRC)
cebf8cc7 5635 dev->stats.rx_crc_errors++;
9dccf611 5636 if (status & RxFOVF) {
da78dbff 5637 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
cebf8cc7 5638 dev->stats.rx_fifo_errors++;
9dccf611 5639 }
6bbe021d
BG
5640 if ((status & (RxRUNT | RxCRC)) &&
5641 !(status & (RxRWT | RxFOVF)) &&
5642 (dev->features & NETIF_F_RXALL))
5643 goto process_pkt;
5644
6f0333b8 5645 rtl8169_mark_to_asic(desc, rx_buf_sz);
1da177e4 5646 } else {
6f0333b8 5647 struct sk_buff *skb;
6bbe021d
BG
5648 dma_addr_t addr;
5649 int pkt_size;
5650
5651process_pkt:
5652 addr = le64_to_cpu(desc->addr);
79d0c1d2
BG
5653 if (likely(!(dev->features & NETIF_F_RXFCS)))
5654 pkt_size = (status & 0x00003fff) - 4;
5655 else
5656 pkt_size = status & 0x00003fff;
1da177e4 5657
126fa4b9
FR
5658 /*
5659 * The driver does not support incoming fragmented
5660 * frames. They are treated as a symptom of frames larger
5661 * than the MTU.
5662 */
5663 if (unlikely(rtl8169_fragmented_frame(status))) {
cebf8cc7
FR
5664 dev->stats.rx_dropped++;
5665 dev->stats.rx_length_errors++;
6f0333b8 5666 rtl8169_mark_to_asic(desc, rx_buf_sz);
4dcb7d33 5667 continue;
126fa4b9
FR
5668 }
5669
6f0333b8
ED
5670 skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
5671 tp, pkt_size, addr);
5672 rtl8169_mark_to_asic(desc, rx_buf_sz);
5673 if (!skb) {
5674 dev->stats.rx_dropped++;
5675 continue;
1da177e4
LT
5676 }
5677
adea1ac7 5678 rtl8169_rx_csum(skb, status);
1da177e4
LT
5679 skb_put(skb, pkt_size);
5680 skb->protocol = eth_type_trans(skb, dev);
5681
7a8fc77b
FR
5682 rtl8169_rx_vlan_tag(desc, skb);
5683
56de414c 5684 napi_gro_receive(&tp->napi, skb);
1da177e4 5685
8027aa24
JW
5686 u64_stats_update_begin(&tp->rx_stats.syncp);
5687 tp->rx_stats.packets++;
5688 tp->rx_stats.bytes += pkt_size;
5689 u64_stats_update_end(&tp->rx_stats.syncp);
1da177e4 5690 }
6dccd16b
FR
5691
5692 /* Workaround for AMD platforms. */
95e0918d 5693 if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
6dccd16b
FR
5694 (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
5695 desc->opts2 = 0;
5696 cur_rx++;
5697 }
1da177e4
LT
5698 }
5699
5700 count = cur_rx - tp->cur_rx;
5701 tp->cur_rx = cur_rx;
5702
6f0333b8 5703 tp->dirty_rx += count;
1da177e4
LT
5704
5705 return count;
5706}
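/*
 * A sketch of the ownership test driving the loop above, assuming the
 * driver's RxDesc/DescOwn definitions and a hypothetical helper name: the
 * read barrier orders this descriptor's status read against the previous
 * iteration, and only when DescOwn is clear may the CPU use the descriptor.
 */
static bool my_desc_ready(struct RxDesc *desc, u32 opts1_mask, u32 *status)
{
	rmb();
	*status = le32_to_cpu(desc->opts1) & opts1_mask;

	/* DescOwn set means the NIC still owns this descriptor. */
	return !(*status & DescOwn);
}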
5707
07d3f51f 5708static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
1da177e4 5709{
07d3f51f 5710 struct net_device *dev = dev_instance;
1da177e4 5711 struct rtl8169_private *tp = netdev_priv(dev);
1da177e4 5712 int handled = 0;
9085cdfa 5713 u16 status;
1da177e4 5714
9085cdfa 5715 status = rtl_get_events(tp);
da78dbff
FR
5716 if (status && status != 0xffff) {
5717 status &= RTL_EVENT_NAPI | tp->event_slow;
5718 if (status) {
5719 handled = 1;
1da177e4 5720
da78dbff
FR
5721 rtl_irq_disable(tp);
5722 napi_schedule(&tp->napi);
f11a377b 5723 }
da78dbff
FR
5724 }
5725 return IRQ_RETVAL(handled);
5726}
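/*
 * A sketch of the interrupt hand-off used above, with hypothetical register
 * accessors (my_get_events, my_irq_disable) and private struct: the hard IRQ
 * only checks that the device raised an event, masks further interrupts and
 * defers all work to the NAPI poll routine.
 */
static irqreturn_t my_isr(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct my_priv *tp = netdev_priv(dev);

	if (!my_get_events(tp))
		return IRQ_NONE;	/* not ours (the line may be shared) */

	my_irq_disable(tp);
	napi_schedule(&tp->napi);
	return IRQ_HANDLED;
}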
1da177e4 5727
da78dbff
FR
5728/*
5729 * Workqueue context.
5730 */
5731static void rtl_slow_event_work(struct rtl8169_private *tp)
5732{
5733 struct net_device *dev = tp->dev;
5734 u16 status;
5735
5736 status = rtl_get_events(tp) & tp->event_slow;
5737 rtl_ack_events(tp, status);
1da177e4 5738
da78dbff
FR
5739 if (unlikely(status & RxFIFOOver)) {
5740 switch (tp->mac_version) {
5741 /* Workaround for Rx FIFO overflow */
5742 case RTL_GIGA_MAC_VER_11:
5743 netif_stop_queue(dev);
934714d0
FR
5744 /* XXX - Hack alert. See rtl_task(). */
5745 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
da78dbff 5746 default:
f11a377b
DD
5747 break;
5748 }
da78dbff 5749 }
1da177e4 5750
da78dbff
FR
5751 if (unlikely(status & SYSErr))
5752 rtl8169_pcierr_interrupt(dev);
0e485150 5753
da78dbff
FR
5754 if (status & LinkChg)
5755 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
1da177e4 5756
da78dbff
FR
5757 napi_disable(&tp->napi);
5758 rtl_irq_disable(tp);
5759
5760 napi_enable(&tp->napi);
5761 napi_schedule(&tp->napi);
1da177e4
LT
5762}
5763
4422bcd4
FR
5764static void rtl_task(struct work_struct *work)
5765{
da78dbff
FR
5766 static const struct {
5767 int bitnr;
5768 void (*action)(struct rtl8169_private *);
5769 } rtl_work[] = {
934714d0 5770 /* XXX - keep rtl_slow_event_work() as first element. */
da78dbff
FR
5771 { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
5772 { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
5773 { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
5774 };
4422bcd4
FR
5775 struct rtl8169_private *tp =
5776 container_of(work, struct rtl8169_private, wk.work);
da78dbff
FR
5777 struct net_device *dev = tp->dev;
5778 int i;
5779
5780 rtl_lock_work(tp);
5781
6c4a70c5
FR
5782 if (!netif_running(dev) ||
5783 !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
da78dbff
FR
5784 goto out_unlock;
5785
5786 for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
5787 bool pending;
5788
da78dbff 5789 pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
da78dbff
FR
5790 if (pending)
5791 rtl_work[i].action(tp);
5792 }
4422bcd4 5793
da78dbff
FR
5794out_unlock:
5795 rtl_unlock_work(tp);
4422bcd4
FR
5796}
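/*
 * A sketch of the bit-flag dispatch performed above, with hypothetical flag
 * numbers and stub handlers: each pending bit is consumed atomically with
 * test_and_clear_bit(), so every handler runs at most once per scheduling of
 * the work item.
 */
static void my_handle_slow_events(void) { }	/* hypothetical handlers */
static void my_handle_reset(void) { }

static void my_dispatch(unsigned long *flags)
{
	static const struct {
		int bitnr;
		void (*action)(void);
	} table[] = {
		{ 0, my_handle_slow_events },
		{ 1, my_handle_reset },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(table); i++)
		if (test_and_clear_bit(table[i].bitnr, flags))
			table[i].action();
}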
5797
bea3348e 5798static int rtl8169_poll(struct napi_struct *napi, int budget)
1da177e4 5799{
bea3348e
SH
5800 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
5801 struct net_device *dev = tp->dev;
da78dbff
FR
5802 u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
5803 int work_done = 0;
5804 u16 status;
5805
5806 status = rtl_get_events(tp);
5807 rtl_ack_events(tp, status & ~tp->event_slow);
5808
5809 if (status & RTL_EVENT_NAPI_RX)
5810 work_done = rtl_rx(dev, tp, (u32) budget);
5811
5812 if (status & RTL_EVENT_NAPI_TX)
5813 rtl_tx(dev, tp);
1da177e4 5814
da78dbff
FR
5815 if (status & tp->event_slow) {
5816 enable_mask &= ~tp->event_slow;
5817
5818 rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
5819 }
1da177e4 5820
bea3348e 5821 if (work_done < budget) {
288379f0 5822 napi_complete(napi);
f11a377b 5823
da78dbff
FR
5824 rtl_irq_enable(tp, enable_mask);
5825 mmiowb();
1da177e4
LT
5826 }
5827
bea3348e 5828 return work_done;
1da177e4 5829}
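/*
 * A sketch of the NAPI completion contract followed above (my_rx and
 * my_irq_enable are hypothetical): the poll routine may only call
 * napi_complete() and re-enable device interrupts when it did strictly less
 * work than the budget; otherwise it returns and gets polled again.
 */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *tp = container_of(napi, struct my_priv, napi);
	int work_done = my_rx(tp, budget);

	if (work_done < budget) {
		napi_complete(napi);
		my_irq_enable(tp);
	}
	return work_done;
}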
1da177e4 5830
523a6094
FR
5831static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
5832{
5833 struct rtl8169_private *tp = netdev_priv(dev);
5834
5835 if (tp->mac_version > RTL_GIGA_MAC_VER_06)
5836 return;
5837
5838 dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
5839 RTL_W32(RxMissed, 0);
5840}
5841
1da177e4
LT
5842static void rtl8169_down(struct net_device *dev)
5843{
5844 struct rtl8169_private *tp = netdev_priv(dev);
5845 void __iomem *ioaddr = tp->mmio_addr;
1da177e4 5846
4876cc1e 5847 del_timer_sync(&tp->timer);
1da177e4 5848
93dd79e8 5849 napi_disable(&tp->napi);
da78dbff 5850 netif_stop_queue(dev);
1da177e4 5851
92fc43b4 5852 rtl8169_hw_reset(tp);
323bb685
SG
5853 /*
5854 * At this point device interrupts cannot be enabled in any function,
209e5ac8
FR
5855 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
5856 * and napi is disabled (rtl8169_poll).
323bb685 5857 */
523a6094 5858 rtl8169_rx_missed(dev, ioaddr);
1da177e4 5859
1da177e4 5860 /* Give a racing hard_start_xmit a few cycles to complete. */
da78dbff 5861 synchronize_sched();
1da177e4 5862
1da177e4
LT
5863 rtl8169_tx_clear(tp);
5864
5865 rtl8169_rx_clear(tp);
065c27c1 5866
5867 rtl_pll_power_down(tp);
1da177e4
LT
5868}
5869
5870static int rtl8169_close(struct net_device *dev)
5871{
5872 struct rtl8169_private *tp = netdev_priv(dev);
5873 struct pci_dev *pdev = tp->pci_dev;
5874
e1759441
RW
5875 pm_runtime_get_sync(&pdev->dev);
5876
cecb5fd7 5877 /* Update counters before going down */
355423d0
IV
5878 rtl8169_update_counters(dev);
5879
da78dbff 5880 rtl_lock_work(tp);
6c4a70c5 5881 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
da78dbff 5882
1da177e4 5883 rtl8169_down(dev);
da78dbff 5884 rtl_unlock_work(tp);
1da177e4 5885
92a7c4e7 5886 free_irq(pdev->irq, dev);
1da177e4 5887
82553bb6
SG
5888 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
5889 tp->RxPhyAddr);
5890 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
5891 tp->TxPhyAddr);
1da177e4
LT
5892 tp->TxDescArray = NULL;
5893 tp->RxDescArray = NULL;
5894
e1759441
RW
5895 pm_runtime_put_sync(&pdev->dev);
5896
1da177e4
LT
5897 return 0;
5898}
5899
dc1c00ce
FR
5900#ifdef CONFIG_NET_POLL_CONTROLLER
5901static void rtl8169_netpoll(struct net_device *dev)
5902{
5903 struct rtl8169_private *tp = netdev_priv(dev);
5904
5905 rtl8169_interrupt(tp->pci_dev->irq, dev);
5906}
5907#endif
5908
df43ac78
FR
5909static int rtl_open(struct net_device *dev)
5910{
5911 struct rtl8169_private *tp = netdev_priv(dev);
5912 void __iomem *ioaddr = tp->mmio_addr;
5913 struct pci_dev *pdev = tp->pci_dev;
5914 int retval = -ENOMEM;
5915
5916 pm_runtime_get_sync(&pdev->dev);
5917
5918 /*
5919 * Rx and Tx descriptors need 256-byte alignment.
5920 * dma_alloc_coherent provides more.
5921 */
5922 tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
5923 &tp->TxPhyAddr, GFP_KERNEL);
5924 if (!tp->TxDescArray)
5925 goto err_pm_runtime_put;
5926
5927 tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
5928 &tp->RxPhyAddr, GFP_KERNEL);
5929 if (!tp->RxDescArray)
5930 goto err_free_tx_0;
5931
5932 retval = rtl8169_init_ring(dev);
5933 if (retval < 0)
5934 goto err_free_rx_1;
5935
5936 INIT_WORK(&tp->wk.work, rtl_task);
5937
5938 smp_mb();
5939
5940 rtl_request_firmware(tp);
5941
92a7c4e7 5942 retval = request_irq(pdev->irq, rtl8169_interrupt,
df43ac78
FR
5943 (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
5944 dev->name, dev);
5945 if (retval < 0)
5946 goto err_release_fw_2;
5947
5948 rtl_lock_work(tp);
5949
5950 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
5951
5952 napi_enable(&tp->napi);
5953
5954 rtl8169_init_phy(dev, tp);
5955
5956 __rtl8169_set_features(dev, dev->features);
5957
5958 rtl_pll_power_up(tp);
5959
5960 rtl_hw_start(dev);
5961
5962 netif_start_queue(dev);
5963
5964 rtl_unlock_work(tp);
5965
5966 tp->saved_wolopts = 0;
5967 pm_runtime_put_noidle(&pdev->dev);
5968
5969 rtl8169_check_link_status(dev, tp, ioaddr);
5970out:
5971 return retval;
5972
5973err_release_fw_2:
5974 rtl_release_firmware(tp);
5975 rtl8169_rx_clear(tp);
5976err_free_rx_1:
5977 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
5978 tp->RxPhyAddr);
5979 tp->RxDescArray = NULL;
5980err_free_tx_0:
5981 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
5982 tp->TxPhyAddr);
5983 tp->TxDescArray = NULL;
5984err_pm_runtime_put:
5985 pm_runtime_put_noidle(&pdev->dev);
5986 goto out;
5987}
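/*
 * A sketch of the descriptor ring setup done in rtl_open(), with hypothetical
 * ring sizes: dma_alloc_coherent() returns both a CPU pointer and the bus
 * address that gets programmed into the NIC, and each allocation is undone
 * on the error path in reverse order.
 */
#define MY_TX_RING_BYTES	(64 * 16)	/* hypothetical sizes */
#define MY_RX_RING_BYTES	(256 * 16)

static int my_alloc_rings(struct pci_dev *pdev, void **tx, dma_addr_t *tx_phy,
			  void **rx, dma_addr_t *rx_phy)
{
	*tx = dma_alloc_coherent(&pdev->dev, MY_TX_RING_BYTES, tx_phy,
				 GFP_KERNEL);
	if (!*tx)
		return -ENOMEM;

	*rx = dma_alloc_coherent(&pdev->dev, MY_RX_RING_BYTES, rx_phy,
				 GFP_KERNEL);
	if (!*rx) {
		dma_free_coherent(&pdev->dev, MY_TX_RING_BYTES, *tx, *tx_phy);
		*tx = NULL;
		return -ENOMEM;
	}
	return 0;
}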
5988
8027aa24
JW
5989static struct rtnl_link_stats64 *
5990rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1da177e4
LT
5991{
5992 struct rtl8169_private *tp = netdev_priv(dev);
5993 void __iomem *ioaddr = tp->mmio_addr;
8027aa24 5994 unsigned int start;
1da177e4 5995
da78dbff 5996 if (netif_running(dev))
523a6094 5997 rtl8169_rx_missed(dev, ioaddr);
5b0384f4 5998
8027aa24
JW
5999 do {
6000 start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
6001 stats->rx_packets = tp->rx_stats.packets;
6002 stats->rx_bytes = tp->rx_stats.bytes;
6003 } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
6004
6005
6006 do {
6007 start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
6008 stats->tx_packets = tp->tx_stats.packets;
6009 stats->tx_bytes = tp->tx_stats.bytes;
6010 } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
6011
6012 stats->rx_dropped = dev->stats.rx_dropped;
6013 stats->tx_dropped = dev->stats.tx_dropped;
6014 stats->rx_length_errors = dev->stats.rx_length_errors;
6015 stats->rx_errors = dev->stats.rx_errors;
6016 stats->rx_crc_errors = dev->stats.rx_crc_errors;
6017 stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
6018 stats->rx_missed_errors = dev->stats.rx_missed_errors;
6019
6020 return stats;
1da177e4
LT
6021}
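/*
 * A sketch of the reader side of the u64_stats protocol used above, matching
 * the hypothetical my_ring_stats writer shown after rtl_tx(): the fetch/retry
 * loop re-reads the counters until no writer interleaved, which is what makes
 * the lockless 64-bit counters consistent on 32-bit hosts.
 */
static void my_stats_read(struct my_ring_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
}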
6022
861ab440 6023static void rtl8169_net_suspend(struct net_device *dev)
5d06a99f 6024{
065c27c1 6025 struct rtl8169_private *tp = netdev_priv(dev);
6026
5d06a99f 6027 if (!netif_running(dev))
861ab440 6028 return;
5d06a99f
FR
6029
6030 netif_device_detach(dev);
6031 netif_stop_queue(dev);
da78dbff
FR
6032
6033 rtl_lock_work(tp);
6034 napi_disable(&tp->napi);
6c4a70c5 6035 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
da78dbff
FR
6036 rtl_unlock_work(tp);
6037
6038 rtl_pll_power_down(tp);
861ab440
RW
6039}
6040
6041#ifdef CONFIG_PM
6042
6043static int rtl8169_suspend(struct device *device)
6044{
6045 struct pci_dev *pdev = to_pci_dev(device);
6046 struct net_device *dev = pci_get_drvdata(pdev);
5d06a99f 6047
861ab440 6048 rtl8169_net_suspend(dev);
1371fa6d 6049
5d06a99f
FR
6050 return 0;
6051}
6052
e1759441
RW
6053static void __rtl8169_resume(struct net_device *dev)
6054{
065c27c1 6055 struct rtl8169_private *tp = netdev_priv(dev);
6056
e1759441 6057 netif_device_attach(dev);
065c27c1 6058
6059 rtl_pll_power_up(tp);
6060
cff4c162
AS
6061 rtl_lock_work(tp);
6062 napi_enable(&tp->napi);
6c4a70c5 6063 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
cff4c162 6064 rtl_unlock_work(tp);
da78dbff 6065
98ddf986 6066 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
e1759441
RW
6067}
6068
861ab440 6069static int rtl8169_resume(struct device *device)
5d06a99f 6070{
861ab440 6071 struct pci_dev *pdev = to_pci_dev(device);
5d06a99f 6072 struct net_device *dev = pci_get_drvdata(pdev);
fccec10b
SG
6073 struct rtl8169_private *tp = netdev_priv(dev);
6074
6075 rtl8169_init_phy(dev, tp);
5d06a99f 6076
e1759441
RW
6077 if (netif_running(dev))
6078 __rtl8169_resume(dev);
5d06a99f 6079
e1759441
RW
6080 return 0;
6081}
6082
6083static int rtl8169_runtime_suspend(struct device *device)
6084{
6085 struct pci_dev *pdev = to_pci_dev(device);
6086 struct net_device *dev = pci_get_drvdata(pdev);
6087 struct rtl8169_private *tp = netdev_priv(dev);
6088
6089 if (!tp->TxDescArray)
6090 return 0;
6091
da78dbff 6092 rtl_lock_work(tp);
e1759441
RW
6093 tp->saved_wolopts = __rtl8169_get_wol(tp);
6094 __rtl8169_set_wol(tp, WAKE_ANY);
da78dbff 6095 rtl_unlock_work(tp);
e1759441
RW
6096
6097 rtl8169_net_suspend(dev);
6098
6099 return 0;
6100}
6101
6102static int rtl8169_runtime_resume(struct device *device)
6103{
6104 struct pci_dev *pdev = to_pci_dev(device);
6105 struct net_device *dev = pci_get_drvdata(pdev);
6106 struct rtl8169_private *tp = netdev_priv(dev);
6107
6108 if (!tp->TxDescArray)
6109 return 0;
6110
da78dbff 6111 rtl_lock_work(tp);
e1759441
RW
6112 __rtl8169_set_wol(tp, tp->saved_wolopts);
6113 tp->saved_wolopts = 0;
da78dbff 6114 rtl_unlock_work(tp);
e1759441 6115
fccec10b
SG
6116 rtl8169_init_phy(dev, tp);
6117
e1759441 6118 __rtl8169_resume(dev);
5d06a99f 6119
5d06a99f
FR
6120 return 0;
6121}
6122
e1759441
RW
6123static int rtl8169_runtime_idle(struct device *device)
6124{
6125 struct pci_dev *pdev = to_pci_dev(device);
6126 struct net_device *dev = pci_get_drvdata(pdev);
6127 struct rtl8169_private *tp = netdev_priv(dev);
6128
e4fbce74 6129 return tp->TxDescArray ? -EBUSY : 0;
e1759441
RW
6130}
6131
47145210 6132static const struct dev_pm_ops rtl8169_pm_ops = {
cecb5fd7
FR
6133 .suspend = rtl8169_suspend,
6134 .resume = rtl8169_resume,
6135 .freeze = rtl8169_suspend,
6136 .thaw = rtl8169_resume,
6137 .poweroff = rtl8169_suspend,
6138 .restore = rtl8169_resume,
6139 .runtime_suspend = rtl8169_runtime_suspend,
6140 .runtime_resume = rtl8169_runtime_resume,
6141 .runtime_idle = rtl8169_runtime_idle,
861ab440
RW
6142};
6143
6144#define RTL8169_PM_OPS (&rtl8169_pm_ops)
6145
6146#else /* !CONFIG_PM */
6147
6148#define RTL8169_PM_OPS NULL
6149
6150#endif /* !CONFIG_PM */
6151
649b3b8c 6152static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
6153{
6154 void __iomem *ioaddr = tp->mmio_addr;
6155
6156 /* WoL fails with 8168b when the receiver is disabled. */
6157 switch (tp->mac_version) {
6158 case RTL_GIGA_MAC_VER_11:
6159 case RTL_GIGA_MAC_VER_12:
6160 case RTL_GIGA_MAC_VER_17:
6161 pci_clear_master(tp->pci_dev);
6162
6163 RTL_W8(ChipCmd, CmdRxEnb);
6164 /* PCI commit */
6165 RTL_R8(ChipCmd);
6166 break;
6167 default:
6168 break;
6169 }
6170}
6171
1765f95d
FR
6172static void rtl_shutdown(struct pci_dev *pdev)
6173{
861ab440 6174 struct net_device *dev = pci_get_drvdata(pdev);
4bb3f522 6175 struct rtl8169_private *tp = netdev_priv(dev);
2a15cd2f 6176 struct device *d = &pdev->dev;
6177
6178 pm_runtime_get_sync(d);
861ab440
RW
6179
6180 rtl8169_net_suspend(dev);
1765f95d 6181
cecb5fd7 6182 /* Restore original MAC address */
cc098dc7
IV
6183 rtl_rar_set(tp, dev->perm_addr);
6184
92fc43b4 6185 rtl8169_hw_reset(tp);
4bb3f522 6186
861ab440 6187 if (system_state == SYSTEM_POWER_OFF) {
649b3b8c 6188 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
6189 rtl_wol_suspend_quirk(tp);
6190 rtl_wol_shutdown_quirk(tp);
ca52efd5 6191 }
6192
861ab440
RW
6193 pci_wake_from_d3(pdev, true);
6194 pci_set_power_state(pdev, PCI_D3hot);
6195 }
2a15cd2f 6196
6197 pm_runtime_put_noidle(d);
861ab440 6198}
5d06a99f 6199
e27566ed
FR
6200static void __devexit rtl_remove_one(struct pci_dev *pdev)
6201{
6202 struct net_device *dev = pci_get_drvdata(pdev);
6203 struct rtl8169_private *tp = netdev_priv(dev);
6204
6205 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6206 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6207 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6208 rtl8168_driver_stop(tp);
6209 }
6210
6211 cancel_work_sync(&tp->wk.work);
6212
6213 unregister_netdev(dev);
6214
6215 rtl_release_firmware(tp);
6216
6217 if (pci_dev_run_wake(pdev))
6218 pm_runtime_get_noresume(&pdev->dev);
6219
6220 /* restore original MAC address */
6221 rtl_rar_set(tp, dev->perm_addr);
6222
6223 rtl_disable_msi(pdev, tp);
6224 rtl8169_release_board(pdev, dev, tp->mmio_addr);
6225 pci_set_drvdata(pdev, NULL);
6226}
6227
fa9c385e 6228static const struct net_device_ops rtl_netdev_ops = {
df43ac78 6229 .ndo_open = rtl_open,
fa9c385e
FR
6230 .ndo_stop = rtl8169_close,
6231 .ndo_get_stats64 = rtl8169_get_stats64,
6232 .ndo_start_xmit = rtl8169_start_xmit,
6233 .ndo_tx_timeout = rtl8169_tx_timeout,
6234 .ndo_validate_addr = eth_validate_addr,
6235 .ndo_change_mtu = rtl8169_change_mtu,
6236 .ndo_fix_features = rtl8169_fix_features,
6237 .ndo_set_features = rtl8169_set_features,
6238 .ndo_set_mac_address = rtl_set_mac_address,
6239 .ndo_do_ioctl = rtl8169_ioctl,
6240 .ndo_set_rx_mode = rtl_set_rx_mode,
6241#ifdef CONFIG_NET_POLL_CONTROLLER
6242 .ndo_poll_controller = rtl8169_netpoll,
6243#endif
6244
6245};
6246
31fa8b18
FR
6247static const struct rtl_cfg_info {
6248 void (*hw_start)(struct net_device *);
6249 unsigned int region;
6250 unsigned int align;
6251 u16 event_slow;
6252 unsigned features;
6253 u8 default_ver;
6254} rtl_cfg_infos [] = {
6255 [RTL_CFG_0] = {
6256 .hw_start = rtl_hw_start_8169,
6257 .region = 1,
6258 .align = 0,
6259 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
6260 .features = RTL_FEATURE_GMII,
6261 .default_ver = RTL_GIGA_MAC_VER_01,
6262 },
6263 [RTL_CFG_1] = {
6264 .hw_start = rtl_hw_start_8168,
6265 .region = 2,
6266 .align = 8,
6267 .event_slow = SYSErr | LinkChg | RxOverflow,
6268 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
6269 .default_ver = RTL_GIGA_MAC_VER_11,
6270 },
6271 [RTL_CFG_2] = {
6272 .hw_start = rtl_hw_start_8101,
6273 .region = 2,
6274 .align = 8,
6275 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
6276 PCSTimeout,
6277 .features = RTL_FEATURE_MSI,
6278 .default_ver = RTL_GIGA_MAC_VER_13,
6279 }
6280};
6281
6282/* Cfg9346_Unlock assumed. */
6283static unsigned rtl_try_msi(struct rtl8169_private *tp,
6284 const struct rtl_cfg_info *cfg)
6285{
6286 void __iomem *ioaddr = tp->mmio_addr;
6287 unsigned msi = 0;
6288 u8 cfg2;
6289
6290 cfg2 = RTL_R8(Config2) & ~MSIEnable;
6291 if (cfg->features & RTL_FEATURE_MSI) {
6292 if (pci_enable_msi(tp->pci_dev)) {
6293 netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
6294 } else {
6295 cfg2 |= MSIEnable;
6296 msi = RTL_FEATURE_MSI;
6297 }
6298 }
6299 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
6300 RTL_W8(Config2, cfg2);
6301 return msi;
6302}
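/*
 * A sketch of the MSI-with-INTx-fallback policy set up above and consumed by
 * rtl_open() when it requests the vector (my_setup_irq is hypothetical): if
 * pci_enable_msi() fails the device keeps using the shared legacy line, and
 * only in that case does request_irq() need IRQF_SHARED.
 */
static int my_setup_irq(struct pci_dev *pdev, struct net_device *dev,
			irq_handler_t handler)
{
	bool use_msi = !pci_enable_msi(pdev);	/* 0 means MSI was granted */

	return request_irq(pdev->irq, handler,
			   use_msi ? 0 : IRQF_SHARED, dev->name, dev);
}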
6303
3b6cf25d
FR
6304static int __devinit
6305rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6306{
6307 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
6308 const unsigned int region = cfg->region;
6309 struct rtl8169_private *tp;
6310 struct mii_if_info *mii;
6311 struct net_device *dev;
6312 void __iomem *ioaddr;
6313 int chipset, i;
6314 int rc;
6315
6316 if (netif_msg_drv(&debug)) {
6317 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
6318 MODULENAME, RTL8169_VERSION);
6319 }
6320
6321 dev = alloc_etherdev(sizeof (*tp));
6322 if (!dev) {
6323 rc = -ENOMEM;
6324 goto out;
6325 }
6326
6327 SET_NETDEV_DEV(dev, &pdev->dev);
fa9c385e 6328 dev->netdev_ops = &rtl_netdev_ops;
3b6cf25d
FR
6329 tp = netdev_priv(dev);
6330 tp->dev = dev;
6331 tp->pci_dev = pdev;
6332 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
6333
6334 mii = &tp->mii;
6335 mii->dev = dev;
6336 mii->mdio_read = rtl_mdio_read;
6337 mii->mdio_write = rtl_mdio_write;
6338 mii->phy_id_mask = 0x1f;
6339 mii->reg_num_mask = 0x1f;
6340 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
6341
6342 /* Disable ASPM completely, as it causes random devices to stop working
6343 * as well as full system hangs for some PCIe device users. */
6344 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6345 PCIE_LINK_STATE_CLKPM);
6346
6347 /* enable device (incl. PCI PM wakeup and hotplug setup) */
6348 rc = pci_enable_device(pdev);
6349 if (rc < 0) {
6350 netif_err(tp, probe, dev, "enable failure\n");
6351 goto err_out_free_dev_1;
6352 }
6353
6354 if (pci_set_mwi(pdev) < 0)
6355 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
6356
6357 /* Make sure the selected PCI BAR is MMIO. */
6358 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
6359 netif_err(tp, probe, dev,
6360 "region #%d not an MMIO resource, aborting\n",
6361 region);
6362 rc = -ENODEV;
6363 goto err_out_mwi_2;
6364 }
6365
6366 /* check for weird/broken PCI region reporting */
6367 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6368 netif_err(tp, probe, dev,
6369 "Invalid PCI region size(s), aborting\n");
6370 rc = -ENODEV;
6371 goto err_out_mwi_2;
6372 }
6373
6374 rc = pci_request_regions(pdev, MODULENAME);
6375 if (rc < 0) {
6376 netif_err(tp, probe, dev, "could not request regions\n");
6377 goto err_out_mwi_2;
6378 }
6379
6380 tp->cp_cmd = RxChkSum;
6381
6382 if ((sizeof(dma_addr_t) > 4) &&
6383 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
6384 tp->cp_cmd |= PCIDAC;
6385 dev->features |= NETIF_F_HIGHDMA;
6386 } else {
6387 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6388 if (rc < 0) {
6389 netif_err(tp, probe, dev, "DMA configuration failed\n");
6390 goto err_out_free_res_3;
6391 }
6392 }
6393
6394 /* ioremap MMIO region */
6395 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
6396 if (!ioaddr) {
6397 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
6398 rc = -EIO;
6399 goto err_out_free_res_3;
6400 }
6401 tp->mmio_addr = ioaddr;
6402
6403 if (!pci_is_pcie(pdev))
6404 netif_info(tp, probe, dev, "not PCI Express\n");
6405
6406 /* Identify chip attached to board */
6407 rtl8169_get_mac_version(tp, dev, cfg->default_ver);
6408
6409 rtl_init_rxcfg(tp);
6410
6411 rtl_irq_disable(tp);
6412
6413 rtl_hw_reset(tp);
6414
6415 rtl_ack_events(tp, 0xffff);
6416
6417 pci_set_master(pdev);
6418
6419 /*
6420 * Pretend we are using VLANs; this bypasses a nasty bug where
6421 * interrupts stop flowing on high load on 8110SCd controllers.
6422 */
6423 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6424 tp->cp_cmd |= RxVlan;
6425
6426 rtl_init_mdio_ops(tp);
6427 rtl_init_pll_power_ops(tp);
6428 rtl_init_jumbo_ops(tp);
beb1fe18 6429 rtl_init_csi_ops(tp);
3b6cf25d
FR
6430
6431 rtl8169_print_mac_version(tp);
6432
6433 chipset = tp->mac_version;
6434 tp->txd_version = rtl_chip_infos[chipset].txd_version;
6435
6436 RTL_W8(Cfg9346, Cfg9346_Unlock);
6437 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
6438 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
6439 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
6440 tp->features |= RTL_FEATURE_WOL;
6441 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
6442 tp->features |= RTL_FEATURE_WOL;
6443 tp->features |= rtl_try_msi(tp, cfg);
6444 RTL_W8(Cfg9346, Cfg9346_Lock);
6445
6446 if (rtl_tbi_enabled(tp)) {
6447 tp->set_speed = rtl8169_set_speed_tbi;
6448 tp->get_settings = rtl8169_gset_tbi;
6449 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
6450 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
6451 tp->link_ok = rtl8169_tbi_link_ok;
6452 tp->do_ioctl = rtl_tbi_ioctl;
6453 } else {
6454 tp->set_speed = rtl8169_set_speed_xmii;
6455 tp->get_settings = rtl8169_gset_xmii;
6456 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
6457 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
6458 tp->link_ok = rtl8169_xmii_link_ok;
6459 tp->do_ioctl = rtl_xmii_ioctl;
6460 }
6461
6462 mutex_init(&tp->wk.mutex);
6463
6464 /* Get MAC address */
6465 for (i = 0; i < ETH_ALEN; i++)
6466 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6467 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6468
6469 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
6470 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
3b6cf25d
FR
6471
6472 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
6473
6474 /* Don't enable SG, IP_CSUM and TSO by default; they might not work
6475 * properly for all devices. */
6476 dev->features |= NETIF_F_RXCSUM |
6477 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6478
6479 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6480 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6481 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6482 NETIF_F_HIGHDMA;
6483
6484 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6485 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6486 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
6487
6488 dev->hw_features |= NETIF_F_RXALL;
6489 dev->hw_features |= NETIF_F_RXFCS;
6490
6491 tp->hw_start = cfg->hw_start;
6492 tp->event_slow = cfg->event_slow;
6493
6494 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
6495 ~(RxBOVF | RxFOVF) : ~0;
6496
6497 init_timer(&tp->timer);
6498 tp->timer.data = (unsigned long) dev;
6499 tp->timer.function = rtl8169_phy_timer;
6500
6501 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
6502
6503 rc = register_netdev(dev);
6504 if (rc < 0)
6505 goto err_out_msi_4;
6506
6507 pci_set_drvdata(pdev, dev);
6508
92a7c4e7
FR
6509 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6510 rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
6511 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
3b6cf25d
FR
6512 if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
6513 netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
6514 "tx checksumming: %s]\n",
6515 rtl_chip_infos[chipset].jumbo_max,
6516 rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
6517 }
6518
6519 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6520 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6521 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6522 rtl8168_driver_start(tp);
6523 }
6524
6525 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
6526
6527 if (pci_dev_run_wake(pdev))
6528 pm_runtime_put_noidle(&pdev->dev);
6529
6530 netif_carrier_off(dev);
6531
6532out:
6533 return rc;
6534
6535err_out_msi_4:
6536 rtl_disable_msi(pdev, tp);
6537 iounmap(ioaddr);
6538err_out_free_res_3:
6539 pci_release_regions(pdev);
6540err_out_mwi_2:
6541 pci_clear_mwi(pdev);
6542 pci_disable_device(pdev);
6543err_out_free_dev_1:
6544 free_netdev(dev);
6545 goto out;
6546}
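/*
 * A sketch of the DMA mask negotiation performed during probe, with the same
 * fallback order as above (use_dac mirrors the driver's module parameter):
 * try 64-bit addressing when the platform and the option allow it, otherwise
 * settle for a 32-bit mask.
 */
static int my_set_dma_mask(struct pci_dev *pdev, bool use_dac)
{
	if (sizeof(dma_addr_t) > 4 && use_dac &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;	/* 64-bit DMA available, PCIDAC can be used */

	return pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
}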
6547
1da177e4
LT
6548static struct pci_driver rtl8169_pci_driver = {
6549 .name = MODULENAME,
6550 .id_table = rtl8169_pci_tbl,
3b6cf25d 6551 .probe = rtl_init_one,
e27566ed 6552 .remove = __devexit_p(rtl_remove_one),
1765f95d 6553 .shutdown = rtl_shutdown,
861ab440 6554 .driver.pm = RTL8169_PM_OPS,
1da177e4
LT
6555};
6556
07d3f51f 6557static int __init rtl8169_init_module(void)
1da177e4 6558{
29917620 6559 return pci_register_driver(&rtl8169_pci_driver);
1da177e4
LT
6560}
6561
07d3f51f 6562static void __exit rtl8169_cleanup_module(void)
1da177e4
LT
6563{
6564 pci_unregister_driver(&rtl8169_pci_driver);
6565}
6566
6567module_init(rtl8169_init_module);
6568module_exit(rtl8169_cleanup_module);
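/*
 * On kernels that already provide the module_pci_driver() helper (a wrapper
 * that generates the init/exit pair), the boilerplate above could be replaced
 * by:
 *
 *	module_pci_driver(rtl8169_pci_driver);
 *
 * Whether that helper exists depends on the kernel version, so this is only
 * a possible simplification, not what this driver does.
 */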