/*
 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
 *
 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
 * Copyright (c) a lot of people too. Please respect their work.
 *
 * See MAINTAINERS file for support contact information.
 */
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50 #define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
53 #define assert(expr) \
55 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
56 #expr,__FILE__,__func__,__LINE__); \
58 #define dprintk(fmt, args...) \
59 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
61 #define assert(expr) do {} while (0)
62 #define dprintk(fmt, args...) do {} while (0)
63 #endif /* RTL8169_DEBUG */
65 #define R8169_MSG_DEFAULT \
66 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
68 #define TX_SLOTS_AVAIL(tp) \
69 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
71 /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
72 #define TX_FRAGS_READY_FOR(tp,nr_frags) \
73 (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
75 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
76 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
77 static const int multicast_filter_limit
= 32;
79 #define MAX_READ_REQUEST_SHIFT 12
80 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
81 #define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
82 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
84 #define R8169_REGS_SIZE 256
85 #define R8169_NAPI_WEIGHT 64
86 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
87 #define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
88 #define RX_BUF_SIZE 1536 /* Rx Buffer size */
89 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
90 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
92 #define RTL8169_TX_TIMEOUT (6*HZ)
93 #define RTL8169_PHY_TIMEOUT (10*HZ)
95 #define RTL_EEPROM_SIG cpu_to_le32(0x8129)
96 #define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
97 #define RTL_EEPROM_SIG_ADDR 0x0000
99 /* write/read MMIO register */
100 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
101 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
102 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
103 #define RTL_R8(reg) readb (ioaddr + (reg))
104 #define RTL_R16(reg) readw (ioaddr + (reg))
105 #define RTL_R32(reg) readl (ioaddr + (reg))
108 RTL_GIGA_MAC_VER_01
= 0,
149 RTL_GIGA_MAC_NONE
= 0xff,
152 enum rtl_tx_desc_version
{
157 #define JUMBO_1K ETH_DATA_LEN
158 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
159 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
160 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
161 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
163 #define _R(NAME,TD,FW,SZ,B) { \
171 static const struct {
173 enum rtl_tx_desc_version txd_version
;
177 } rtl_chip_infos
[] = {
179 [RTL_GIGA_MAC_VER_01
] =
180 _R("RTL8169", RTL_TD_0
, NULL
, JUMBO_7K
, true),
181 [RTL_GIGA_MAC_VER_02
] =
182 _R("RTL8169s", RTL_TD_0
, NULL
, JUMBO_7K
, true),
183 [RTL_GIGA_MAC_VER_03
] =
184 _R("RTL8110s", RTL_TD_0
, NULL
, JUMBO_7K
, true),
185 [RTL_GIGA_MAC_VER_04
] =
186 _R("RTL8169sb/8110sb", RTL_TD_0
, NULL
, JUMBO_7K
, true),
187 [RTL_GIGA_MAC_VER_05
] =
188 _R("RTL8169sc/8110sc", RTL_TD_0
, NULL
, JUMBO_7K
, true),
189 [RTL_GIGA_MAC_VER_06
] =
190 _R("RTL8169sc/8110sc", RTL_TD_0
, NULL
, JUMBO_7K
, true),
192 [RTL_GIGA_MAC_VER_07
] =
193 _R("RTL8102e", RTL_TD_1
, NULL
, JUMBO_1K
, true),
194 [RTL_GIGA_MAC_VER_08
] =
195 _R("RTL8102e", RTL_TD_1
, NULL
, JUMBO_1K
, true),
196 [RTL_GIGA_MAC_VER_09
] =
197 _R("RTL8102e", RTL_TD_1
, NULL
, JUMBO_1K
, true),
198 [RTL_GIGA_MAC_VER_10
] =
199 _R("RTL8101e", RTL_TD_0
, NULL
, JUMBO_1K
, true),
200 [RTL_GIGA_MAC_VER_11
] =
201 _R("RTL8168b/8111b", RTL_TD_0
, NULL
, JUMBO_4K
, false),
202 [RTL_GIGA_MAC_VER_12
] =
203 _R("RTL8168b/8111b", RTL_TD_0
, NULL
, JUMBO_4K
, false),
204 [RTL_GIGA_MAC_VER_13
] =
205 _R("RTL8101e", RTL_TD_0
, NULL
, JUMBO_1K
, true),
206 [RTL_GIGA_MAC_VER_14
] =
207 _R("RTL8100e", RTL_TD_0
, NULL
, JUMBO_1K
, true),
208 [RTL_GIGA_MAC_VER_15
] =
209 _R("RTL8100e", RTL_TD_0
, NULL
, JUMBO_1K
, true),
210 [RTL_GIGA_MAC_VER_16
] =
211 _R("RTL8101e", RTL_TD_0
, NULL
, JUMBO_1K
, true),
212 [RTL_GIGA_MAC_VER_17
] =
213 _R("RTL8168b/8111b", RTL_TD_1
, NULL
, JUMBO_4K
, false),
214 [RTL_GIGA_MAC_VER_18
] =
215 _R("RTL8168cp/8111cp", RTL_TD_1
, NULL
, JUMBO_6K
, false),
216 [RTL_GIGA_MAC_VER_19
] =
217 _R("RTL8168c/8111c", RTL_TD_1
, NULL
, JUMBO_6K
, false),
218 [RTL_GIGA_MAC_VER_20
] =
219 _R("RTL8168c/8111c", RTL_TD_1
, NULL
, JUMBO_6K
, false),
220 [RTL_GIGA_MAC_VER_21
] =
221 _R("RTL8168c/8111c", RTL_TD_1
, NULL
, JUMBO_6K
, false),
222 [RTL_GIGA_MAC_VER_22
] =
223 _R("RTL8168c/8111c", RTL_TD_1
, NULL
, JUMBO_6K
, false),
224 [RTL_GIGA_MAC_VER_23
] =
225 _R("RTL8168cp/8111cp", RTL_TD_1
, NULL
, JUMBO_6K
, false),
226 [RTL_GIGA_MAC_VER_24
] =
227 _R("RTL8168cp/8111cp", RTL_TD_1
, NULL
, JUMBO_6K
, false),
228 [RTL_GIGA_MAC_VER_25
] =
229 _R("RTL8168d/8111d", RTL_TD_1
, FIRMWARE_8168D_1
,
231 [RTL_GIGA_MAC_VER_26
] =
232 _R("RTL8168d/8111d", RTL_TD_1
, FIRMWARE_8168D_2
,
234 [RTL_GIGA_MAC_VER_27
] =
235 _R("RTL8168dp/8111dp", RTL_TD_1
, NULL
, JUMBO_9K
, false),
236 [RTL_GIGA_MAC_VER_28
] =
237 _R("RTL8168dp/8111dp", RTL_TD_1
, NULL
, JUMBO_9K
, false),
238 [RTL_GIGA_MAC_VER_29
] =
239 _R("RTL8105e", RTL_TD_1
, FIRMWARE_8105E_1
,
241 [RTL_GIGA_MAC_VER_30
] =
242 _R("RTL8105e", RTL_TD_1
, FIRMWARE_8105E_1
,
244 [RTL_GIGA_MAC_VER_31
] =
245 _R("RTL8168dp/8111dp", RTL_TD_1
, NULL
, JUMBO_9K
, false),
246 [RTL_GIGA_MAC_VER_32
] =
247 _R("RTL8168e/8111e", RTL_TD_1
, FIRMWARE_8168E_1
,
249 [RTL_GIGA_MAC_VER_33
] =
250 _R("RTL8168e/8111e", RTL_TD_1
, FIRMWARE_8168E_2
,
252 [RTL_GIGA_MAC_VER_34
] =
253 _R("RTL8168evl/8111evl",RTL_TD_1
, FIRMWARE_8168E_3
,
255 [RTL_GIGA_MAC_VER_35
] =
256 _R("RTL8168f/8111f", RTL_TD_1
, FIRMWARE_8168F_1
,
258 [RTL_GIGA_MAC_VER_36
] =
259 _R("RTL8168f/8111f", RTL_TD_1
, FIRMWARE_8168F_2
,
261 [RTL_GIGA_MAC_VER_37
] =
262 _R("RTL8402", RTL_TD_1
, FIRMWARE_8402_1
,
264 [RTL_GIGA_MAC_VER_38
] =
265 _R("RTL8411", RTL_TD_1
, FIRMWARE_8411_1
,
267 [RTL_GIGA_MAC_VER_39
] =
268 _R("RTL8106e", RTL_TD_1
, FIRMWARE_8106E_1
,
270 [RTL_GIGA_MAC_VER_40
] =
271 _R("RTL8168g/8111g", RTL_TD_1
, FIRMWARE_8168G_1
,
273 [RTL_GIGA_MAC_VER_41
] =
274 _R("RTL8168g/8111g", RTL_TD_1
, NULL
, JUMBO_9K
, false),
284 static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl
) = {
285 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK
, 0x8129), 0, 0, RTL_CFG_0
},
286 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK
, 0x8136), 0, 0, RTL_CFG_2
},
287 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK
, 0x8167), 0, 0, RTL_CFG_0
},
288 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK
, 0x8168), 0, 0, RTL_CFG_1
},
289 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK
, 0x8169), 0, 0, RTL_CFG_0
},
290 { PCI_VENDOR_ID_DLINK
, 0x4300,
291 PCI_VENDOR_ID_DLINK
, 0x4b10, 0, 0, RTL_CFG_1
},
292 { PCI_DEVICE(PCI_VENDOR_ID_DLINK
, 0x4300), 0, 0, RTL_CFG_0
},
293 { PCI_DEVICE(PCI_VENDOR_ID_DLINK
, 0x4302), 0, 0, RTL_CFG_0
},
294 { PCI_DEVICE(PCI_VENDOR_ID_AT
, 0xc107), 0, 0, RTL_CFG_0
},
295 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0
},
296 { PCI_VENDOR_ID_LINKSYS
, 0x1032,
297 PCI_ANY_ID
, 0x0024, 0, 0, RTL_CFG_0
},
299 PCI_ANY_ID
, 0x2410, 0, 0, RTL_CFG_2
},
303 MODULE_DEVICE_TABLE(pci
, rtl8169_pci_tbl
);
305 static int rx_buf_sz
= 16383;
312 MAC0
= 0, /* Ethernet hardware address. */
314 MAR0
= 8, /* Multicast filter. */
315 CounterAddrLow
= 0x10,
316 CounterAddrHigh
= 0x14,
317 TxDescStartAddrLow
= 0x20,
318 TxDescStartAddrHigh
= 0x24,
319 TxHDescStartAddrLow
= 0x28,
320 TxHDescStartAddrHigh
= 0x2c,
329 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
330 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
333 #define RX128_INT_EN (1 << 15) /* 8111c and later */
334 #define RX_MULTI_EN (1 << 14) /* 8111c only */
335 #define RXCFG_FIFO_SHIFT 13
336 /* No threshold before first PCI xfer */
337 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
338 #define RXCFG_DMA_SHIFT 8
339 /* Unlimited maximum PCI burst. */
340 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
347 #define PME_SIGNAL (1 << 5) /* 8168c and later */
358 RxDescAddrLow
= 0xe4,
359 RxDescAddrHigh
= 0xe8,
360 EarlyTxThres
= 0xec, /* 8169. Unit of 32 bytes. */
362 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
364 MaxTxPacketSize
= 0xec, /* 8101/8168. Unit of 128 bytes. */
366 #define TxPacketMax (8064 >> 7)
367 #define EarlySize 0x27
370 FuncEventMask
= 0xf4,
371 FuncPresetState
= 0xf8,
372 FuncForceEvent
= 0xfc,
375 enum rtl8110_registers
{
381 enum rtl8168_8101_registers
{
384 #define CSIAR_FLAG 0x80000000
385 #define CSIAR_WRITE_CMD 0x80000000
386 #define CSIAR_BYTE_ENABLE 0x0f
387 #define CSIAR_BYTE_ENABLE_SHIFT 12
388 #define CSIAR_ADDR_MASK 0x0fff
389 #define CSIAR_FUNC_CARD 0x00000000
390 #define CSIAR_FUNC_SDIO 0x00010000
391 #define CSIAR_FUNC_NIC 0x00020000
394 #define EPHYAR_FLAG 0x80000000
395 #define EPHYAR_WRITE_CMD 0x80000000
396 #define EPHYAR_REG_MASK 0x1f
397 #define EPHYAR_REG_SHIFT 16
398 #define EPHYAR_DATA_MASK 0xffff
400 #define PFM_EN (1 << 6)
402 #define FIX_NAK_1 (1 << 4)
403 #define FIX_NAK_2 (1 << 3)
406 #define NOW_IS_OOB (1 << 7)
407 #define TX_EMPTY (1 << 5)
408 #define RX_EMPTY (1 << 4)
409 #define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
410 #define EN_NDP (1 << 3)
411 #define EN_OOB_RESET (1 << 2)
412 #define LINK_LIST_RDY (1 << 1)
414 #define EFUSEAR_FLAG 0x80000000
415 #define EFUSEAR_WRITE_CMD 0x80000000
416 #define EFUSEAR_READ_CMD 0x00000000
417 #define EFUSEAR_REG_MASK 0x03ff
418 #define EFUSEAR_REG_SHIFT 8
419 #define EFUSEAR_DATA_MASK 0xff
422 enum rtl8168_registers
{
427 #define ERIAR_FLAG 0x80000000
428 #define ERIAR_WRITE_CMD 0x80000000
429 #define ERIAR_READ_CMD 0x00000000
430 #define ERIAR_ADDR_BYTE_ALIGN 4
431 #define ERIAR_TYPE_SHIFT 16
432 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
433 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
434 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
435 #define ERIAR_MASK_SHIFT 12
436 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
437 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
438 #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
439 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
440 EPHY_RXER_NUM
= 0x7c,
441 OCPDR
= 0xb0, /* OCP GPHY access */
442 #define OCPDR_WRITE_CMD 0x80000000
443 #define OCPDR_READ_CMD 0x00000000
444 #define OCPDR_REG_MASK 0x7f
445 #define OCPDR_GPHY_REG_SHIFT 16
446 #define OCPDR_DATA_MASK 0xffff
448 #define OCPAR_FLAG 0x80000000
449 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
450 #define OCPAR_GPHY_READ_CMD 0x0000f060
452 RDSAR1
= 0xd0, /* 8168c only. Undocumented on 8168dp */
453 MISC
= 0xf0, /* 8168e only. */
454 #define TXPLA_RST (1 << 29)
455 #define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
456 #define PWM_EN (1 << 22)
457 #define RXDV_GATED_EN (1 << 19)
458 #define EARLY_TALLY_EN (1 << 16)
461 enum rtl_register_content
{
462 /* InterruptStatusBits */
466 TxDescUnavail
= 0x0080,
490 /* TXPoll register p.5 */
491 HPQ
= 0x80, /* Poll cmd on the high prio queue */
492 NPQ
= 0x40, /* Poll cmd on the low prio queue */
493 FSWInt
= 0x01, /* Forced software interrupt */
497 Cfg9346_Unlock
= 0xc0,
502 AcceptBroadcast
= 0x08,
503 AcceptMulticast
= 0x04,
505 AcceptAllPhys
= 0x01,
506 #define RX_CONFIG_ACCEPT_MASK 0x3f
509 TxInterFrameGapShift
= 24,
510 TxDMAShift
= 8, /* DMA burst value (0-7) is shift this many bits */
512 /* Config1 register p.24 */
515 Speed_down
= (1 << 4),
519 PMEnable
= (1 << 0), /* Power Management Enable */
521 /* Config2 register p. 25 */
522 MSIEnable
= (1 << 5), /* 8169 only. Reserved in the 8168. */
523 PCI_Clock_66MHz
= 0x01,
524 PCI_Clock_33MHz
= 0x00,
526 /* Config3 register p.25 */
527 MagicPacket
= (1 << 5), /* Wake up when receives a Magic Packet */
528 LinkUp
= (1 << 4), /* Wake up when the cable connection is re-established */
529 Jumbo_En0
= (1 << 2), /* 8168 only. Reserved in the 8168b */
530 Beacon_en
= (1 << 0), /* 8168 only. Reserved in the 8168b */
532 /* Config4 register */
533 Jumbo_En1
= (1 << 1), /* 8168 only. Reserved in the 8168b */
535 /* Config5 register p.27 */
536 BWF
= (1 << 6), /* Accept Broadcast wakeup frame */
537 MWF
= (1 << 5), /* Accept Multicast wakeup frame */
538 UWF
= (1 << 4), /* Accept Unicast wakeup frame */
540 LanWake
= (1 << 1), /* LanWake enable/disable */
541 PMEStatus
= (1 << 0), /* PME status can be reset by PCI RST# */
544 TBIReset
= 0x80000000,
545 TBILoopback
= 0x40000000,
546 TBINwEnable
= 0x20000000,
547 TBINwRestart
= 0x10000000,
548 TBILinkOk
= 0x02000000,
549 TBINwComplete
= 0x01000000,
552 EnableBist
= (1 << 15), // 8168 8101
553 Mac_dbgo_oe
= (1 << 14), // 8168 8101
554 Normal_mode
= (1 << 13), // unused
555 Force_half_dup
= (1 << 12), // 8168 8101
556 Force_rxflow_en
= (1 << 11), // 8168 8101
557 Force_txflow_en
= (1 << 10), // 8168 8101
558 Cxpl_dbg_sel
= (1 << 9), // 8168 8101
559 ASF
= (1 << 8), // 8168 8101
560 PktCntrDisable
= (1 << 7), // 8168 8101
561 Mac_dbgo_sel
= 0x001c, // 8168
566 INTT_0
= 0x0000, // 8168
567 INTT_1
= 0x0001, // 8168
568 INTT_2
= 0x0002, // 8168
569 INTT_3
= 0x0003, // 8168
571 /* rtl8169_PHYstatus */
582 TBILinkOK
= 0x02000000,
584 /* DumpCounterCommand */
589 /* First doubleword. */
590 DescOwn
= (1 << 31), /* Descriptor is owned by NIC */
591 RingEnd
= (1 << 30), /* End of descriptor ring */
592 FirstFrag
= (1 << 29), /* First segment of a packet */
593 LastFrag
= (1 << 28), /* Final segment of a packet */
597 enum rtl_tx_desc_bit
{
598 /* First doubleword. */
599 TD_LSO
= (1 << 27), /* Large Send Offload */
600 #define TD_MSS_MAX 0x07ffu /* MSS value */
602 /* Second doubleword. */
603 TxVlanTag
= (1 << 17), /* Add VLAN tag */
606 /* 8169, 8168b and 810x except 8102e. */
607 enum rtl_tx_desc_bit_0
{
608 /* First doubleword. */
609 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
610 TD0_TCP_CS
= (1 << 16), /* Calculate TCP/IP checksum */
611 TD0_UDP_CS
= (1 << 17), /* Calculate UDP/IP checksum */
612 TD0_IP_CS
= (1 << 18), /* Calculate IP checksum */
615 /* 8102e, 8168c and beyond. */
616 enum rtl_tx_desc_bit_1
{
617 /* Second doubleword. */
618 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
619 TD1_IP_CS
= (1 << 29), /* Calculate IP checksum */
620 TD1_TCP_CS
= (1 << 30), /* Calculate TCP/IP checksum */
621 TD1_UDP_CS
= (1 << 31), /* Calculate UDP/IP checksum */
624 static const struct rtl_tx_desc_info
{
631 } tx_desc_info
[] = {
634 .udp
= TD0_IP_CS
| TD0_UDP_CS
,
635 .tcp
= TD0_IP_CS
| TD0_TCP_CS
637 .mss_shift
= TD0_MSS_SHIFT
,
642 .udp
= TD1_IP_CS
| TD1_UDP_CS
,
643 .tcp
= TD1_IP_CS
| TD1_TCP_CS
645 .mss_shift
= TD1_MSS_SHIFT
,
650 enum rtl_rx_desc_bit
{
652 PID1
= (1 << 18), /* Protocol ID bit 1/2 */
653 PID0
= (1 << 17), /* Protocol ID bit 2/2 */
655 #define RxProtoUDP (PID1)
656 #define RxProtoTCP (PID0)
657 #define RxProtoIP (PID1 | PID0)
658 #define RxProtoMask RxProtoIP
660 IPFail
= (1 << 16), /* IP checksum failed */
661 UDPFail
= (1 << 15), /* UDP/IP checksum failed */
662 TCPFail
= (1 << 14), /* TCP/IP checksum failed */
663 RxVlanTag
= (1 << 16), /* VLAN tag available */
666 #define RsvdMask 0x3fffc000
683 u8 __pad
[sizeof(void *) - sizeof(u32
)];
687 RTL_FEATURE_WOL
= (1 << 0),
688 RTL_FEATURE_MSI
= (1 << 1),
689 RTL_FEATURE_GMII
= (1 << 2),
692 struct rtl8169_counters
{
699 __le32 tx_one_collision
;
700 __le32 tx_multi_collision
;
709 RTL_FLAG_TASK_ENABLED
,
710 RTL_FLAG_TASK_SLOW_PENDING
,
711 RTL_FLAG_TASK_RESET_PENDING
,
712 RTL_FLAG_TASK_PHY_PENDING
,
716 struct rtl8169_stats
{
719 struct u64_stats_sync syncp
;
722 struct rtl8169_private
{
723 void __iomem
*mmio_addr
; /* memory map physical address */
724 struct pci_dev
*pci_dev
;
725 struct net_device
*dev
;
726 struct napi_struct napi
;
730 u32 cur_rx
; /* Index into the Rx descriptor buffer of next Rx pkt. */
731 u32 cur_tx
; /* Index into the Tx descriptor buffer of next Rx pkt. */
734 struct rtl8169_stats rx_stats
;
735 struct rtl8169_stats tx_stats
;
736 struct TxDesc
*TxDescArray
; /* 256-aligned Tx descriptor ring */
737 struct RxDesc
*RxDescArray
; /* 256-aligned Rx descriptor ring */
738 dma_addr_t TxPhyAddr
;
739 dma_addr_t RxPhyAddr
;
740 void *Rx_databuff
[NUM_RX_DESC
]; /* Rx data buffers */
741 struct ring_info tx_skb
[NUM_TX_DESC
]; /* Tx data buffers */
742 struct timer_list timer
;
748 void (*write
)(struct rtl8169_private
*, int, int);
749 int (*read
)(struct rtl8169_private
*, int);
752 struct pll_power_ops
{
753 void (*down
)(struct rtl8169_private
*);
754 void (*up
)(struct rtl8169_private
*);
758 void (*enable
)(struct rtl8169_private
*);
759 void (*disable
)(struct rtl8169_private
*);
763 void (*write
)(struct rtl8169_private
*, int, int);
764 u32 (*read
)(struct rtl8169_private
*, int);
767 int (*set_speed
)(struct net_device
*, u8 aneg
, u16 sp
, u8 dpx
, u32 adv
);
768 int (*get_settings
)(struct net_device
*, struct ethtool_cmd
*);
769 void (*phy_reset_enable
)(struct rtl8169_private
*tp
);
770 void (*hw_start
)(struct net_device
*);
771 unsigned int (*phy_reset_pending
)(struct rtl8169_private
*tp
);
772 unsigned int (*link_ok
)(void __iomem
*);
773 int (*do_ioctl
)(struct rtl8169_private
*tp
, struct mii_ioctl_data
*data
, int cmd
);
776 DECLARE_BITMAP(flags
, RTL_FLAG_MAX
);
778 struct work_struct work
;
783 struct mii_if_info mii
;
784 struct rtl8169_counters counters
;
789 const struct firmware
*fw
;
791 #define RTL_VER_SIZE 32
793 char version
[RTL_VER_SIZE
];
795 struct rtl_fw_phy_action
{
800 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
805 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
806 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
807 module_param(use_dac
, int, 0);
808 MODULE_PARM_DESC(use_dac
, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
809 module_param_named(debug
, debug
.msg_enable
, int, 0);
810 MODULE_PARM_DESC(debug
, "Debug verbosity level (0=none, ..., 16=all)");
811 MODULE_LICENSE("GPL");
812 MODULE_VERSION(RTL8169_VERSION
);
813 MODULE_FIRMWARE(FIRMWARE_8168D_1
);
814 MODULE_FIRMWARE(FIRMWARE_8168D_2
);
815 MODULE_FIRMWARE(FIRMWARE_8168E_1
);
816 MODULE_FIRMWARE(FIRMWARE_8168E_2
);
817 MODULE_FIRMWARE(FIRMWARE_8168E_3
);
818 MODULE_FIRMWARE(FIRMWARE_8105E_1
);
819 MODULE_FIRMWARE(FIRMWARE_8168F_1
);
820 MODULE_FIRMWARE(FIRMWARE_8168F_2
);
821 MODULE_FIRMWARE(FIRMWARE_8402_1
);
822 MODULE_FIRMWARE(FIRMWARE_8411_1
);
823 MODULE_FIRMWARE(FIRMWARE_8106E_1
);
824 MODULE_FIRMWARE(FIRMWARE_8168G_1
);
826 static void rtl_lock_work(struct rtl8169_private
*tp
)
828 mutex_lock(&tp
->wk
.mutex
);
831 static void rtl_unlock_work(struct rtl8169_private
*tp
)
833 mutex_unlock(&tp
->wk
.mutex
);
836 static void rtl_tx_performance_tweak(struct pci_dev
*pdev
, u16 force
)
838 pcie_capability_clear_and_set_word(pdev
, PCI_EXP_DEVCTL
,
839 PCI_EXP_DEVCTL_READRQ
, force
);
843 bool (*check
)(struct rtl8169_private
*);
/* Microsecond delay callback for rtl_loop_wait(). */
static void rtl_udelay(unsigned int d)
{
	udelay(d);
}
852 static bool rtl_loop_wait(struct rtl8169_private
*tp
, const struct rtl_cond
*c
,
853 void (*delay
)(unsigned int), unsigned int d
, int n
,
858 for (i
= 0; i
< n
; i
++) {
860 if (c
->check(tp
) == high
)
863 netif_err(tp
, drv
, tp
->dev
, "%s == %d (loop: %d, delay: %d).\n",
864 c
->msg
, !high
, n
, d
);
868 static bool rtl_udelay_loop_wait_high(struct rtl8169_private
*tp
,
869 const struct rtl_cond
*c
,
870 unsigned int d
, int n
)
872 return rtl_loop_wait(tp
, c
, rtl_udelay
, d
, n
, true);
875 static bool rtl_udelay_loop_wait_low(struct rtl8169_private
*tp
,
876 const struct rtl_cond
*c
,
877 unsigned int d
, int n
)
879 return rtl_loop_wait(tp
, c
, rtl_udelay
, d
, n
, false);
882 static bool rtl_msleep_loop_wait_high(struct rtl8169_private
*tp
,
883 const struct rtl_cond
*c
,
884 unsigned int d
, int n
)
886 return rtl_loop_wait(tp
, c
, msleep
, d
, n
, true);
889 static bool rtl_msleep_loop_wait_low(struct rtl8169_private
*tp
,
890 const struct rtl_cond
*c
,
891 unsigned int d
, int n
)
893 return rtl_loop_wait(tp
, c
, msleep
, d
, n
, false);
/*
 * Declare a named rtl_cond and open the definition of its _check predicate;
 * the macro invocation is immediately followed by the predicate body.
 */
#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check	= name ## _check,			\
	.msg	= #name					\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)
906 DECLARE_RTL_COND(rtl_ocpar_cond
)
908 void __iomem
*ioaddr
= tp
->mmio_addr
;
910 return RTL_R32(OCPAR
) & OCPAR_FLAG
;
913 static u32
ocp_read(struct rtl8169_private
*tp
, u8 mask
, u16 reg
)
915 void __iomem
*ioaddr
= tp
->mmio_addr
;
917 RTL_W32(OCPAR
, ((u32
)mask
& 0x0f) << 12 | (reg
& 0x0fff));
919 return rtl_udelay_loop_wait_high(tp
, &rtl_ocpar_cond
, 100, 20) ?
923 static void ocp_write(struct rtl8169_private
*tp
, u8 mask
, u16 reg
, u32 data
)
925 void __iomem
*ioaddr
= tp
->mmio_addr
;
927 RTL_W32(OCPDR
, data
);
928 RTL_W32(OCPAR
, OCPAR_FLAG
| ((u32
)mask
& 0x0f) << 12 | (reg
& 0x0fff));
930 rtl_udelay_loop_wait_low(tp
, &rtl_ocpar_cond
, 100, 20);
933 DECLARE_RTL_COND(rtl_eriar_cond
)
935 void __iomem
*ioaddr
= tp
->mmio_addr
;
937 return RTL_R32(ERIAR
) & ERIAR_FLAG
;
940 static void rtl8168_oob_notify(struct rtl8169_private
*tp
, u8 cmd
)
942 void __iomem
*ioaddr
= tp
->mmio_addr
;
945 RTL_W32(ERIAR
, 0x800010e8);
948 if (!rtl_udelay_loop_wait_low(tp
, &rtl_eriar_cond
, 100, 5))
951 ocp_write(tp
, 0x1, 0x30, 0x00000001);
954 #define OOB_CMD_RESET 0x00
955 #define OOB_CMD_DRIVER_START 0x05
956 #define OOB_CMD_DRIVER_STOP 0x06
958 static u16
rtl8168_get_ocp_reg(struct rtl8169_private
*tp
)
960 return (tp
->mac_version
== RTL_GIGA_MAC_VER_31
) ? 0xb8 : 0x10;
963 DECLARE_RTL_COND(rtl_ocp_read_cond
)
967 reg
= rtl8168_get_ocp_reg(tp
);
969 return ocp_read(tp
, 0x0f, reg
) & 0x00000800;
972 static void rtl8168_driver_start(struct rtl8169_private
*tp
)
974 rtl8168_oob_notify(tp
, OOB_CMD_DRIVER_START
);
976 rtl_msleep_loop_wait_high(tp
, &rtl_ocp_read_cond
, 10, 10);
979 static void rtl8168_driver_stop(struct rtl8169_private
*tp
)
981 rtl8168_oob_notify(tp
, OOB_CMD_DRIVER_STOP
);
983 rtl_msleep_loop_wait_low(tp
, &rtl_ocp_read_cond
, 10, 10);
986 static int r8168dp_check_dash(struct rtl8169_private
*tp
)
988 u16 reg
= rtl8168_get_ocp_reg(tp
);
990 return (ocp_read(tp
, 0x0f, reg
) & 0x00008000) ? 1 : 0;
993 static bool rtl_ocp_reg_failure(struct rtl8169_private
*tp
, u32 reg
)
995 if (reg
& 0xffff0001) {
996 netif_err(tp
, drv
, tp
->dev
, "Invalid ocp reg %x!\n", reg
);
1002 DECLARE_RTL_COND(rtl_ocp_gphy_cond
)
1004 void __iomem
*ioaddr
= tp
->mmio_addr
;
1006 return RTL_R32(GPHY_OCP
) & OCPAR_FLAG
;
1009 static void r8168_phy_ocp_write(struct rtl8169_private
*tp
, u32 reg
, u32 data
)
1011 void __iomem
*ioaddr
= tp
->mmio_addr
;
1013 if (rtl_ocp_reg_failure(tp
, reg
))
1016 RTL_W32(GPHY_OCP
, OCPAR_FLAG
| (reg
<< 15) | data
);
1018 rtl_udelay_loop_wait_low(tp
, &rtl_ocp_gphy_cond
, 25, 10);
1021 static u16
r8168_phy_ocp_read(struct rtl8169_private
*tp
, u32 reg
)
1023 void __iomem
*ioaddr
= tp
->mmio_addr
;
1025 if (rtl_ocp_reg_failure(tp
, reg
))
1028 RTL_W32(GPHY_OCP
, reg
<< 15);
1030 return rtl_udelay_loop_wait_high(tp
, &rtl_ocp_gphy_cond
, 25, 10) ?
1031 (RTL_R32(GPHY_OCP
) & 0xffff) : ~0;
/* Read-modify-write a PHY OCP register: set bits @p, clear bits @m. */
static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
{
	int val;

	val = r8168_phy_ocp_read(tp, reg);
	r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
}
1042 static void r8168_mac_ocp_write(struct rtl8169_private
*tp
, u32 reg
, u32 data
)
1044 void __iomem
*ioaddr
= tp
->mmio_addr
;
1046 if (rtl_ocp_reg_failure(tp
, reg
))
1049 RTL_W32(OCPDR
, OCPAR_FLAG
| (reg
<< 15) | data
);
1052 static u16
r8168_mac_ocp_read(struct rtl8169_private
*tp
, u32 reg
)
1054 void __iomem
*ioaddr
= tp
->mmio_addr
;
1056 if (rtl_ocp_reg_failure(tp
, reg
))
1059 RTL_W32(OCPDR
, reg
<< 15);
1061 return RTL_R32(OCPDR
);
1064 #define OCP_STD_PHY_BASE 0xa400
1066 static void r8168g_mdio_write(struct rtl8169_private
*tp
, int reg
, int value
)
1069 tp
->ocp_base
= value
? value
<< 4 : OCP_STD_PHY_BASE
;
1073 if (tp
->ocp_base
!= OCP_STD_PHY_BASE
)
1076 r8168_phy_ocp_write(tp
, tp
->ocp_base
+ reg
* 2, value
);
1079 static int r8168g_mdio_read(struct rtl8169_private
*tp
, int reg
)
1081 if (tp
->ocp_base
!= OCP_STD_PHY_BASE
)
1084 return r8168_phy_ocp_read(tp
, tp
->ocp_base
+ reg
* 2);
1087 DECLARE_RTL_COND(rtl_phyar_cond
)
1089 void __iomem
*ioaddr
= tp
->mmio_addr
;
1091 return RTL_R32(PHYAR
) & 0x80000000;
1094 static void r8169_mdio_write(struct rtl8169_private
*tp
, int reg
, int value
)
1096 void __iomem
*ioaddr
= tp
->mmio_addr
;
1098 RTL_W32(PHYAR
, 0x80000000 | (reg
& 0x1f) << 16 | (value
& 0xffff));
1100 rtl_udelay_loop_wait_low(tp
, &rtl_phyar_cond
, 25, 20);
1102 * According to hardware specs a 20us delay is required after write
1103 * complete indication, but before sending next command.
1108 static int r8169_mdio_read(struct rtl8169_private
*tp
, int reg
)
1110 void __iomem
*ioaddr
= tp
->mmio_addr
;
1113 RTL_W32(PHYAR
, 0x0 | (reg
& 0x1f) << 16);
1115 value
= rtl_udelay_loop_wait_high(tp
, &rtl_phyar_cond
, 25, 20) ?
1116 RTL_R32(PHYAR
) & 0xffff : ~0;
1119 * According to hardware specs a 20us delay is required after read
1120 * complete indication, but before sending next command.
1127 static void r8168dp_1_mdio_access(struct rtl8169_private
*tp
, int reg
, u32 data
)
1129 void __iomem
*ioaddr
= tp
->mmio_addr
;
1131 RTL_W32(OCPDR
, data
| ((reg
& OCPDR_REG_MASK
) << OCPDR_GPHY_REG_SHIFT
));
1132 RTL_W32(OCPAR
, OCPAR_GPHY_WRITE_CMD
);
1133 RTL_W32(EPHY_RXER_NUM
, 0);
1135 rtl_udelay_loop_wait_low(tp
, &rtl_ocpar_cond
, 1000, 100);
1138 static void r8168dp_1_mdio_write(struct rtl8169_private
*tp
, int reg
, int value
)
1140 r8168dp_1_mdio_access(tp
, reg
,
1141 OCPDR_WRITE_CMD
| (value
& OCPDR_DATA_MASK
));
1144 static int r8168dp_1_mdio_read(struct rtl8169_private
*tp
, int reg
)
1146 void __iomem
*ioaddr
= tp
->mmio_addr
;
1148 r8168dp_1_mdio_access(tp
, reg
, OCPDR_READ_CMD
);
1151 RTL_W32(OCPAR
, OCPAR_GPHY_READ_CMD
);
1152 RTL_W32(EPHY_RXER_NUM
, 0);
1154 return rtl_udelay_loop_wait_high(tp
, &rtl_ocpar_cond
, 1000, 100) ?
1155 RTL_R32(OCPDR
) & OCPDR_DATA_MASK
: ~0;
1158 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
1160 static void r8168dp_2_mdio_start(void __iomem
*ioaddr
)
1162 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT
);
1165 static void r8168dp_2_mdio_stop(void __iomem
*ioaddr
)
1167 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT
);
1170 static void r8168dp_2_mdio_write(struct rtl8169_private
*tp
, int reg
, int value
)
1172 void __iomem
*ioaddr
= tp
->mmio_addr
;
1174 r8168dp_2_mdio_start(ioaddr
);
1176 r8169_mdio_write(tp
, reg
, value
);
1178 r8168dp_2_mdio_stop(ioaddr
);
1181 static int r8168dp_2_mdio_read(struct rtl8169_private
*tp
, int reg
)
1183 void __iomem
*ioaddr
= tp
->mmio_addr
;
1186 r8168dp_2_mdio_start(ioaddr
);
1188 value
= r8169_mdio_read(tp
, reg
);
1190 r8168dp_2_mdio_stop(ioaddr
);
1195 static void rtl_writephy(struct rtl8169_private
*tp
, int location
, u32 val
)
1197 tp
->mdio_ops
.write(tp
, location
, val
);
1200 static int rtl_readphy(struct rtl8169_private
*tp
, int location
)
1202 return tp
->mdio_ops
.read(tp
, location
);
/* OR @value into a PHY register (read-modify-write, set only). */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
}
/* Read-modify-write a PHY register: set bits @p, clear bits @m. */
static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	int val;

	val = rtl_readphy(tp, reg_addr);
	rtl_writephy(tp, reg_addr, (val | p) & ~m);
}
/* mii_if_info write hook; phy_id is unused (single internal PHY). */
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_writephy(tp, location, val);
}
/* mii_if_info read hook; phy_id is unused (single internal PHY). */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return rtl_readphy(tp, location);
}
1233 DECLARE_RTL_COND(rtl_ephyar_cond
)
1235 void __iomem
*ioaddr
= tp
->mmio_addr
;
1237 return RTL_R32(EPHYAR
) & EPHYAR_FLAG
;
1240 static void rtl_ephy_write(struct rtl8169_private
*tp
, int reg_addr
, int value
)
1242 void __iomem
*ioaddr
= tp
->mmio_addr
;
1244 RTL_W32(EPHYAR
, EPHYAR_WRITE_CMD
| (value
& EPHYAR_DATA_MASK
) |
1245 (reg_addr
& EPHYAR_REG_MASK
) << EPHYAR_REG_SHIFT
);
1247 rtl_udelay_loop_wait_low(tp
, &rtl_ephyar_cond
, 10, 100);
1252 static u16
rtl_ephy_read(struct rtl8169_private
*tp
, int reg_addr
)
1254 void __iomem
*ioaddr
= tp
->mmio_addr
;
1256 RTL_W32(EPHYAR
, (reg_addr
& EPHYAR_REG_MASK
) << EPHYAR_REG_SHIFT
);
1258 return rtl_udelay_loop_wait_high(tp
, &rtl_ephyar_cond
, 10, 100) ?
1259 RTL_R32(EPHYAR
) & EPHYAR_DATA_MASK
: ~0;
1262 static void rtl_eri_write(struct rtl8169_private
*tp
, int addr
, u32 mask
,
1265 void __iomem
*ioaddr
= tp
->mmio_addr
;
1267 BUG_ON((addr
& 3) || (mask
== 0));
1268 RTL_W32(ERIDR
, val
);
1269 RTL_W32(ERIAR
, ERIAR_WRITE_CMD
| type
| mask
| addr
);
1271 rtl_udelay_loop_wait_low(tp
, &rtl_eriar_cond
, 100, 100);
1274 static u32
rtl_eri_read(struct rtl8169_private
*tp
, int addr
, int type
)
1276 void __iomem
*ioaddr
= tp
->mmio_addr
;
1278 RTL_W32(ERIAR
, ERIAR_READ_CMD
| type
| ERIAR_MASK_1111
| addr
);
1280 return rtl_udelay_loop_wait_high(tp
, &rtl_eriar_cond
, 100, 100) ?
1281 RTL_R32(ERIDR
) : ~0;
1284 static void rtl_w1w0_eri(struct rtl8169_private
*tp
, int addr
, u32 mask
, u32 p
,
1289 val
= rtl_eri_read(tp
, addr
, type
);
1290 rtl_eri_write(tp
, addr
, mask
, (val
& ~m
) | p
, type
);
1299 static void rtl_write_exgmac_batch(struct rtl8169_private
*tp
,
1300 const struct exgmac_reg
*r
, int len
)
1303 rtl_eri_write(tp
, r
->addr
, r
->mask
, r
->val
, ERIAR_EXGMAC
);
1308 DECLARE_RTL_COND(rtl_efusear_cond
)
1310 void __iomem
*ioaddr
= tp
->mmio_addr
;
1312 return RTL_R32(EFUSEAR
) & EFUSEAR_FLAG
;
1315 static u8
rtl8168d_efuse_read(struct rtl8169_private
*tp
, int reg_addr
)
1317 void __iomem
*ioaddr
= tp
->mmio_addr
;
1319 RTL_W32(EFUSEAR
, (reg_addr
& EFUSEAR_REG_MASK
) << EFUSEAR_REG_SHIFT
);
1321 return rtl_udelay_loop_wait_high(tp
, &rtl_efusear_cond
, 100, 300) ?
1322 RTL_R32(EFUSEAR
) & EFUSEAR_DATA_MASK
: ~0;
1325 static u16
rtl_get_events(struct rtl8169_private
*tp
)
1327 void __iomem
*ioaddr
= tp
->mmio_addr
;
1329 return RTL_R16(IntrStatus
);
1332 static void rtl_ack_events(struct rtl8169_private
*tp
, u16 bits
)
1334 void __iomem
*ioaddr
= tp
->mmio_addr
;
1336 RTL_W16(IntrStatus
, bits
);
1340 static void rtl_irq_disable(struct rtl8169_private
*tp
)
1342 void __iomem
*ioaddr
= tp
->mmio_addr
;
1344 RTL_W16(IntrMask
, 0);
1348 static void rtl_irq_enable(struct rtl8169_private
*tp
, u16 bits
)
1350 void __iomem
*ioaddr
= tp
->mmio_addr
;
1352 RTL_W16(IntrMask
, bits
);
1355 #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1356 #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1357 #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1359 static void rtl_irq_enable_all(struct rtl8169_private
*tp
)
1361 rtl_irq_enable(tp
, RTL_EVENT_NAPI
| tp
->event_slow
);
/*
 * Disable all chip interrupts, then acknowledge any events already
 * pending (NAPI datapath events plus chip-specific slow events).
 * NOTE(review): ioaddr is declared but no use of it is visible in this
 * extract; a flushing register read may be missing at the end of the
 * body -- confirm against the complete source.
 */
1364 static void rtl8169_irq_mask_and_ack(struct rtl8169_private
*tp
)
1366 void __iomem
*ioaddr
= tp
->mmio_addr
;
1368 rtl_irq_disable(tp
);
1369 rtl_ack_events(tp
, RTL_EVENT_NAPI
| tp
->event_slow
);
1373 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private
*tp
)
1375 void __iomem
*ioaddr
= tp
->mmio_addr
;
1377 return RTL_R32(TBICSR
) & TBIReset
;
1380 static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private
*tp
)
1382 return rtl_readphy(tp
, MII_BMCR
) & BMCR_RESET
;
1385 static unsigned int rtl8169_tbi_link_ok(void __iomem
*ioaddr
)
1387 return RTL_R32(TBICSR
) & TBILinkOk
;
1390 static unsigned int rtl8169_xmii_link_ok(void __iomem
*ioaddr
)
1392 return RTL_R8(PHYstatus
) & LinkStatus
;
1395 static void rtl8169_tbi_reset_enable(struct rtl8169_private
*tp
)
1397 void __iomem
*ioaddr
= tp
->mmio_addr
;
1399 RTL_W32(TBICSR
, RTL_R32(TBICSR
) | TBIReset
);
1402 static void rtl8169_xmii_reset_enable(struct rtl8169_private
*tp
)
1406 val
= rtl_readphy(tp
, MII_BMCR
) | BMCR_RESET
;
1407 rtl_writephy(tp
, MII_BMCR
, val
& 0xffff);
1410 static void rtl_link_chg_patch(struct rtl8169_private
*tp
)
1412 void __iomem
*ioaddr
= tp
->mmio_addr
;
1413 struct net_device
*dev
= tp
->dev
;
1415 if (!netif_running(dev
))
1418 if (tp
->mac_version
== RTL_GIGA_MAC_VER_34
||
1419 tp
->mac_version
== RTL_GIGA_MAC_VER_38
) {
1420 if (RTL_R8(PHYstatus
) & _1000bpsF
) {
1421 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x00000011,
1423 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x00000005,
1425 } else if (RTL_R8(PHYstatus
) & _100bps
) {
1426 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x0000001f,
1428 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x00000005,
1431 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x0000001f,
1433 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x0000003f,
1436 /* Reset packet filter */
1437 rtl_w1w0_eri(tp
, 0xdc, ERIAR_MASK_0001
, 0x00, 0x01,
1439 rtl_w1w0_eri(tp
, 0xdc, ERIAR_MASK_0001
, 0x01, 0x00,
1441 } else if (tp
->mac_version
== RTL_GIGA_MAC_VER_35
||
1442 tp
->mac_version
== RTL_GIGA_MAC_VER_36
) {
1443 if (RTL_R8(PHYstatus
) & _1000bpsF
) {
1444 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x00000011,
1446 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x00000005,
1449 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x0000001f,
1451 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x0000003f,
1454 } else if (tp
->mac_version
== RTL_GIGA_MAC_VER_37
) {
1455 if (RTL_R8(PHYstatus
) & _10bps
) {
1456 rtl_eri_write(tp
, 0x1d0, ERIAR_MASK_0011
, 0x4d02,
1458 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_0011
, 0x0060,
1461 rtl_eri_write(tp
, 0x1d0, ERIAR_MASK_0011
, 0x0000,
/*
 * Propagate the PHY link state to the net core: set/clear the carrier
 * and log the transition.  On link-up a chip-specific ERI patch is
 * applied; runtime-PM suspend is cancelled on link-up and scheduled
 * (5000 ms) on link-down.
 * NOTE(review): the conditions guarding the pm_request_resume()/
 * pm_schedule_suspend() calls (presumably the @pm parameter) and the
 * if/else braces are not visible in this extract -- confirm against
 * the complete source.
 */
1467 static void __rtl8169_check_link_status(struct net_device
*dev
,
1468 struct rtl8169_private
*tp
,
1469 void __iomem
*ioaddr
, bool pm
)
1471 if (tp
->link_ok(ioaddr
)) {
1472 rtl_link_chg_patch(tp
);
1473 /* This is to cancel a scheduled suspend if there's one. */
1475 pm_request_resume(&tp
->pci_dev
->dev
);
1476 netif_carrier_on(dev
);
1477 if (net_ratelimit())
1478 netif_info(tp
, ifup
, dev
, "link up\n");
1480 netif_carrier_off(dev
);
1481 netif_info(tp
, ifdown
, dev
, "link down\n");
1483 pm_schedule_suspend(&tp
->pci_dev
->dev
, 5000);
1487 static void rtl8169_check_link_status(struct net_device
*dev
,
1488 struct rtl8169_private
*tp
,
1489 void __iomem
*ioaddr
)
1491 __rtl8169_check_link_status(dev
, tp
, ioaddr
, false);
1494 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1496 static u32
__rtl8169_get_wol(struct rtl8169_private
*tp
)
1498 void __iomem
*ioaddr
= tp
->mmio_addr
;
1502 options
= RTL_R8(Config1
);
1503 if (!(options
& PMEnable
))
1506 options
= RTL_R8(Config3
);
1507 if (options
& LinkUp
)
1508 wolopts
|= WAKE_PHY
;
1509 if (options
& MagicPacket
)
1510 wolopts
|= WAKE_MAGIC
;
1512 options
= RTL_R8(Config5
);
1514 wolopts
|= WAKE_UCAST
;
1516 wolopts
|= WAKE_BCAST
;
1518 wolopts
|= WAKE_MCAST
;
/*
 * ethtool get_wol: report every Wake-on-LAN mode the driver supports
 * and the set currently enabled in the chip's Config registers.
 * NOTE(review): the matching rtl_lock_work() call is not visible in
 * this extract even though rtl_unlock_work() is -- confirm against the
 * complete source.
 */
1523 static void rtl8169_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
1525 struct rtl8169_private
*tp
= netdev_priv(dev
);
1529 wol
->supported
= WAKE_ANY
;
1530 wol
->wolopts
= __rtl8169_get_wol(tp
);
1532 rtl_unlock_work(tp
);
1535 static void __rtl8169_set_wol(struct rtl8169_private
*tp
, u32 wolopts
)
1537 void __iomem
*ioaddr
= tp
->mmio_addr
;
1539 static const struct {
1544 { WAKE_PHY
, Config3
, LinkUp
},
1545 { WAKE_MAGIC
, Config3
, MagicPacket
},
1546 { WAKE_UCAST
, Config5
, UWF
},
1547 { WAKE_BCAST
, Config5
, BWF
},
1548 { WAKE_MCAST
, Config5
, MWF
},
1549 { WAKE_ANY
, Config5
, LanWake
}
1553 RTL_W8(Cfg9346
, Cfg9346_Unlock
);
1555 for (i
= 0; i
< ARRAY_SIZE(cfg
); i
++) {
1556 options
= RTL_R8(cfg
[i
].reg
) & ~cfg
[i
].mask
;
1557 if (wolopts
& cfg
[i
].opt
)
1558 options
|= cfg
[i
].mask
;
1559 RTL_W8(cfg
[i
].reg
, options
);
1562 switch (tp
->mac_version
) {
1563 case RTL_GIGA_MAC_VER_01
... RTL_GIGA_MAC_VER_17
:
1564 options
= RTL_R8(Config1
) & ~PMEnable
;
1566 options
|= PMEnable
;
1567 RTL_W8(Config1
, options
);
1570 options
= RTL_R8(Config2
) & ~PME_SIGNAL
;
1572 options
|= PME_SIGNAL
;
1573 RTL_W8(Config2
, options
);
1577 RTL_W8(Cfg9346
, Cfg9346_Lock
);
1580 static int rtl8169_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
1582 struct rtl8169_private
*tp
= netdev_priv(dev
);
1587 tp
->features
|= RTL_FEATURE_WOL
;
1589 tp
->features
&= ~RTL_FEATURE_WOL
;
1590 __rtl8169_set_wol(tp
, wol
->wolopts
);
1592 rtl_unlock_work(tp
);
1594 device_set_wakeup_enable(&tp
->pci_dev
->dev
, wol
->wolopts
);
1599 static const char *rtl_lookup_firmware_name(struct rtl8169_private
*tp
)
1601 return rtl_chip_infos
[tp
->mac_version
].fw_name
;
1604 static void rtl8169_get_drvinfo(struct net_device
*dev
,
1605 struct ethtool_drvinfo
*info
)
1607 struct rtl8169_private
*tp
= netdev_priv(dev
);
1608 struct rtl_fw
*rtl_fw
= tp
->rtl_fw
;
1610 strlcpy(info
->driver
, MODULENAME
, sizeof(info
->driver
));
1611 strlcpy(info
->version
, RTL8169_VERSION
, sizeof(info
->version
));
1612 strlcpy(info
->bus_info
, pci_name(tp
->pci_dev
), sizeof(info
->bus_info
));
1613 BUILD_BUG_ON(sizeof(info
->fw_version
) < sizeof(rtl_fw
->version
));
1614 if (!IS_ERR_OR_NULL(rtl_fw
))
1615 strlcpy(info
->fw_version
, rtl_fw
->version
,
1616 sizeof(info
->fw_version
));
1619 static int rtl8169_get_regs_len(struct net_device
*dev
)
1621 return R8169_REGS_SIZE
;
1624 static int rtl8169_set_speed_tbi(struct net_device
*dev
,
1625 u8 autoneg
, u16 speed
, u8 duplex
, u32 ignored
)
1627 struct rtl8169_private
*tp
= netdev_priv(dev
);
1628 void __iomem
*ioaddr
= tp
->mmio_addr
;
1632 reg
= RTL_R32(TBICSR
);
1633 if ((autoneg
== AUTONEG_DISABLE
) && (speed
== SPEED_1000
) &&
1634 (duplex
== DUPLEX_FULL
)) {
1635 RTL_W32(TBICSR
, reg
& ~(TBINwEnable
| TBINwRestart
));
1636 } else if (autoneg
== AUTONEG_ENABLE
)
1637 RTL_W32(TBICSR
, reg
| TBINwEnable
| TBINwRestart
);
1639 netif_warn(tp
, link
, dev
,
1640 "incorrect speed setting refused in TBI mode\n");
1647 static int rtl8169_set_speed_xmii(struct net_device
*dev
,
1648 u8 autoneg
, u16 speed
, u8 duplex
, u32 adv
)
1650 struct rtl8169_private
*tp
= netdev_priv(dev
);
1651 int giga_ctrl
, bmcr
;
1654 rtl_writephy(tp
, 0x1f, 0x0000);
1656 if (autoneg
== AUTONEG_ENABLE
) {
1659 auto_nego
= rtl_readphy(tp
, MII_ADVERTISE
);
1660 auto_nego
&= ~(ADVERTISE_10HALF
| ADVERTISE_10FULL
|
1661 ADVERTISE_100HALF
| ADVERTISE_100FULL
);
1663 if (adv
& ADVERTISED_10baseT_Half
)
1664 auto_nego
|= ADVERTISE_10HALF
;
1665 if (adv
& ADVERTISED_10baseT_Full
)
1666 auto_nego
|= ADVERTISE_10FULL
;
1667 if (adv
& ADVERTISED_100baseT_Half
)
1668 auto_nego
|= ADVERTISE_100HALF
;
1669 if (adv
& ADVERTISED_100baseT_Full
)
1670 auto_nego
|= ADVERTISE_100FULL
;
1672 auto_nego
|= ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
;
1674 giga_ctrl
= rtl_readphy(tp
, MII_CTRL1000
);
1675 giga_ctrl
&= ~(ADVERTISE_1000FULL
| ADVERTISE_1000HALF
);
1677 /* The 8100e/8101e/8102e do Fast Ethernet only. */
1678 if (tp
->mii
.supports_gmii
) {
1679 if (adv
& ADVERTISED_1000baseT_Half
)
1680 giga_ctrl
|= ADVERTISE_1000HALF
;
1681 if (adv
& ADVERTISED_1000baseT_Full
)
1682 giga_ctrl
|= ADVERTISE_1000FULL
;
1683 } else if (adv
& (ADVERTISED_1000baseT_Half
|
1684 ADVERTISED_1000baseT_Full
)) {
1685 netif_info(tp
, link
, dev
,
1686 "PHY does not support 1000Mbps\n");
1690 bmcr
= BMCR_ANENABLE
| BMCR_ANRESTART
;
1692 rtl_writephy(tp
, MII_ADVERTISE
, auto_nego
);
1693 rtl_writephy(tp
, MII_CTRL1000
, giga_ctrl
);
1697 if (speed
== SPEED_10
)
1699 else if (speed
== SPEED_100
)
1700 bmcr
= BMCR_SPEED100
;
1704 if (duplex
== DUPLEX_FULL
)
1705 bmcr
|= BMCR_FULLDPLX
;
1708 rtl_writephy(tp
, MII_BMCR
, bmcr
);
1710 if (tp
->mac_version
== RTL_GIGA_MAC_VER_02
||
1711 tp
->mac_version
== RTL_GIGA_MAC_VER_03
) {
1712 if ((speed
== SPEED_100
) && (autoneg
!= AUTONEG_ENABLE
)) {
1713 rtl_writephy(tp
, 0x17, 0x2138);
1714 rtl_writephy(tp
, 0x0e, 0x0260);
1716 rtl_writephy(tp
, 0x17, 0x2108);
1717 rtl_writephy(tp
, 0x0e, 0x0000);
1726 static int rtl8169_set_speed(struct net_device
*dev
,
1727 u8 autoneg
, u16 speed
, u8 duplex
, u32 advertising
)
1729 struct rtl8169_private
*tp
= netdev_priv(dev
);
1732 ret
= tp
->set_speed(dev
, autoneg
, speed
, duplex
, advertising
);
1736 if (netif_running(dev
) && (autoneg
== AUTONEG_ENABLE
) &&
1737 (advertising
& ADVERTISED_1000baseT_Full
)) {
1738 mod_timer(&tp
->timer
, jiffies
+ RTL8169_PHY_TIMEOUT
);
1744 static int rtl8169_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
1746 struct rtl8169_private
*tp
= netdev_priv(dev
);
1749 del_timer_sync(&tp
->timer
);
1752 ret
= rtl8169_set_speed(dev
, cmd
->autoneg
, ethtool_cmd_speed(cmd
),
1753 cmd
->duplex
, cmd
->advertising
);
1754 rtl_unlock_work(tp
);
1759 static netdev_features_t
rtl8169_fix_features(struct net_device
*dev
,
1760 netdev_features_t features
)
1762 struct rtl8169_private
*tp
= netdev_priv(dev
);
1764 if (dev
->mtu
> TD_MSS_MAX
)
1765 features
&= ~NETIF_F_ALL_TSO
;
1767 if (dev
->mtu
> JUMBO_1K
&&
1768 !rtl_chip_infos
[tp
->mac_version
].jumbo_tx_csum
)
1769 features
&= ~NETIF_F_IP_CSUM
;
1774 static void __rtl8169_set_features(struct net_device
*dev
,
1775 netdev_features_t features
)
1777 struct rtl8169_private
*tp
= netdev_priv(dev
);
1778 netdev_features_t changed
= features
^ dev
->features
;
1779 void __iomem
*ioaddr
= tp
->mmio_addr
;
1781 if (!(changed
& (NETIF_F_RXALL
| NETIF_F_RXCSUM
| NETIF_F_HW_VLAN_RX
)))
1784 if (changed
& (NETIF_F_RXCSUM
| NETIF_F_HW_VLAN_RX
)) {
1785 if (features
& NETIF_F_RXCSUM
)
1786 tp
->cp_cmd
|= RxChkSum
;
1788 tp
->cp_cmd
&= ~RxChkSum
;
1790 if (dev
->features
& NETIF_F_HW_VLAN_RX
)
1791 tp
->cp_cmd
|= RxVlan
;
1793 tp
->cp_cmd
&= ~RxVlan
;
1795 RTL_W16(CPlusCmd
, tp
->cp_cmd
);
1798 if (changed
& NETIF_F_RXALL
) {
1799 int tmp
= (RTL_R32(RxConfig
) & ~(AcceptErr
| AcceptRunt
));
1800 if (features
& NETIF_F_RXALL
)
1801 tmp
|= (AcceptErr
| AcceptRunt
);
1802 RTL_W32(RxConfig
, tmp
);
1806 static int rtl8169_set_features(struct net_device
*dev
,
1807 netdev_features_t features
)
1809 struct rtl8169_private
*tp
= netdev_priv(dev
);
1812 __rtl8169_set_features(dev
, features
);
1813 rtl_unlock_work(tp
);
1819 static inline u32
rtl8169_tx_vlan_tag(struct rtl8169_private
*tp
,
1820 struct sk_buff
*skb
)
1822 return (vlan_tx_tag_present(skb
)) ?
1823 TxVlanTag
| swab16(vlan_tx_tag_get(skb
)) : 0x00;
1826 static void rtl8169_rx_vlan_tag(struct RxDesc
*desc
, struct sk_buff
*skb
)
1828 u32 opts2
= le32_to_cpu(desc
->opts2
);
1830 if (opts2
& RxVlanTag
)
1831 __vlan_hwaccel_put_tag(skb
, swab16(opts2
& 0xffff));
1836 static int rtl8169_gset_tbi(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
1838 struct rtl8169_private
*tp
= netdev_priv(dev
);
1839 void __iomem
*ioaddr
= tp
->mmio_addr
;
1843 SUPPORTED_1000baseT_Full
| SUPPORTED_Autoneg
| SUPPORTED_FIBRE
;
1844 cmd
->port
= PORT_FIBRE
;
1845 cmd
->transceiver
= XCVR_INTERNAL
;
1847 status
= RTL_R32(TBICSR
);
1848 cmd
->advertising
= (status
& TBINwEnable
) ? ADVERTISED_Autoneg
: 0;
1849 cmd
->autoneg
= !!(status
& TBINwEnable
);
1851 ethtool_cmd_speed_set(cmd
, SPEED_1000
);
1852 cmd
->duplex
= DUPLEX_FULL
; /* Always set */
1857 static int rtl8169_gset_xmii(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
1859 struct rtl8169_private
*tp
= netdev_priv(dev
);
1861 return mii_ethtool_gset(&tp
->mii
, cmd
);
1864 static int rtl8169_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
1866 struct rtl8169_private
*tp
= netdev_priv(dev
);
1870 rc
= tp
->get_settings(dev
, cmd
);
1871 rtl_unlock_work(tp
);
1876 static void rtl8169_get_regs(struct net_device
*dev
, struct ethtool_regs
*regs
,
1879 struct rtl8169_private
*tp
= netdev_priv(dev
);
1881 if (regs
->len
> R8169_REGS_SIZE
)
1882 regs
->len
= R8169_REGS_SIZE
;
1885 memcpy_fromio(p
, tp
->mmio_addr
, regs
->len
);
1886 rtl_unlock_work(tp
);
1889 static u32
rtl8169_get_msglevel(struct net_device
*dev
)
1891 struct rtl8169_private
*tp
= netdev_priv(dev
);
1893 return tp
->msg_enable
;
1896 static void rtl8169_set_msglevel(struct net_device
*dev
, u32 value
)
1898 struct rtl8169_private
*tp
= netdev_priv(dev
);
1900 tp
->msg_enable
= value
;
1903 static const char rtl8169_gstrings
[][ETH_GSTRING_LEN
] = {
1910 "tx_single_collisions",
1911 "tx_multi_collisions",
1919 static int rtl8169_get_sset_count(struct net_device
*dev
, int sset
)
1923 return ARRAY_SIZE(rtl8169_gstrings
);
1929 DECLARE_RTL_COND(rtl_counters_cond
)
1931 void __iomem
*ioaddr
= tp
->mmio_addr
;
1933 return RTL_R32(CounterAddrLow
) & CounterDump
;
1936 static void rtl8169_update_counters(struct net_device
*dev
)
1938 struct rtl8169_private
*tp
= netdev_priv(dev
);
1939 void __iomem
*ioaddr
= tp
->mmio_addr
;
1940 struct device
*d
= &tp
->pci_dev
->dev
;
1941 struct rtl8169_counters
*counters
;
1946 * Some chips are unable to dump tally counters when the receiver
1949 if ((RTL_R8(ChipCmd
) & CmdRxEnb
) == 0)
1952 counters
= dma_alloc_coherent(d
, sizeof(*counters
), &paddr
, GFP_KERNEL
);
1956 RTL_W32(CounterAddrHigh
, (u64
)paddr
>> 32);
1957 cmd
= (u64
)paddr
& DMA_BIT_MASK(32);
1958 RTL_W32(CounterAddrLow
, cmd
);
1959 RTL_W32(CounterAddrLow
, cmd
| CounterDump
);
1961 if (rtl_udelay_loop_wait_low(tp
, &rtl_counters_cond
, 10, 1000))
1962 memcpy(&tp
->counters
, counters
, sizeof(*counters
));
1964 RTL_W32(CounterAddrLow
, 0);
1965 RTL_W32(CounterAddrHigh
, 0);
1967 dma_free_coherent(d
, sizeof(*counters
), counters
, paddr
);
1970 static void rtl8169_get_ethtool_stats(struct net_device
*dev
,
1971 struct ethtool_stats
*stats
, u64
*data
)
1973 struct rtl8169_private
*tp
= netdev_priv(dev
);
1977 rtl8169_update_counters(dev
);
1979 data
[0] = le64_to_cpu(tp
->counters
.tx_packets
);
1980 data
[1] = le64_to_cpu(tp
->counters
.rx_packets
);
1981 data
[2] = le64_to_cpu(tp
->counters
.tx_errors
);
1982 data
[3] = le32_to_cpu(tp
->counters
.rx_errors
);
1983 data
[4] = le16_to_cpu(tp
->counters
.rx_missed
);
1984 data
[5] = le16_to_cpu(tp
->counters
.align_errors
);
1985 data
[6] = le32_to_cpu(tp
->counters
.tx_one_collision
);
1986 data
[7] = le32_to_cpu(tp
->counters
.tx_multi_collision
);
1987 data
[8] = le64_to_cpu(tp
->counters
.rx_unicast
);
1988 data
[9] = le64_to_cpu(tp
->counters
.rx_broadcast
);
1989 data
[10] = le32_to_cpu(tp
->counters
.rx_multicast
);
1990 data
[11] = le16_to_cpu(tp
->counters
.tx_aborted
);
1991 data
[12] = le16_to_cpu(tp
->counters
.tx_underun
);
1994 static void rtl8169_get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
1998 memcpy(data
, *rtl8169_gstrings
, sizeof(rtl8169_gstrings
));
2003 static const struct ethtool_ops rtl8169_ethtool_ops
= {
2004 .get_drvinfo
= rtl8169_get_drvinfo
,
2005 .get_regs_len
= rtl8169_get_regs_len
,
2006 .get_link
= ethtool_op_get_link
,
2007 .get_settings
= rtl8169_get_settings
,
2008 .set_settings
= rtl8169_set_settings
,
2009 .get_msglevel
= rtl8169_get_msglevel
,
2010 .set_msglevel
= rtl8169_set_msglevel
,
2011 .get_regs
= rtl8169_get_regs
,
2012 .get_wol
= rtl8169_get_wol
,
2013 .set_wol
= rtl8169_set_wol
,
2014 .get_strings
= rtl8169_get_strings
,
2015 .get_sset_count
= rtl8169_get_sset_count
,
2016 .get_ethtool_stats
= rtl8169_get_ethtool_stats
,
2017 .get_ts_info
= ethtool_op_get_ts_info
,
2020 static void rtl8169_get_mac_version(struct rtl8169_private
*tp
,
2021 struct net_device
*dev
, u8 default_version
)
2023 void __iomem
*ioaddr
= tp
->mmio_addr
;
2025 * The driver currently handles the 8168Bf and the 8168Be identically
2026 * but they can be identified more specifically through the test below
2029 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2031 * Same thing for the 8101Eb and the 8101Ec:
2033 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2035 static const struct rtl_mac_info
{
2041 { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41
},
2042 { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40
},
2045 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38
},
2046 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36
},
2047 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35
},
2050 { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34
},
2051 { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33
},
2052 { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32
},
2053 { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33
},
2056 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26
},
2057 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25
},
2058 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26
},
2060 /* 8168DP family. */
2061 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27
},
2062 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28
},
2063 { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31
},
2066 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24
},
2067 { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23
},
2068 { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18
},
2069 { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24
},
2070 { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19
},
2071 { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20
},
2072 { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21
},
2073 { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22
},
2074 { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22
},
2077 { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12
},
2078 { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17
},
2079 { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17
},
2080 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11
},
2083 { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39
},
2084 { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39
},
2085 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37
},
2086 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30
},
2087 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30
},
2088 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29
},
2089 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30
},
2090 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09
},
2091 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09
},
2092 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08
},
2093 { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08
},
2094 { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07
},
2095 { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07
},
2096 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13
},
2097 { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10
},
2098 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16
},
2099 { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09
},
2100 { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09
},
2101 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16
},
2102 /* FIXME: where did these entries come from ? -- FR */
2103 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15
},
2104 { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14
},
2107 { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06
},
2108 { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05
},
2109 { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04
},
2110 { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03
},
2111 { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02
},
2112 { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01
},
2115 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE
}
2117 const struct rtl_mac_info
*p
= mac_info
;
2120 reg
= RTL_R32(TxConfig
);
2121 while ((reg
& p
->mask
) != p
->val
)
2123 tp
->mac_version
= p
->mac_version
;
2125 if (tp
->mac_version
== RTL_GIGA_MAC_NONE
) {
2126 netif_notice(tp
, probe
, dev
,
2127 "unknown MAC, using family default\n");
2128 tp
->mac_version
= default_version
;
/*
 * Debug helper: log the detected MAC version.  dprintk() compiles to a
 * no-op unless RTL8169_DEBUG is defined (see the macro at the top of
 * the file).
 */
2132 static void rtl8169_print_mac_version(struct rtl8169_private
*tp
)
2134 dprintk("mac_version = 0x%02x\n", tp
->mac_version
);
2142 static void rtl_writephy_batch(struct rtl8169_private
*tp
,
2143 const struct phy_reg
*regs
, int len
)
2146 rtl_writephy(tp
, regs
->reg
, regs
->val
);
2151 #define PHY_READ 0x00000000
2152 #define PHY_DATA_OR 0x10000000
2153 #define PHY_DATA_AND 0x20000000
2154 #define PHY_BJMPN 0x30000000
2155 #define PHY_READ_EFUSE 0x40000000
2156 #define PHY_READ_MAC_BYTE 0x50000000
2157 #define PHY_WRITE_MAC_BYTE 0x60000000
2158 #define PHY_CLEAR_READCOUNT 0x70000000
2159 #define PHY_WRITE 0x80000000
2160 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2161 #define PHY_COMP_EQ_SKIPN 0xa0000000
2162 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2163 #define PHY_WRITE_PREVIOUS 0xc0000000
2164 #define PHY_SKIPN 0xd0000000
2165 #define PHY_DELAY_MS 0xe0000000
2166 #define PHY_WRITE_ERI_WORD 0xf0000000
2170 char version
[RTL_VER_SIZE
];
2176 #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
2178 static bool rtl_fw_format_ok(struct rtl8169_private
*tp
, struct rtl_fw
*rtl_fw
)
2180 const struct firmware
*fw
= rtl_fw
->fw
;
2181 struct fw_info
*fw_info
= (struct fw_info
*)fw
->data
;
2182 struct rtl_fw_phy_action
*pa
= &rtl_fw
->phy_action
;
2183 char *version
= rtl_fw
->version
;
2186 if (fw
->size
< FW_OPCODE_SIZE
)
2189 if (!fw_info
->magic
) {
2190 size_t i
, size
, start
;
2193 if (fw
->size
< sizeof(*fw_info
))
2196 for (i
= 0; i
< fw
->size
; i
++)
2197 checksum
+= fw
->data
[i
];
2201 start
= le32_to_cpu(fw_info
->fw_start
);
2202 if (start
> fw
->size
)
2205 size
= le32_to_cpu(fw_info
->fw_len
);
2206 if (size
> (fw
->size
- start
) / FW_OPCODE_SIZE
)
2209 memcpy(version
, fw_info
->version
, RTL_VER_SIZE
);
2211 pa
->code
= (__le32
*)(fw
->data
+ start
);
2214 if (fw
->size
% FW_OPCODE_SIZE
)
2217 strlcpy(version
, rtl_lookup_firmware_name(tp
), RTL_VER_SIZE
);
2219 pa
->code
= (__le32
*)fw
->data
;
2220 pa
->size
= fw
->size
/ FW_OPCODE_SIZE
;
2222 version
[RTL_VER_SIZE
- 1] = 0;
2229 static bool rtl_fw_data_ok(struct rtl8169_private
*tp
, struct net_device
*dev
,
2230 struct rtl_fw_phy_action
*pa
)
2235 for (index
= 0; index
< pa
->size
; index
++) {
2236 u32 action
= le32_to_cpu(pa
->code
[index
]);
2237 u32 regno
= (action
& 0x0fff0000) >> 16;
2239 switch(action
& 0xf0000000) {
2243 case PHY_READ_EFUSE
:
2244 case PHY_CLEAR_READCOUNT
:
2246 case PHY_WRITE_PREVIOUS
:
2251 if (regno
> index
) {
2252 netif_err(tp
, ifup
, tp
->dev
,
2253 "Out of range of firmware\n");
2257 case PHY_READCOUNT_EQ_SKIP
:
2258 if (index
+ 2 >= pa
->size
) {
2259 netif_err(tp
, ifup
, tp
->dev
,
2260 "Out of range of firmware\n");
2264 case PHY_COMP_EQ_SKIPN
:
2265 case PHY_COMP_NEQ_SKIPN
:
2267 if (index
+ 1 + regno
>= pa
->size
) {
2268 netif_err(tp
, ifup
, tp
->dev
,
2269 "Out of range of firmware\n");
2274 case PHY_READ_MAC_BYTE
:
2275 case PHY_WRITE_MAC_BYTE
:
2276 case PHY_WRITE_ERI_WORD
:
2278 netif_err(tp
, ifup
, tp
->dev
,
2279 "Invalid action 0x%08x\n", action
);
/*
 * Validate a loaded firmware image: first the container format
 * (rtl_fw_format_ok), then every PHY action opcode for out-of-range
 * jumps and unsupported actions (rtl_fw_data_ok).
 * NOTE(review): the visible error string misspells "firmware" as
 * "firwmare" -- worth fixing when the code itself can be edited.
 * NOTE(review): the error-path control flow (return-code variable,
 * goto/return lines) is not visible in this extract -- confirm against
 * the complete source.
 */
2288 static int rtl_check_firmware(struct rtl8169_private
*tp
, struct rtl_fw
*rtl_fw
)
2290 struct net_device
*dev
= tp
->dev
;
2293 if (!rtl_fw_format_ok(tp
, rtl_fw
)) {
2294 netif_err(tp
, ifup
, dev
, "invalid firwmare\n");
2298 if (rtl_fw_data_ok(tp
, dev
, &rtl_fw
->phy_action
))
2304 static void rtl_phy_write_fw(struct rtl8169_private
*tp
, struct rtl_fw
*rtl_fw
)
2306 struct rtl_fw_phy_action
*pa
= &rtl_fw
->phy_action
;
2310 predata
= count
= 0;
2312 for (index
= 0; index
< pa
->size
; ) {
2313 u32 action
= le32_to_cpu(pa
->code
[index
]);
2314 u32 data
= action
& 0x0000ffff;
2315 u32 regno
= (action
& 0x0fff0000) >> 16;
2320 switch(action
& 0xf0000000) {
2322 predata
= rtl_readphy(tp
, regno
);
2337 case PHY_READ_EFUSE
:
2338 predata
= rtl8168d_efuse_read(tp
, regno
);
2341 case PHY_CLEAR_READCOUNT
:
2346 rtl_writephy(tp
, regno
, data
);
2349 case PHY_READCOUNT_EQ_SKIP
:
2350 index
+= (count
== data
) ? 2 : 1;
2352 case PHY_COMP_EQ_SKIPN
:
2353 if (predata
== data
)
2357 case PHY_COMP_NEQ_SKIPN
:
2358 if (predata
!= data
)
2362 case PHY_WRITE_PREVIOUS
:
2363 rtl_writephy(tp
, regno
, predata
);
2374 case PHY_READ_MAC_BYTE
:
2375 case PHY_WRITE_MAC_BYTE
:
2376 case PHY_WRITE_ERI_WORD
:
/*
 * Release a previously loaded PHY firmware blob and reset the cached
 * pointer to the "unknown" sentinel so a later open can retry the load.
 * NOTE(review): a line freeing the rtl_fw wrapper structure itself
 * appears to be missing from this extract (original line 2387) --
 * confirm against the complete source.
 */
2383 static void rtl_release_firmware(struct rtl8169_private
*tp
)
2385 if (!IS_ERR_OR_NULL(tp
->rtl_fw
)) {
2386 release_firmware(tp
->rtl_fw
->fw
);
2389 tp
->rtl_fw
= RTL_FIRMWARE_UNKNOWN
;
2392 static void rtl_apply_firmware(struct rtl8169_private
*tp
)
2394 struct rtl_fw
*rtl_fw
= tp
->rtl_fw
;
2396 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2397 if (!IS_ERR_OR_NULL(rtl_fw
))
2398 rtl_phy_write_fw(tp
, rtl_fw
);
/*
 * Apply the PHY firmware only when the PHY readback of @reg matches the
 * expected @val; otherwise warn that the chipset is not ready for it.
 * NOTE(review): the branch structure between the warning and the
 * rtl_apply_firmware() call is not fully visible in this extract (an
 * `else` line appears to be missing) -- confirm against the complete
 * source.
 */
2401 static void rtl_apply_firmware_cond(struct rtl8169_private
*tp
, u8 reg
, u16 val
)
2403 if (rtl_readphy(tp
, reg
) != val
)
2404 netif_warn(tp
, hw
, tp
->dev
, "chipset not ready for firmware\n");
2406 rtl_apply_firmware(tp
);
2409 static void rtl8169s_hw_phy_config(struct rtl8169_private
*tp
)
2411 static const struct phy_reg phy_reg_init
[] = {
2473 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2476 static void rtl8169sb_hw_phy_config(struct rtl8169_private
*tp
)
2478 static const struct phy_reg phy_reg_init
[] = {
2484 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2487 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private
*tp
)
2489 struct pci_dev
*pdev
= tp
->pci_dev
;
2491 if ((pdev
->subsystem_vendor
!= PCI_VENDOR_ID_GIGABYTE
) ||
2492 (pdev
->subsystem_device
!= 0xe000))
2495 rtl_writephy(tp
, 0x1f, 0x0001);
2496 rtl_writephy(tp
, 0x10, 0xf01b);
2497 rtl_writephy(tp
, 0x1f, 0x0000);
2500 static void rtl8169scd_hw_phy_config(struct rtl8169_private
*tp
)
2502 static const struct phy_reg phy_reg_init
[] = {
2542 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2544 rtl8169scd_hw_phy_config_quirk(tp
);
2547 static void rtl8169sce_hw_phy_config(struct rtl8169_private
*tp
)
2549 static const struct phy_reg phy_reg_init
[] = {
2597 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2600 static void rtl8168bb_hw_phy_config(struct rtl8169_private
*tp
)
2602 static const struct phy_reg phy_reg_init
[] = {
2607 rtl_writephy(tp
, 0x1f, 0x0001);
2608 rtl_patchphy(tp
, 0x16, 1 << 0);
2610 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2613 static void rtl8168bef_hw_phy_config(struct rtl8169_private
*tp
)
2615 static const struct phy_reg phy_reg_init
[] = {
2621 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2624 static void rtl8168cp_1_hw_phy_config(struct rtl8169_private
*tp
)
2626 static const struct phy_reg phy_reg_init
[] = {
2634 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2637 static void rtl8168cp_2_hw_phy_config(struct rtl8169_private
*tp
)
2639 static const struct phy_reg phy_reg_init
[] = {
2645 rtl_writephy(tp
, 0x1f, 0x0000);
2646 rtl_patchphy(tp
, 0x14, 1 << 5);
2647 rtl_patchphy(tp
, 0x0d, 1 << 5);
2649 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2652 static void rtl8168c_1_hw_phy_config(struct rtl8169_private
*tp
)
2654 static const struct phy_reg phy_reg_init
[] = {
2674 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2676 rtl_patchphy(tp
, 0x14, 1 << 5);
2677 rtl_patchphy(tp
, 0x0d, 1 << 5);
2678 rtl_writephy(tp
, 0x1f, 0x0000);
2681 static void rtl8168c_2_hw_phy_config(struct rtl8169_private
*tp
)
2683 static const struct phy_reg phy_reg_init
[] = {
2701 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2703 rtl_patchphy(tp
, 0x16, 1 << 0);
2704 rtl_patchphy(tp
, 0x14, 1 << 5);
2705 rtl_patchphy(tp
, 0x0d, 1 << 5);
2706 rtl_writephy(tp
, 0x1f, 0x0000);
2709 static void rtl8168c_3_hw_phy_config(struct rtl8169_private
*tp
)
2711 static const struct phy_reg phy_reg_init
[] = {
2723 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2725 rtl_patchphy(tp
, 0x16, 1 << 0);
2726 rtl_patchphy(tp
, 0x14, 1 << 5);
2727 rtl_patchphy(tp
, 0x0d, 1 << 5);
2728 rtl_writephy(tp
, 0x1f, 0x0000);
2731 static void rtl8168c_4_hw_phy_config(struct rtl8169_private
*tp
)
2733 rtl8168c_3_hw_phy_config(tp
);
2736 static void rtl8168d_1_hw_phy_config(struct rtl8169_private
*tp
)
2738 static const struct phy_reg phy_reg_init_0
[] = {
2739 /* Channel Estimation */
2760 * Enhance line driver power
2769 * Can not link to 1Gbps with bad cable
2770 * Decrease SNR threshold form 21.07dB to 19.04dB
2779 rtl_writephy_batch(tp
, phy_reg_init_0
, ARRAY_SIZE(phy_reg_init_0
));
2783 * Fine Tune Switching regulator parameter
2785 rtl_writephy(tp
, 0x1f, 0x0002);
2786 rtl_w1w0_phy(tp
, 0x0b, 0x0010, 0x00ef);
2787 rtl_w1w0_phy(tp
, 0x0c, 0xa200, 0x5d00);
2789 if (rtl8168d_efuse_read(tp
, 0x01) == 0xb1) {
2790 static const struct phy_reg phy_reg_init
[] = {
2800 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2802 val
= rtl_readphy(tp
, 0x0d);
2804 if ((val
& 0x00ff) != 0x006c) {
2805 static const u32 set
[] = {
2806 0x0065, 0x0066, 0x0067, 0x0068,
2807 0x0069, 0x006a, 0x006b, 0x006c
2811 rtl_writephy(tp
, 0x1f, 0x0002);
2814 for (i
= 0; i
< ARRAY_SIZE(set
); i
++)
2815 rtl_writephy(tp
, 0x0d, val
| set
[i
]);
2818 static const struct phy_reg phy_reg_init
[] = {
2826 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2829 /* RSET couple improve */
2830 rtl_writephy(tp
, 0x1f, 0x0002);
2831 rtl_patchphy(tp
, 0x0d, 0x0300);
2832 rtl_patchphy(tp
, 0x0f, 0x0010);
2834 /* Fine tune PLL performance */
2835 rtl_writephy(tp
, 0x1f, 0x0002);
2836 rtl_w1w0_phy(tp
, 0x02, 0x0100, 0x0600);
2837 rtl_w1w0_phy(tp
, 0x03, 0x0000, 0xe000);
2839 rtl_writephy(tp
, 0x1f, 0x0005);
2840 rtl_writephy(tp
, 0x05, 0x001b);
2842 rtl_apply_firmware_cond(tp
, MII_EXPANSION
, 0xbf00);
2844 rtl_writephy(tp
, 0x1f, 0x0000);
2847 static void rtl8168d_2_hw_phy_config(struct rtl8169_private
*tp
)
2849 static const struct phy_reg phy_reg_init_0
[] = {
2850 /* Channel Estimation */
2871 * Enhance line driver power
2880 * Can not link to 1Gbps with bad cable
2881 * Decrease SNR threshold form 21.07dB to 19.04dB
2890 rtl_writephy_batch(tp
, phy_reg_init_0
, ARRAY_SIZE(phy_reg_init_0
));
2892 if (rtl8168d_efuse_read(tp
, 0x01) == 0xb1) {
2893 static const struct phy_reg phy_reg_init
[] = {
2904 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2906 val
= rtl_readphy(tp
, 0x0d);
2907 if ((val
& 0x00ff) != 0x006c) {
2908 static const u32 set
[] = {
2909 0x0065, 0x0066, 0x0067, 0x0068,
2910 0x0069, 0x006a, 0x006b, 0x006c
2914 rtl_writephy(tp
, 0x1f, 0x0002);
2917 for (i
= 0; i
< ARRAY_SIZE(set
); i
++)
2918 rtl_writephy(tp
, 0x0d, val
| set
[i
]);
2921 static const struct phy_reg phy_reg_init
[] = {
2929 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2932 /* Fine tune PLL performance */
2933 rtl_writephy(tp
, 0x1f, 0x0002);
2934 rtl_w1w0_phy(tp
, 0x02, 0x0100, 0x0600);
2935 rtl_w1w0_phy(tp
, 0x03, 0x0000, 0xe000);
2937 /* Switching regulator Slew rate */
2938 rtl_writephy(tp
, 0x1f, 0x0002);
2939 rtl_patchphy(tp
, 0x0f, 0x0017);
2941 rtl_writephy(tp
, 0x1f, 0x0005);
2942 rtl_writephy(tp
, 0x05, 0x001b);
2944 rtl_apply_firmware_cond(tp
, MII_EXPANSION
, 0xb300);
2946 rtl_writephy(tp
, 0x1f, 0x0000);
2949 static void rtl8168d_3_hw_phy_config(struct rtl8169_private
*tp
)
2951 static const struct phy_reg phy_reg_init
[] = {
3007 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3010 static void rtl8168d_4_hw_phy_config(struct rtl8169_private
*tp
)
3012 static const struct phy_reg phy_reg_init
[] = {
3022 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3023 rtl_patchphy(tp
, 0x0d, 1 << 5);
3026 static void rtl8168e_1_hw_phy_config(struct rtl8169_private
*tp
)
3028 static const struct phy_reg phy_reg_init
[] = {
3029 /* Enable Delay cap */
3035 /* Channel estimation fine tune */
3044 /* Update PFM & 10M TX idle timer */
3056 rtl_apply_firmware(tp
);
3058 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3060 /* DCO enable for 10M IDLE Power */
3061 rtl_writephy(tp
, 0x1f, 0x0007);
3062 rtl_writephy(tp
, 0x1e, 0x0023);
3063 rtl_w1w0_phy(tp
, 0x17, 0x0006, 0x0000);
3064 rtl_writephy(tp
, 0x1f, 0x0000);
3066 /* For impedance matching */
3067 rtl_writephy(tp
, 0x1f, 0x0002);
3068 rtl_w1w0_phy(tp
, 0x08, 0x8000, 0x7f00);
3069 rtl_writephy(tp
, 0x1f, 0x0000);
3071 /* PHY auto speed down */
3072 rtl_writephy(tp
, 0x1f, 0x0007);
3073 rtl_writephy(tp
, 0x1e, 0x002d);
3074 rtl_w1w0_phy(tp
, 0x18, 0x0050, 0x0000);
3075 rtl_writephy(tp
, 0x1f, 0x0000);
3076 rtl_w1w0_phy(tp
, 0x14, 0x8000, 0x0000);
3078 rtl_writephy(tp
, 0x1f, 0x0005);
3079 rtl_writephy(tp
, 0x05, 0x8b86);
3080 rtl_w1w0_phy(tp
, 0x06, 0x0001, 0x0000);
3081 rtl_writephy(tp
, 0x1f, 0x0000);
3083 rtl_writephy(tp
, 0x1f, 0x0005);
3084 rtl_writephy(tp
, 0x05, 0x8b85);
3085 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x2000);
3086 rtl_writephy(tp
, 0x1f, 0x0007);
3087 rtl_writephy(tp
, 0x1e, 0x0020);
3088 rtl_w1w0_phy(tp
, 0x15, 0x0000, 0x1100);
3089 rtl_writephy(tp
, 0x1f, 0x0006);
3090 rtl_writephy(tp
, 0x00, 0x5a00);
3091 rtl_writephy(tp
, 0x1f, 0x0000);
3092 rtl_writephy(tp
, 0x0d, 0x0007);
3093 rtl_writephy(tp
, 0x0e, 0x003c);
3094 rtl_writephy(tp
, 0x0d, 0x4007);
3095 rtl_writephy(tp
, 0x0e, 0x0000);
3096 rtl_writephy(tp
, 0x0d, 0x0000);
3099 static void rtl8168e_2_hw_phy_config(struct rtl8169_private
*tp
)
3101 static const struct phy_reg phy_reg_init
[] = {
3102 /* Enable Delay cap */
3111 /* Channel estimation fine tune */
3128 rtl_apply_firmware(tp
);
3130 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3132 /* For 4-corner performance improve */
3133 rtl_writephy(tp
, 0x1f, 0x0005);
3134 rtl_writephy(tp
, 0x05, 0x8b80);
3135 rtl_w1w0_phy(tp
, 0x17, 0x0006, 0x0000);
3136 rtl_writephy(tp
, 0x1f, 0x0000);
3138 /* PHY auto speed down */
3139 rtl_writephy(tp
, 0x1f, 0x0004);
3140 rtl_writephy(tp
, 0x1f, 0x0007);
3141 rtl_writephy(tp
, 0x1e, 0x002d);
3142 rtl_w1w0_phy(tp
, 0x18, 0x0010, 0x0000);
3143 rtl_writephy(tp
, 0x1f, 0x0002);
3144 rtl_writephy(tp
, 0x1f, 0x0000);
3145 rtl_w1w0_phy(tp
, 0x14, 0x8000, 0x0000);
3147 /* improve 10M EEE waveform */
3148 rtl_writephy(tp
, 0x1f, 0x0005);
3149 rtl_writephy(tp
, 0x05, 0x8b86);
3150 rtl_w1w0_phy(tp
, 0x06, 0x0001, 0x0000);
3151 rtl_writephy(tp
, 0x1f, 0x0000);
3153 /* Improve 2-pair detection performance */
3154 rtl_writephy(tp
, 0x1f, 0x0005);
3155 rtl_writephy(tp
, 0x05, 0x8b85);
3156 rtl_w1w0_phy(tp
, 0x06, 0x4000, 0x0000);
3157 rtl_writephy(tp
, 0x1f, 0x0000);
3160 rtl_w1w0_eri(tp
, 0x1b0, ERIAR_MASK_1111
, 0x0000, 0x0003, ERIAR_EXGMAC
);
3161 rtl_writephy(tp
, 0x1f, 0x0005);
3162 rtl_writephy(tp
, 0x05, 0x8b85);
3163 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x2000);
3164 rtl_writephy(tp
, 0x1f, 0x0004);
3165 rtl_writephy(tp
, 0x1f, 0x0007);
3166 rtl_writephy(tp
, 0x1e, 0x0020);
3167 rtl_w1w0_phy(tp
, 0x15, 0x0000, 0x0100);
3168 rtl_writephy(tp
, 0x1f, 0x0002);
3169 rtl_writephy(tp
, 0x1f, 0x0000);
3170 rtl_writephy(tp
, 0x0d, 0x0007);
3171 rtl_writephy(tp
, 0x0e, 0x003c);
3172 rtl_writephy(tp
, 0x0d, 0x4007);
3173 rtl_writephy(tp
, 0x0e, 0x0000);
3174 rtl_writephy(tp
, 0x0d, 0x0000);
3177 rtl_writephy(tp
, 0x1f, 0x0003);
3178 rtl_w1w0_phy(tp
, 0x19, 0x0000, 0x0001);
3179 rtl_w1w0_phy(tp
, 0x10, 0x0000, 0x0400);
3180 rtl_writephy(tp
, 0x1f, 0x0000);
3183 static void rtl8168f_hw_phy_config(struct rtl8169_private
*tp
)
3185 /* For 4-corner performance improve */
3186 rtl_writephy(tp
, 0x1f, 0x0005);
3187 rtl_writephy(tp
, 0x05, 0x8b80);
3188 rtl_w1w0_phy(tp
, 0x06, 0x0006, 0x0000);
3189 rtl_writephy(tp
, 0x1f, 0x0000);
3191 /* PHY auto speed down */
3192 rtl_writephy(tp
, 0x1f, 0x0007);
3193 rtl_writephy(tp
, 0x1e, 0x002d);
3194 rtl_w1w0_phy(tp
, 0x18, 0x0010, 0x0000);
3195 rtl_writephy(tp
, 0x1f, 0x0000);
3196 rtl_w1w0_phy(tp
, 0x14, 0x8000, 0x0000);
3198 /* Improve 10M EEE waveform */
3199 rtl_writephy(tp
, 0x1f, 0x0005);
3200 rtl_writephy(tp
, 0x05, 0x8b86);
3201 rtl_w1w0_phy(tp
, 0x06, 0x0001, 0x0000);
3202 rtl_writephy(tp
, 0x1f, 0x0000);
3205 static void rtl8168f_1_hw_phy_config(struct rtl8169_private
*tp
)
3207 static const struct phy_reg phy_reg_init
[] = {
3208 /* Channel estimation fine tune */
3213 /* Modify green table for giga & fnet */
3230 /* Modify green table for 10M */
3236 /* Disable hiimpedance detection (RTCT) */
3242 rtl_apply_firmware(tp
);
3244 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3246 rtl8168f_hw_phy_config(tp
);
3248 /* Improve 2-pair detection performance */
3249 rtl_writephy(tp
, 0x1f, 0x0005);
3250 rtl_writephy(tp
, 0x05, 0x8b85);
3251 rtl_w1w0_phy(tp
, 0x06, 0x4000, 0x0000);
3252 rtl_writephy(tp
, 0x1f, 0x0000);
3255 static void rtl8168f_2_hw_phy_config(struct rtl8169_private
*tp
)
3257 rtl_apply_firmware(tp
);
3259 rtl8168f_hw_phy_config(tp
);
3262 static void rtl8411_hw_phy_config(struct rtl8169_private
*tp
)
3264 static const struct phy_reg phy_reg_init
[] = {
3265 /* Channel estimation fine tune */
3270 /* Modify green table for giga & fnet */
3287 /* Modify green table for 10M */
3293 /* Disable hiimpedance detection (RTCT) */
3300 rtl_apply_firmware(tp
);
3302 rtl8168f_hw_phy_config(tp
);
3304 /* Improve 2-pair detection performance */
3305 rtl_writephy(tp
, 0x1f, 0x0005);
3306 rtl_writephy(tp
, 0x05, 0x8b85);
3307 rtl_w1w0_phy(tp
, 0x06, 0x4000, 0x0000);
3308 rtl_writephy(tp
, 0x1f, 0x0000);
3310 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3312 /* Modify green table for giga */
3313 rtl_writephy(tp
, 0x1f, 0x0005);
3314 rtl_writephy(tp
, 0x05, 0x8b54);
3315 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x0800);
3316 rtl_writephy(tp
, 0x05, 0x8b5d);
3317 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x0800);
3318 rtl_writephy(tp
, 0x05, 0x8a7c);
3319 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x0100);
3320 rtl_writephy(tp
, 0x05, 0x8a7f);
3321 rtl_w1w0_phy(tp
, 0x06, 0x0100, 0x0000);
3322 rtl_writephy(tp
, 0x05, 0x8a82);
3323 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x0100);
3324 rtl_writephy(tp
, 0x05, 0x8a85);
3325 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x0100);
3326 rtl_writephy(tp
, 0x05, 0x8a88);
3327 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x0100);
3328 rtl_writephy(tp
, 0x1f, 0x0000);
3330 /* uc same-seed solution */
3331 rtl_writephy(tp
, 0x1f, 0x0005);
3332 rtl_writephy(tp
, 0x05, 0x8b85);
3333 rtl_w1w0_phy(tp
, 0x06, 0x8000, 0x0000);
3334 rtl_writephy(tp
, 0x1f, 0x0000);
3337 rtl_w1w0_eri(tp
, 0x1b0, ERIAR_MASK_0001
, 0x00, 0x03, ERIAR_EXGMAC
);
3338 rtl_writephy(tp
, 0x1f, 0x0005);
3339 rtl_writephy(tp
, 0x05, 0x8b85);
3340 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x2000);
3341 rtl_writephy(tp
, 0x1f, 0x0004);
3342 rtl_writephy(tp
, 0x1f, 0x0007);
3343 rtl_writephy(tp
, 0x1e, 0x0020);
3344 rtl_w1w0_phy(tp
, 0x15, 0x0000, 0x0100);
3345 rtl_writephy(tp
, 0x1f, 0x0000);
3346 rtl_writephy(tp
, 0x0d, 0x0007);
3347 rtl_writephy(tp
, 0x0e, 0x003c);
3348 rtl_writephy(tp
, 0x0d, 0x4007);
3349 rtl_writephy(tp
, 0x0e, 0x0000);
3350 rtl_writephy(tp
, 0x0d, 0x0000);
3353 rtl_writephy(tp
, 0x1f, 0x0003);
3354 rtl_w1w0_phy(tp
, 0x19, 0x0000, 0x0001);
3355 rtl_w1w0_phy(tp
, 0x10, 0x0000, 0x0400);
3356 rtl_writephy(tp
, 0x1f, 0x0000);
3359 static void rtl8168g_1_hw_phy_config(struct rtl8169_private
*tp
)
3361 static const u16 mac_ocp_patch
[] = {
3362 0xe008, 0xe01b, 0xe01d, 0xe01f,
3363 0xe021, 0xe023, 0xe025, 0xe027,
3364 0x49d2, 0xf10d, 0x766c, 0x49e2,
3365 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
3367 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
3368 0xc707, 0x8ee1, 0x9d6c, 0xc603,
3369 0xbe00, 0xb416, 0x0076, 0xe86c,
3370 0xc602, 0xbe00, 0x0000, 0xc602,
3372 0xbe00, 0x0000, 0xc602, 0xbe00,
3373 0x0000, 0xc602, 0xbe00, 0x0000,
3374 0xc602, 0xbe00, 0x0000, 0xc602,
3375 0xbe00, 0x0000, 0xc602, 0xbe00,
3377 0x0000, 0x0000, 0x0000, 0x0000
3381 /* Patch code for GPHY reset */
3382 for (i
= 0; i
< ARRAY_SIZE(mac_ocp_patch
); i
++)
3383 r8168_mac_ocp_write(tp
, 0xf800 + 2*i
, mac_ocp_patch
[i
]);
3384 r8168_mac_ocp_write(tp
, 0xfc26, 0x8000);
3385 r8168_mac_ocp_write(tp
, 0xfc28, 0x0075);
3387 rtl_apply_firmware(tp
);
3389 if (r8168_phy_ocp_read(tp
, 0xa460) & 0x0100)
3390 rtl_w1w0_phy_ocp(tp
, 0xbcc4, 0x0000, 0x8000);
3392 rtl_w1w0_phy_ocp(tp
, 0xbcc4, 0x8000, 0x0000);
3394 if (r8168_phy_ocp_read(tp
, 0xa466) & 0x0100)
3395 rtl_w1w0_phy_ocp(tp
, 0xc41a, 0x0002, 0x0000);
3397 rtl_w1w0_phy_ocp(tp
, 0xbcc4, 0x0000, 0x0002);
3399 rtl_w1w0_phy_ocp(tp
, 0xa442, 0x000c, 0x0000);
3400 rtl_w1w0_phy_ocp(tp
, 0xa4b2, 0x0004, 0x0000);
3402 r8168_phy_ocp_write(tp
, 0xa436, 0x8012);
3403 rtl_w1w0_phy_ocp(tp
, 0xa438, 0x8000, 0x0000);
3405 rtl_w1w0_phy_ocp(tp
, 0xc422, 0x4000, 0x2000);
3408 static void rtl8102e_hw_phy_config(struct rtl8169_private
*tp
)
3410 static const struct phy_reg phy_reg_init
[] = {
3417 rtl_writephy(tp
, 0x1f, 0x0000);
3418 rtl_patchphy(tp
, 0x11, 1 << 12);
3419 rtl_patchphy(tp
, 0x19, 1 << 13);
3420 rtl_patchphy(tp
, 0x10, 1 << 15);
3422 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3425 static void rtl8105e_hw_phy_config(struct rtl8169_private
*tp
)
3427 static const struct phy_reg phy_reg_init
[] = {
3441 /* Disable ALDPS before ram code */
3442 rtl_writephy(tp
, 0x1f, 0x0000);
3443 rtl_writephy(tp
, 0x18, 0x0310);
3446 rtl_apply_firmware(tp
);
3448 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3451 static void rtl8402_hw_phy_config(struct rtl8169_private
*tp
)
3453 /* Disable ALDPS before setting firmware */
3454 rtl_writephy(tp
, 0x1f, 0x0000);
3455 rtl_writephy(tp
, 0x18, 0x0310);
3458 rtl_apply_firmware(tp
);
3461 rtl_eri_write(tp
, 0x1b0, ERIAR_MASK_0011
, 0x0000, ERIAR_EXGMAC
);
3462 rtl_writephy(tp
, 0x1f, 0x0004);
3463 rtl_writephy(tp
, 0x10, 0x401f);
3464 rtl_writephy(tp
, 0x19, 0x7030);
3465 rtl_writephy(tp
, 0x1f, 0x0000);
3468 static void rtl8106e_hw_phy_config(struct rtl8169_private
*tp
)
3470 static const struct phy_reg phy_reg_init
[] = {
3477 /* Disable ALDPS before ram code */
3478 rtl_writephy(tp
, 0x1f, 0x0000);
3479 rtl_writephy(tp
, 0x18, 0x0310);
3482 rtl_apply_firmware(tp
);
3484 rtl_eri_write(tp
, 0x1b0, ERIAR_MASK_0011
, 0x0000, ERIAR_EXGMAC
);
3485 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3487 rtl_eri_write(tp
, 0x1d0, ERIAR_MASK_0011
, 0x0000, ERIAR_EXGMAC
);
3490 static void rtl_hw_phy_config(struct net_device
*dev
)
3492 struct rtl8169_private
*tp
= netdev_priv(dev
);
3494 rtl8169_print_mac_version(tp
);
3496 switch (tp
->mac_version
) {
3497 case RTL_GIGA_MAC_VER_01
:
3499 case RTL_GIGA_MAC_VER_02
:
3500 case RTL_GIGA_MAC_VER_03
:
3501 rtl8169s_hw_phy_config(tp
);
3503 case RTL_GIGA_MAC_VER_04
:
3504 rtl8169sb_hw_phy_config(tp
);
3506 case RTL_GIGA_MAC_VER_05
:
3507 rtl8169scd_hw_phy_config(tp
);
3509 case RTL_GIGA_MAC_VER_06
:
3510 rtl8169sce_hw_phy_config(tp
);
3512 case RTL_GIGA_MAC_VER_07
:
3513 case RTL_GIGA_MAC_VER_08
:
3514 case RTL_GIGA_MAC_VER_09
:
3515 rtl8102e_hw_phy_config(tp
);
3517 case RTL_GIGA_MAC_VER_11
:
3518 rtl8168bb_hw_phy_config(tp
);
3520 case RTL_GIGA_MAC_VER_12
:
3521 rtl8168bef_hw_phy_config(tp
);
3523 case RTL_GIGA_MAC_VER_17
:
3524 rtl8168bef_hw_phy_config(tp
);
3526 case RTL_GIGA_MAC_VER_18
:
3527 rtl8168cp_1_hw_phy_config(tp
);
3529 case RTL_GIGA_MAC_VER_19
:
3530 rtl8168c_1_hw_phy_config(tp
);
3532 case RTL_GIGA_MAC_VER_20
:
3533 rtl8168c_2_hw_phy_config(tp
);
3535 case RTL_GIGA_MAC_VER_21
:
3536 rtl8168c_3_hw_phy_config(tp
);
3538 case RTL_GIGA_MAC_VER_22
:
3539 rtl8168c_4_hw_phy_config(tp
);
3541 case RTL_GIGA_MAC_VER_23
:
3542 case RTL_GIGA_MAC_VER_24
:
3543 rtl8168cp_2_hw_phy_config(tp
);
3545 case RTL_GIGA_MAC_VER_25
:
3546 rtl8168d_1_hw_phy_config(tp
);
3548 case RTL_GIGA_MAC_VER_26
:
3549 rtl8168d_2_hw_phy_config(tp
);
3551 case RTL_GIGA_MAC_VER_27
:
3552 rtl8168d_3_hw_phy_config(tp
);
3554 case RTL_GIGA_MAC_VER_28
:
3555 rtl8168d_4_hw_phy_config(tp
);
3557 case RTL_GIGA_MAC_VER_29
:
3558 case RTL_GIGA_MAC_VER_30
:
3559 rtl8105e_hw_phy_config(tp
);
3561 case RTL_GIGA_MAC_VER_31
:
3564 case RTL_GIGA_MAC_VER_32
:
3565 case RTL_GIGA_MAC_VER_33
:
3566 rtl8168e_1_hw_phy_config(tp
);
3568 case RTL_GIGA_MAC_VER_34
:
3569 rtl8168e_2_hw_phy_config(tp
);
3571 case RTL_GIGA_MAC_VER_35
:
3572 rtl8168f_1_hw_phy_config(tp
);
3574 case RTL_GIGA_MAC_VER_36
:
3575 rtl8168f_2_hw_phy_config(tp
);
3578 case RTL_GIGA_MAC_VER_37
:
3579 rtl8402_hw_phy_config(tp
);
3582 case RTL_GIGA_MAC_VER_38
:
3583 rtl8411_hw_phy_config(tp
);
3586 case RTL_GIGA_MAC_VER_39
:
3587 rtl8106e_hw_phy_config(tp
);
3590 case RTL_GIGA_MAC_VER_40
:
3591 rtl8168g_1_hw_phy_config(tp
);
3594 case RTL_GIGA_MAC_VER_41
:
3600 static void rtl_phy_work(struct rtl8169_private
*tp
)
3602 struct timer_list
*timer
= &tp
->timer
;
3603 void __iomem
*ioaddr
= tp
->mmio_addr
;
3604 unsigned long timeout
= RTL8169_PHY_TIMEOUT
;
3606 assert(tp
->mac_version
> RTL_GIGA_MAC_VER_01
);
3608 if (tp
->phy_reset_pending(tp
)) {
3610 * A busy loop could burn quite a few cycles on nowadays CPU.
3611 * Let's delay the execution of the timer for a few ticks.
3617 if (tp
->link_ok(ioaddr
))
3620 netif_warn(tp
, link
, tp
->dev
, "PHY reset until link up\n");
3622 tp
->phy_reset_enable(tp
);
3625 mod_timer(timer
, jiffies
+ timeout
);
3628 static void rtl_schedule_task(struct rtl8169_private
*tp
, enum rtl_flag flag
)
3630 if (!test_and_set_bit(flag
, tp
->wk
.flags
))
3631 schedule_work(&tp
->wk
.work
);
3634 static void rtl8169_phy_timer(unsigned long __opaque
)
3636 struct net_device
*dev
= (struct net_device
*)__opaque
;
3637 struct rtl8169_private
*tp
= netdev_priv(dev
);
3639 rtl_schedule_task(tp
, RTL_FLAG_TASK_PHY_PENDING
);
3642 static void rtl8169_release_board(struct pci_dev
*pdev
, struct net_device
*dev
,
3643 void __iomem
*ioaddr
)
3646 pci_release_regions(pdev
);
3647 pci_clear_mwi(pdev
);
3648 pci_disable_device(pdev
);
3652 DECLARE_RTL_COND(rtl_phy_reset_cond
)
3654 return tp
->phy_reset_pending(tp
);
3657 static void rtl8169_phy_reset(struct net_device
*dev
,
3658 struct rtl8169_private
*tp
)
3660 tp
->phy_reset_enable(tp
);
3661 rtl_msleep_loop_wait_low(tp
, &rtl_phy_reset_cond
, 1, 100);
3664 static bool rtl_tbi_enabled(struct rtl8169_private
*tp
)
3666 void __iomem
*ioaddr
= tp
->mmio_addr
;
3668 return (tp
->mac_version
== RTL_GIGA_MAC_VER_01
) &&
3669 (RTL_R8(PHYstatus
) & TBI_Enable
);
3672 static void rtl8169_init_phy(struct net_device
*dev
, struct rtl8169_private
*tp
)
3674 void __iomem
*ioaddr
= tp
->mmio_addr
;
3676 rtl_hw_phy_config(dev
);
3678 if (tp
->mac_version
<= RTL_GIGA_MAC_VER_06
) {
3679 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3683 pci_write_config_byte(tp
->pci_dev
, PCI_LATENCY_TIMER
, 0x40);
3685 if (tp
->mac_version
<= RTL_GIGA_MAC_VER_06
)
3686 pci_write_config_byte(tp
->pci_dev
, PCI_CACHE_LINE_SIZE
, 0x08);
3688 if (tp
->mac_version
== RTL_GIGA_MAC_VER_02
) {
3689 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3691 dprintk("Set PHY Reg 0x0bh = 0x00h\n");
3692 rtl_writephy(tp
, 0x0b, 0x0000); //w 0x0b 15 0 0
3695 rtl8169_phy_reset(dev
, tp
);
3697 rtl8169_set_speed(dev
, AUTONEG_ENABLE
, SPEED_1000
, DUPLEX_FULL
,
3698 ADVERTISED_10baseT_Half
| ADVERTISED_10baseT_Full
|
3699 ADVERTISED_100baseT_Half
| ADVERTISED_100baseT_Full
|
3700 (tp
->mii
.supports_gmii
?
3701 ADVERTISED_1000baseT_Half
|
3702 ADVERTISED_1000baseT_Full
: 0));
3704 if (rtl_tbi_enabled(tp
))
3705 netif_info(tp
, link
, dev
, "TBI auto-negotiating\n");
3708 static void rtl_rar_set(struct rtl8169_private
*tp
, u8
*addr
)
3710 void __iomem
*ioaddr
= tp
->mmio_addr
;
3714 low
= addr
[0] | (addr
[1] << 8) | (addr
[2] << 16) | (addr
[3] << 24);
3715 high
= addr
[4] | (addr
[5] << 8);
3719 RTL_W8(Cfg9346
, Cfg9346_Unlock
);
3721 RTL_W32(MAC4
, high
);
3727 if (tp
->mac_version
== RTL_GIGA_MAC_VER_34
) {
3728 const struct exgmac_reg e
[] = {
3729 { .addr
= 0xe0, ERIAR_MASK_1111
, .val
= low
},
3730 { .addr
= 0xe4, ERIAR_MASK_1111
, .val
= high
},
3731 { .addr
= 0xf0, ERIAR_MASK_1111
, .val
= low
<< 16 },
3732 { .addr
= 0xf4, ERIAR_MASK_1111
, .val
= high
<< 16 |
3736 rtl_write_exgmac_batch(tp
, e
, ARRAY_SIZE(e
));
3739 RTL_W8(Cfg9346
, Cfg9346_Lock
);
3741 rtl_unlock_work(tp
);
3744 static int rtl_set_mac_address(struct net_device
*dev
, void *p
)
3746 struct rtl8169_private
*tp
= netdev_priv(dev
);
3747 struct sockaddr
*addr
= p
;
3749 if (!is_valid_ether_addr(addr
->sa_data
))
3750 return -EADDRNOTAVAIL
;
3752 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
3754 rtl_rar_set(tp
, dev
->dev_addr
);
3759 static int rtl8169_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
3761 struct rtl8169_private
*tp
= netdev_priv(dev
);
3762 struct mii_ioctl_data
*data
= if_mii(ifr
);
3764 return netif_running(dev
) ? tp
->do_ioctl(tp
, data
, cmd
) : -ENODEV
;
3767 static int rtl_xmii_ioctl(struct rtl8169_private
*tp
,
3768 struct mii_ioctl_data
*data
, int cmd
)
3772 data
->phy_id
= 32; /* Internal PHY */
3776 data
->val_out
= rtl_readphy(tp
, data
->reg_num
& 0x1f);
3780 rtl_writephy(tp
, data
->reg_num
& 0x1f, data
->val_in
);
3786 static int rtl_tbi_ioctl(struct rtl8169_private
*tp
, struct mii_ioctl_data
*data
, int cmd
)
3791 static void rtl_disable_msi(struct pci_dev
*pdev
, struct rtl8169_private
*tp
)
3793 if (tp
->features
& RTL_FEATURE_MSI
) {
3794 pci_disable_msi(pdev
);
3795 tp
->features
&= ~RTL_FEATURE_MSI
;
3799 static void __devinit
rtl_init_mdio_ops(struct rtl8169_private
*tp
)
3801 struct mdio_ops
*ops
= &tp
->mdio_ops
;
3803 switch (tp
->mac_version
) {
3804 case RTL_GIGA_MAC_VER_27
:
3805 ops
->write
= r8168dp_1_mdio_write
;
3806 ops
->read
= r8168dp_1_mdio_read
;
3808 case RTL_GIGA_MAC_VER_28
:
3809 case RTL_GIGA_MAC_VER_31
:
3810 ops
->write
= r8168dp_2_mdio_write
;
3811 ops
->read
= r8168dp_2_mdio_read
;
3813 case RTL_GIGA_MAC_VER_40
:
3814 case RTL_GIGA_MAC_VER_41
:
3815 ops
->write
= r8168g_mdio_write
;
3816 ops
->read
= r8168g_mdio_read
;
3819 ops
->write
= r8169_mdio_write
;
3820 ops
->read
= r8169_mdio_read
;
3825 static void rtl_wol_suspend_quirk(struct rtl8169_private
*tp
)
3827 void __iomem
*ioaddr
= tp
->mmio_addr
;
3829 switch (tp
->mac_version
) {
3830 case RTL_GIGA_MAC_VER_29
:
3831 case RTL_GIGA_MAC_VER_30
:
3832 case RTL_GIGA_MAC_VER_32
:
3833 case RTL_GIGA_MAC_VER_33
:
3834 case RTL_GIGA_MAC_VER_34
:
3835 case RTL_GIGA_MAC_VER_37
:
3836 case RTL_GIGA_MAC_VER_38
:
3837 case RTL_GIGA_MAC_VER_39
:
3838 case RTL_GIGA_MAC_VER_40
:
3839 case RTL_GIGA_MAC_VER_41
:
3840 RTL_W32(RxConfig
, RTL_R32(RxConfig
) |
3841 AcceptBroadcast
| AcceptMulticast
| AcceptMyPhys
);
3848 static bool rtl_wol_pll_power_down(struct rtl8169_private
*tp
)
3850 if (!(__rtl8169_get_wol(tp
) & WAKE_ANY
))
3853 rtl_writephy(tp
, 0x1f, 0x0000);
3854 rtl_writephy(tp
, MII_BMCR
, 0x0000);
3856 rtl_wol_suspend_quirk(tp
);
3861 static void r810x_phy_power_down(struct rtl8169_private
*tp
)
3863 rtl_writephy(tp
, 0x1f, 0x0000);
3864 rtl_writephy(tp
, MII_BMCR
, BMCR_PDOWN
);
3867 static void r810x_phy_power_up(struct rtl8169_private
*tp
)
3869 rtl_writephy(tp
, 0x1f, 0x0000);
3870 rtl_writephy(tp
, MII_BMCR
, BMCR_ANENABLE
);
3873 static void r810x_pll_power_down(struct rtl8169_private
*tp
)
3875 void __iomem
*ioaddr
= tp
->mmio_addr
;
3877 if (rtl_wol_pll_power_down(tp
))
3880 r810x_phy_power_down(tp
);
3882 switch (tp
->mac_version
) {
3883 case RTL_GIGA_MAC_VER_07
:
3884 case RTL_GIGA_MAC_VER_08
:
3885 case RTL_GIGA_MAC_VER_09
:
3886 case RTL_GIGA_MAC_VER_10
:
3887 case RTL_GIGA_MAC_VER_13
:
3888 case RTL_GIGA_MAC_VER_16
:
3891 RTL_W8(PMCH
, RTL_R8(PMCH
) & ~0x80);
3896 static void r810x_pll_power_up(struct rtl8169_private
*tp
)
3898 void __iomem
*ioaddr
= tp
->mmio_addr
;
3900 r810x_phy_power_up(tp
);
3902 switch (tp
->mac_version
) {
3903 case RTL_GIGA_MAC_VER_07
:
3904 case RTL_GIGA_MAC_VER_08
:
3905 case RTL_GIGA_MAC_VER_09
:
3906 case RTL_GIGA_MAC_VER_10
:
3907 case RTL_GIGA_MAC_VER_13
:
3908 case RTL_GIGA_MAC_VER_16
:
3911 RTL_W8(PMCH
, RTL_R8(PMCH
) | 0x80);
3916 static void r8168_phy_power_up(struct rtl8169_private
*tp
)
3918 rtl_writephy(tp
, 0x1f, 0x0000);
3919 switch (tp
->mac_version
) {
3920 case RTL_GIGA_MAC_VER_11
:
3921 case RTL_GIGA_MAC_VER_12
:
3922 case RTL_GIGA_MAC_VER_17
:
3923 case RTL_GIGA_MAC_VER_18
:
3924 case RTL_GIGA_MAC_VER_19
:
3925 case RTL_GIGA_MAC_VER_20
:
3926 case RTL_GIGA_MAC_VER_21
:
3927 case RTL_GIGA_MAC_VER_22
:
3928 case RTL_GIGA_MAC_VER_23
:
3929 case RTL_GIGA_MAC_VER_24
:
3930 case RTL_GIGA_MAC_VER_25
:
3931 case RTL_GIGA_MAC_VER_26
:
3932 case RTL_GIGA_MAC_VER_27
:
3933 case RTL_GIGA_MAC_VER_28
:
3934 case RTL_GIGA_MAC_VER_31
:
3935 rtl_writephy(tp
, 0x0e, 0x0000);
3940 rtl_writephy(tp
, MII_BMCR
, BMCR_ANENABLE
);
3943 static void r8168_phy_power_down(struct rtl8169_private
*tp
)
3945 rtl_writephy(tp
, 0x1f, 0x0000);
3946 switch (tp
->mac_version
) {
3947 case RTL_GIGA_MAC_VER_32
:
3948 case RTL_GIGA_MAC_VER_33
:
3949 rtl_writephy(tp
, MII_BMCR
, BMCR_ANENABLE
| BMCR_PDOWN
);
3952 case RTL_GIGA_MAC_VER_11
:
3953 case RTL_GIGA_MAC_VER_12
:
3954 case RTL_GIGA_MAC_VER_17
:
3955 case RTL_GIGA_MAC_VER_18
:
3956 case RTL_GIGA_MAC_VER_19
:
3957 case RTL_GIGA_MAC_VER_20
:
3958 case RTL_GIGA_MAC_VER_21
:
3959 case RTL_GIGA_MAC_VER_22
:
3960 case RTL_GIGA_MAC_VER_23
:
3961 case RTL_GIGA_MAC_VER_24
:
3962 case RTL_GIGA_MAC_VER_25
:
3963 case RTL_GIGA_MAC_VER_26
:
3964 case RTL_GIGA_MAC_VER_27
:
3965 case RTL_GIGA_MAC_VER_28
:
3966 case RTL_GIGA_MAC_VER_31
:
3967 rtl_writephy(tp
, 0x0e, 0x0200);
3969 rtl_writephy(tp
, MII_BMCR
, BMCR_PDOWN
);
3974 static void r8168_pll_power_down(struct rtl8169_private
*tp
)
3976 void __iomem
*ioaddr
= tp
->mmio_addr
;
3978 if ((tp
->mac_version
== RTL_GIGA_MAC_VER_27
||
3979 tp
->mac_version
== RTL_GIGA_MAC_VER_28
||
3980 tp
->mac_version
== RTL_GIGA_MAC_VER_31
) &&
3981 r8168dp_check_dash(tp
)) {
3985 if ((tp
->mac_version
== RTL_GIGA_MAC_VER_23
||
3986 tp
->mac_version
== RTL_GIGA_MAC_VER_24
) &&
3987 (RTL_R16(CPlusCmd
) & ASF
)) {
3991 if (tp
->mac_version
== RTL_GIGA_MAC_VER_32
||
3992 tp
->mac_version
== RTL_GIGA_MAC_VER_33
)
3993 rtl_ephy_write(tp
, 0x19, 0xff64);
3995 if (rtl_wol_pll_power_down(tp
))
3998 r8168_phy_power_down(tp
);
4000 switch (tp
->mac_version
) {
4001 case RTL_GIGA_MAC_VER_25
:
4002 case RTL_GIGA_MAC_VER_26
:
4003 case RTL_GIGA_MAC_VER_27
:
4004 case RTL_GIGA_MAC_VER_28
:
4005 case RTL_GIGA_MAC_VER_31
:
4006 case RTL_GIGA_MAC_VER_32
:
4007 case RTL_GIGA_MAC_VER_33
:
4008 RTL_W8(PMCH
, RTL_R8(PMCH
) & ~0x80);
4013 static void r8168_pll_power_up(struct rtl8169_private
*tp
)
4015 void __iomem
*ioaddr
= tp
->mmio_addr
;
4017 switch (tp
->mac_version
) {
4018 case RTL_GIGA_MAC_VER_25
:
4019 case RTL_GIGA_MAC_VER_26
:
4020 case RTL_GIGA_MAC_VER_27
:
4021 case RTL_GIGA_MAC_VER_28
:
4022 case RTL_GIGA_MAC_VER_31
:
4023 case RTL_GIGA_MAC_VER_32
:
4024 case RTL_GIGA_MAC_VER_33
:
4025 RTL_W8(PMCH
, RTL_R8(PMCH
) | 0x80);
4029 r8168_phy_power_up(tp
);
4032 static void rtl_generic_op(struct rtl8169_private
*tp
,
4033 void (*op
)(struct rtl8169_private
*))
4039 static void rtl_pll_power_down(struct rtl8169_private
*tp
)
4041 rtl_generic_op(tp
, tp
->pll_power_ops
.down
);
4044 static void rtl_pll_power_up(struct rtl8169_private
*tp
)
4046 rtl_generic_op(tp
, tp
->pll_power_ops
.up
);
4049 static void __devinit
rtl_init_pll_power_ops(struct rtl8169_private
*tp
)
4051 struct pll_power_ops
*ops
= &tp
->pll_power_ops
;
4053 switch (tp
->mac_version
) {
4054 case RTL_GIGA_MAC_VER_07
:
4055 case RTL_GIGA_MAC_VER_08
:
4056 case RTL_GIGA_MAC_VER_09
:
4057 case RTL_GIGA_MAC_VER_10
:
4058 case RTL_GIGA_MAC_VER_16
:
4059 case RTL_GIGA_MAC_VER_29
:
4060 case RTL_GIGA_MAC_VER_30
:
4061 case RTL_GIGA_MAC_VER_37
:
4062 case RTL_GIGA_MAC_VER_39
:
4063 ops
->down
= r810x_pll_power_down
;
4064 ops
->up
= r810x_pll_power_up
;
4067 case RTL_GIGA_MAC_VER_11
:
4068 case RTL_GIGA_MAC_VER_12
:
4069 case RTL_GIGA_MAC_VER_17
:
4070 case RTL_GIGA_MAC_VER_18
:
4071 case RTL_GIGA_MAC_VER_19
:
4072 case RTL_GIGA_MAC_VER_20
:
4073 case RTL_GIGA_MAC_VER_21
:
4074 case RTL_GIGA_MAC_VER_22
:
4075 case RTL_GIGA_MAC_VER_23
:
4076 case RTL_GIGA_MAC_VER_24
:
4077 case RTL_GIGA_MAC_VER_25
:
4078 case RTL_GIGA_MAC_VER_26
:
4079 case RTL_GIGA_MAC_VER_27
:
4080 case RTL_GIGA_MAC_VER_28
:
4081 case RTL_GIGA_MAC_VER_31
:
4082 case RTL_GIGA_MAC_VER_32
:
4083 case RTL_GIGA_MAC_VER_33
:
4084 case RTL_GIGA_MAC_VER_34
:
4085 case RTL_GIGA_MAC_VER_35
:
4086 case RTL_GIGA_MAC_VER_36
:
4087 case RTL_GIGA_MAC_VER_38
:
4088 case RTL_GIGA_MAC_VER_40
:
4089 case RTL_GIGA_MAC_VER_41
:
4090 ops
->down
= r8168_pll_power_down
;
4091 ops
->up
= r8168_pll_power_up
;
4101 static void rtl_init_rxcfg(struct rtl8169_private
*tp
)
4103 void __iomem
*ioaddr
= tp
->mmio_addr
;
4105 switch (tp
->mac_version
) {
4106 case RTL_GIGA_MAC_VER_01
:
4107 case RTL_GIGA_MAC_VER_02
:
4108 case RTL_GIGA_MAC_VER_03
:
4109 case RTL_GIGA_MAC_VER_04
:
4110 case RTL_GIGA_MAC_VER_05
:
4111 case RTL_GIGA_MAC_VER_06
:
4112 case RTL_GIGA_MAC_VER_10
:
4113 case RTL_GIGA_MAC_VER_11
:
4114 case RTL_GIGA_MAC_VER_12
:
4115 case RTL_GIGA_MAC_VER_13
:
4116 case RTL_GIGA_MAC_VER_14
:
4117 case RTL_GIGA_MAC_VER_15
:
4118 case RTL_GIGA_MAC_VER_16
:
4119 case RTL_GIGA_MAC_VER_17
:
4120 RTL_W32(RxConfig
, RX_FIFO_THRESH
| RX_DMA_BURST
);
4122 case RTL_GIGA_MAC_VER_18
:
4123 case RTL_GIGA_MAC_VER_19
:
4124 case RTL_GIGA_MAC_VER_20
:
4125 case RTL_GIGA_MAC_VER_21
:
4126 case RTL_GIGA_MAC_VER_22
:
4127 case RTL_GIGA_MAC_VER_23
:
4128 case RTL_GIGA_MAC_VER_24
:
4129 case RTL_GIGA_MAC_VER_34
:
4130 RTL_W32(RxConfig
, RX128_INT_EN
| RX_MULTI_EN
| RX_DMA_BURST
);
4133 RTL_W32(RxConfig
, RX128_INT_EN
| RX_DMA_BURST
);
4138 static void rtl8169_init_ring_indexes(struct rtl8169_private
*tp
)
4140 tp
->dirty_tx
= tp
->dirty_rx
= tp
->cur_tx
= tp
->cur_rx
= 0;
4143 static void rtl_hw_jumbo_enable(struct rtl8169_private
*tp
)
4145 void __iomem
*ioaddr
= tp
->mmio_addr
;
4147 RTL_W8(Cfg9346
, Cfg9346_Unlock
);
4148 rtl_generic_op(tp
, tp
->jumbo_ops
.enable
);
4149 RTL_W8(Cfg9346
, Cfg9346_Lock
);
4152 static void rtl_hw_jumbo_disable(struct rtl8169_private
*tp
)
4154 void __iomem
*ioaddr
= tp
->mmio_addr
;
4156 RTL_W8(Cfg9346
, Cfg9346_Unlock
);
4157 rtl_generic_op(tp
, tp
->jumbo_ops
.disable
);
4158 RTL_W8(Cfg9346
, Cfg9346_Lock
);
4161 static void r8168c_hw_jumbo_enable(struct rtl8169_private
*tp
)
4163 void __iomem
*ioaddr
= tp
->mmio_addr
;
4165 RTL_W8(Config3
, RTL_R8(Config3
) | Jumbo_En0
);
4166 RTL_W8(Config4
, RTL_R8(Config4
) | Jumbo_En1
);
4167 rtl_tx_performance_tweak(tp
->pci_dev
, 0x2 << MAX_READ_REQUEST_SHIFT
);
4170 static void r8168c_hw_jumbo_disable(struct rtl8169_private
*tp
)
4172 void __iomem
*ioaddr
= tp
->mmio_addr
;
4174 RTL_W8(Config3
, RTL_R8(Config3
) & ~Jumbo_En0
);
4175 RTL_W8(Config4
, RTL_R8(Config4
) & ~Jumbo_En1
);
4176 rtl_tx_performance_tweak(tp
->pci_dev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
4179 static void r8168dp_hw_jumbo_enable(struct rtl8169_private
*tp
)
4181 void __iomem
*ioaddr
= tp
->mmio_addr
;
4183 RTL_W8(Config3
, RTL_R8(Config3
) | Jumbo_En0
);
4186 static void r8168dp_hw_jumbo_disable(struct rtl8169_private
*tp
)
4188 void __iomem
*ioaddr
= tp
->mmio_addr
;
4190 RTL_W8(Config3
, RTL_R8(Config3
) & ~Jumbo_En0
);
4193 static void r8168e_hw_jumbo_enable(struct rtl8169_private
*tp
)
4195 void __iomem
*ioaddr
= tp
->mmio_addr
;
4197 RTL_W8(MaxTxPacketSize
, 0x3f);
4198 RTL_W8(Config3
, RTL_R8(Config3
) | Jumbo_En0
);
4199 RTL_W8(Config4
, RTL_R8(Config4
) | 0x01);
4200 rtl_tx_performance_tweak(tp
->pci_dev
, 0x2 << MAX_READ_REQUEST_SHIFT
);
4203 static void r8168e_hw_jumbo_disable(struct rtl8169_private
*tp
)
4205 void __iomem
*ioaddr
= tp
->mmio_addr
;
4207 RTL_W8(MaxTxPacketSize
, 0x0c);
4208 RTL_W8(Config3
, RTL_R8(Config3
) & ~Jumbo_En0
);
4209 RTL_W8(Config4
, RTL_R8(Config4
) & ~0x01);
4210 rtl_tx_performance_tweak(tp
->pci_dev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
4213 static void r8168b_0_hw_jumbo_enable(struct rtl8169_private
*tp
)
4215 rtl_tx_performance_tweak(tp
->pci_dev
,
4216 (0x2 << MAX_READ_REQUEST_SHIFT
) | PCI_EXP_DEVCTL_NOSNOOP_EN
);
4219 static void r8168b_0_hw_jumbo_disable(struct rtl8169_private
*tp
)
4221 rtl_tx_performance_tweak(tp
->pci_dev
,
4222 (0x5 << MAX_READ_REQUEST_SHIFT
) | PCI_EXP_DEVCTL_NOSNOOP_EN
);
4225 static void r8168b_1_hw_jumbo_enable(struct rtl8169_private
*tp
)
4227 void __iomem
*ioaddr
= tp
->mmio_addr
;
4229 r8168b_0_hw_jumbo_enable(tp
);
4231 RTL_W8(Config4
, RTL_R8(Config4
) | (1 << 0));
4234 static void r8168b_1_hw_jumbo_disable(struct rtl8169_private
*tp
)
4236 void __iomem
*ioaddr
= tp
->mmio_addr
;
4238 r8168b_0_hw_jumbo_disable(tp
);
4240 RTL_W8(Config4
, RTL_R8(Config4
) & ~(1 << 0));
4243 static void __devinit
rtl_init_jumbo_ops(struct rtl8169_private
*tp
)
4245 struct jumbo_ops
*ops
= &tp
->jumbo_ops
;
4247 switch (tp
->mac_version
) {
4248 case RTL_GIGA_MAC_VER_11
:
4249 ops
->disable
= r8168b_0_hw_jumbo_disable
;
4250 ops
->enable
= r8168b_0_hw_jumbo_enable
;
4252 case RTL_GIGA_MAC_VER_12
:
4253 case RTL_GIGA_MAC_VER_17
:
4254 ops
->disable
= r8168b_1_hw_jumbo_disable
;
4255 ops
->enable
= r8168b_1_hw_jumbo_enable
;
4257 case RTL_GIGA_MAC_VER_18
: /* Wild guess. Needs info from Realtek. */
4258 case RTL_GIGA_MAC_VER_19
:
4259 case RTL_GIGA_MAC_VER_20
:
4260 case RTL_GIGA_MAC_VER_21
: /* Wild guess. Needs info from Realtek. */
4261 case RTL_GIGA_MAC_VER_22
:
4262 case RTL_GIGA_MAC_VER_23
:
4263 case RTL_GIGA_MAC_VER_24
:
4264 case RTL_GIGA_MAC_VER_25
:
4265 case RTL_GIGA_MAC_VER_26
:
4266 ops
->disable
= r8168c_hw_jumbo_disable
;
4267 ops
->enable
= r8168c_hw_jumbo_enable
;
4269 case RTL_GIGA_MAC_VER_27
:
4270 case RTL_GIGA_MAC_VER_28
:
4271 ops
->disable
= r8168dp_hw_jumbo_disable
;
4272 ops
->enable
= r8168dp_hw_jumbo_enable
;
4274 case RTL_GIGA_MAC_VER_31
: /* Wild guess. Needs info from Realtek. */
4275 case RTL_GIGA_MAC_VER_32
:
4276 case RTL_GIGA_MAC_VER_33
:
4277 case RTL_GIGA_MAC_VER_34
:
4278 ops
->disable
= r8168e_hw_jumbo_disable
;
4279 ops
->enable
= r8168e_hw_jumbo_enable
;
4283 * No action needed for jumbo frames with 8169.
4284 * No jumbo for 810x at all.
4286 case RTL_GIGA_MAC_VER_40
:
4287 case RTL_GIGA_MAC_VER_41
:
4289 ops
->disable
= NULL
;
4295 DECLARE_RTL_COND(rtl_chipcmd_cond
)
4297 void __iomem
*ioaddr
= tp
->mmio_addr
;
4299 return RTL_R8(ChipCmd
) & CmdReset
;
4302 static void rtl_hw_reset(struct rtl8169_private
*tp
)
4304 void __iomem
*ioaddr
= tp
->mmio_addr
;
4306 RTL_W8(ChipCmd
, CmdReset
);
4308 rtl_udelay_loop_wait_low(tp
, &rtl_chipcmd_cond
, 100, 100);
4311 static void rtl_request_uncached_firmware(struct rtl8169_private
*tp
)
4313 struct rtl_fw
*rtl_fw
;
4317 name
= rtl_lookup_firmware_name(tp
);
4319 goto out_no_firmware
;
4321 rtl_fw
= kzalloc(sizeof(*rtl_fw
), GFP_KERNEL
);
4325 rc
= request_firmware(&rtl_fw
->fw
, name
, &tp
->pci_dev
->dev
);
4329 rc
= rtl_check_firmware(tp
, rtl_fw
);
4331 goto err_release_firmware
;
4333 tp
->rtl_fw
= rtl_fw
;
4337 err_release_firmware
:
4338 release_firmware(rtl_fw
->fw
);
4342 netif_warn(tp
, ifup
, tp
->dev
, "unable to load firmware patch %s (%d)\n",
4349 static void rtl_request_firmware(struct rtl8169_private
*tp
)
4351 if (IS_ERR(tp
->rtl_fw
))
4352 rtl_request_uncached_firmware(tp
);
4355 static void rtl_rx_close(struct rtl8169_private
*tp
)
4357 void __iomem
*ioaddr
= tp
->mmio_addr
;
4359 RTL_W32(RxConfig
, RTL_R32(RxConfig
) & ~RX_CONFIG_ACCEPT_MASK
);
4362 DECLARE_RTL_COND(rtl_npq_cond
)
4364 void __iomem
*ioaddr
= tp
->mmio_addr
;
4366 return RTL_R8(TxPoll
) & NPQ
;
4369 DECLARE_RTL_COND(rtl_txcfg_empty_cond
)
4371 void __iomem
*ioaddr
= tp
->mmio_addr
;
4373 return RTL_R32(TxConfig
) & TXCFG_EMPTY
;
4376 static void rtl8169_hw_reset(struct rtl8169_private
*tp
)
4378 void __iomem
*ioaddr
= tp
->mmio_addr
;
4380 /* Disable interrupts */
4381 rtl8169_irq_mask_and_ack(tp
);
4385 if (tp
->mac_version
== RTL_GIGA_MAC_VER_27
||
4386 tp
->mac_version
== RTL_GIGA_MAC_VER_28
||
4387 tp
->mac_version
== RTL_GIGA_MAC_VER_31
) {
4388 rtl_udelay_loop_wait_low(tp
, &rtl_npq_cond
, 20, 42*42);
4389 } else if (tp
->mac_version
== RTL_GIGA_MAC_VER_34
||
4390 tp
->mac_version
== RTL_GIGA_MAC_VER_35
||
4391 tp
->mac_version
== RTL_GIGA_MAC_VER_36
||
4392 tp
->mac_version
== RTL_GIGA_MAC_VER_37
||
4393 tp
->mac_version
== RTL_GIGA_MAC_VER_40
||
4394 tp
->mac_version
== RTL_GIGA_MAC_VER_41
||
4395 tp
->mac_version
== RTL_GIGA_MAC_VER_38
) {
4396 RTL_W8(ChipCmd
, RTL_R8(ChipCmd
) | StopReq
);
4397 rtl_udelay_loop_wait_high(tp
, &rtl_txcfg_empty_cond
, 100, 666);
4399 RTL_W8(ChipCmd
, RTL_R8(ChipCmd
) | StopReq
);
4406 static void rtl_set_rx_tx_config_registers(struct rtl8169_private
*tp
)
4408 void __iomem
*ioaddr
= tp
->mmio_addr
;
4410 /* Set DMA burst size and Interframe Gap Time */
4411 RTL_W32(TxConfig
, (TX_DMA_BURST
<< TxDMAShift
) |
4412 (InterFrameGap
<< TxInterFrameGapShift
));
4415 static void rtl_hw_start(struct net_device
*dev
)
4417 struct rtl8169_private
*tp
= netdev_priv(dev
);
4421 rtl_irq_enable_all(tp
);
4424 static void rtl_set_rx_tx_desc_registers(struct rtl8169_private
*tp
,
4425 void __iomem
*ioaddr
)
4428 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
4429 * register to be written before TxDescAddrLow to work.
4430 * Switching from MMIO to I/O access fixes the issue as well.
4432 RTL_W32(TxDescStartAddrHigh
, ((u64
) tp
->TxPhyAddr
) >> 32);
4433 RTL_W32(TxDescStartAddrLow
, ((u64
) tp
->TxPhyAddr
) & DMA_BIT_MASK(32));
4434 RTL_W32(RxDescAddrHigh
, ((u64
) tp
->RxPhyAddr
) >> 32);
4435 RTL_W32(RxDescAddrLow
, ((u64
) tp
->RxPhyAddr
) & DMA_BIT_MASK(32));
4438 static u16
rtl_rw_cpluscmd(void __iomem
*ioaddr
)
4442 cmd
= RTL_R16(CPlusCmd
);
4443 RTL_W16(CPlusCmd
, cmd
);
4447 static void rtl_set_rx_max_size(void __iomem
*ioaddr
, unsigned int rx_buf_sz
)
4449 /* Low hurts. Let's disable the filtering. */
4450 RTL_W16(RxMaxSize
, rx_buf_sz
+ 1);
4453 static void rtl8169_set_magic_reg(void __iomem
*ioaddr
, unsigned mac_version
)
4455 static const struct rtl_cfg2_info
{
4460 { RTL_GIGA_MAC_VER_05
, PCI_Clock_33MHz
, 0x000fff00 }, // 8110SCd
4461 { RTL_GIGA_MAC_VER_05
, PCI_Clock_66MHz
, 0x000fffff },
4462 { RTL_GIGA_MAC_VER_06
, PCI_Clock_33MHz
, 0x00ffff00 }, // 8110SCe
4463 { RTL_GIGA_MAC_VER_06
, PCI_Clock_66MHz
, 0x00ffffff }
4465 const struct rtl_cfg2_info
*p
= cfg2_info
;
4469 clk
= RTL_R8(Config2
) & PCI_Clock_66MHz
;
4470 for (i
= 0; i
< ARRAY_SIZE(cfg2_info
); i
++, p
++) {
4471 if ((p
->mac_version
== mac_version
) && (p
->clk
== clk
)) {
4472 RTL_W32(0x7c, p
->val
);
4478 static void rtl_set_rx_mode(struct net_device
*dev
)
4480 struct rtl8169_private
*tp
= netdev_priv(dev
);
4481 void __iomem
*ioaddr
= tp
->mmio_addr
;
4482 u32 mc_filter
[2]; /* Multicast hash filter */
4486 if (dev
->flags
& IFF_PROMISC
) {
4487 /* Unconditionally log net taps. */
4488 netif_notice(tp
, link
, dev
, "Promiscuous mode enabled\n");
4490 AcceptBroadcast
| AcceptMulticast
| AcceptMyPhys
|
4492 mc_filter
[1] = mc_filter
[0] = 0xffffffff;
4493 } else if ((netdev_mc_count(dev
) > multicast_filter_limit
) ||
4494 (dev
->flags
& IFF_ALLMULTI
)) {
4495 /* Too many to filter perfectly -- accept all multicasts. */
4496 rx_mode
= AcceptBroadcast
| AcceptMulticast
| AcceptMyPhys
;
4497 mc_filter
[1] = mc_filter
[0] = 0xffffffff;
4499 struct netdev_hw_addr
*ha
;
4501 rx_mode
= AcceptBroadcast
| AcceptMyPhys
;
4502 mc_filter
[1] = mc_filter
[0] = 0;
4503 netdev_for_each_mc_addr(ha
, dev
) {
4504 int bit_nr
= ether_crc(ETH_ALEN
, ha
->addr
) >> 26;
4505 mc_filter
[bit_nr
>> 5] |= 1 << (bit_nr
& 31);
4506 rx_mode
|= AcceptMulticast
;
4510 if (dev
->features
& NETIF_F_RXALL
)
4511 rx_mode
|= (AcceptErr
| AcceptRunt
);
4513 tmp
= (RTL_R32(RxConfig
) & ~RX_CONFIG_ACCEPT_MASK
) | rx_mode
;
4515 if (tp
->mac_version
> RTL_GIGA_MAC_VER_06
) {
4516 u32 data
= mc_filter
[0];
4518 mc_filter
[0] = swab32(mc_filter
[1]);
4519 mc_filter
[1] = swab32(data
);
4522 RTL_W32(MAR0
+ 4, mc_filter
[1]);
4523 RTL_W32(MAR0
+ 0, mc_filter
[0]);
4525 RTL_W32(RxConfig
, tmp
);
4528 static void rtl_hw_start_8169(struct net_device
*dev
)
4530 struct rtl8169_private
*tp
= netdev_priv(dev
);
4531 void __iomem
*ioaddr
= tp
->mmio_addr
;
4532 struct pci_dev
*pdev
= tp
->pci_dev
;
4534 if (tp
->mac_version
== RTL_GIGA_MAC_VER_05
) {
4535 RTL_W16(CPlusCmd
, RTL_R16(CPlusCmd
) | PCIMulRW
);
4536 pci_write_config_byte(pdev
, PCI_CACHE_LINE_SIZE
, 0x08);
4539 RTL_W8(Cfg9346
, Cfg9346_Unlock
);
4540 if (tp
->mac_version
== RTL_GIGA_MAC_VER_01
||
4541 tp
->mac_version
== RTL_GIGA_MAC_VER_02
||
4542 tp
->mac_version
== RTL_GIGA_MAC_VER_03
||
4543 tp
->mac_version
== RTL_GIGA_MAC_VER_04
)
4544 RTL_W8(ChipCmd
, CmdTxEnb
| CmdRxEnb
);
4548 RTL_W8(EarlyTxThres
, NoEarlyTx
);
4550 rtl_set_rx_max_size(ioaddr
, rx_buf_sz
);
4552 if (tp
->mac_version
== RTL_GIGA_MAC_VER_01
||
4553 tp
->mac_version
== RTL_GIGA_MAC_VER_02
||
4554 tp
->mac_version
== RTL_GIGA_MAC_VER_03
||
4555 tp
->mac_version
== RTL_GIGA_MAC_VER_04
)
4556 rtl_set_rx_tx_config_registers(tp
);
4558 tp
->cp_cmd
|= rtl_rw_cpluscmd(ioaddr
) | PCIMulRW
;
4560 if (tp
->mac_version
== RTL_GIGA_MAC_VER_02
||
4561 tp
->mac_version
== RTL_GIGA_MAC_VER_03
) {
4562 dprintk("Set MAC Reg C+CR Offset 0xE0. "
4563 "Bit-3 and bit-14 MUST be 1\n");
4564 tp
->cp_cmd
|= (1 << 14);
4567 RTL_W16(CPlusCmd
, tp
->cp_cmd
);
4569 rtl8169_set_magic_reg(ioaddr
, tp
->mac_version
);
4572 * Undocumented corner. Supposedly:
4573 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
4575 RTL_W16(IntrMitigate
, 0x0000);
4577 rtl_set_rx_tx_desc_registers(tp
, ioaddr
);
4579 if (tp
->mac_version
!= RTL_GIGA_MAC_VER_01
&&
4580 tp
->mac_version
!= RTL_GIGA_MAC_VER_02
&&
4581 tp
->mac_version
!= RTL_GIGA_MAC_VER_03
&&
4582 tp
->mac_version
!= RTL_GIGA_MAC_VER_04
) {
4583 RTL_W8(ChipCmd
, CmdTxEnb
| CmdRxEnb
);
4584 rtl_set_rx_tx_config_registers(tp
);
4587 RTL_W8(Cfg9346
, Cfg9346_Lock
);
4589 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
4592 RTL_W32(RxMissed
, 0);
4594 rtl_set_rx_mode(dev
);
4596 /* no early-rx interrupts */
4597 RTL_W16(MultiIntr
, RTL_R16(MultiIntr
) & 0xF000);
4600 static void rtl_csi_write(struct rtl8169_private
*tp
, int addr
, int value
)
4602 if (tp
->csi_ops
.write
)
4603 tp
->csi_ops
.write(tp
, addr
, value
);
4606 static u32
rtl_csi_read(struct rtl8169_private
*tp
, int addr
)
4608 return tp
->csi_ops
.read
? tp
->csi_ops
.read(tp
, addr
) : ~0;
4611 static void rtl_csi_access_enable(struct rtl8169_private
*tp
, u32 bits
)
4615 csi
= rtl_csi_read(tp
, 0x070c) & 0x00ffffff;
4616 rtl_csi_write(tp
, 0x070c, csi
| bits
);
4619 static void rtl_csi_access_enable_1(struct rtl8169_private
*tp
)
4621 rtl_csi_access_enable(tp
, 0x17000000);
4624 static void rtl_csi_access_enable_2(struct rtl8169_private
*tp
)
4626 rtl_csi_access_enable(tp
, 0x27000000);
4629 DECLARE_RTL_COND(rtl_csiar_cond
)
4631 void __iomem
*ioaddr
= tp
->mmio_addr
;
4633 return RTL_R32(CSIAR
) & CSIAR_FLAG
;
4636 static void r8169_csi_write(struct rtl8169_private
*tp
, int addr
, int value
)
4638 void __iomem
*ioaddr
= tp
->mmio_addr
;
4640 RTL_W32(CSIDR
, value
);
4641 RTL_W32(CSIAR
, CSIAR_WRITE_CMD
| (addr
& CSIAR_ADDR_MASK
) |
4642 CSIAR_BYTE_ENABLE
<< CSIAR_BYTE_ENABLE_SHIFT
);
4644 rtl_udelay_loop_wait_low(tp
, &rtl_csiar_cond
, 10, 100);
4647 static u32
r8169_csi_read(struct rtl8169_private
*tp
, int addr
)
4649 void __iomem
*ioaddr
= tp
->mmio_addr
;
4651 RTL_W32(CSIAR
, (addr
& CSIAR_ADDR_MASK
) |
4652 CSIAR_BYTE_ENABLE
<< CSIAR_BYTE_ENABLE_SHIFT
);
4654 return rtl_udelay_loop_wait_high(tp
, &rtl_csiar_cond
, 10, 100) ?
4655 RTL_R32(CSIDR
) : ~0;
4658 static void r8402_csi_write(struct rtl8169_private
*tp
, int addr
, int value
)
4660 void __iomem
*ioaddr
= tp
->mmio_addr
;
4662 RTL_W32(CSIDR
, value
);
4663 RTL_W32(CSIAR
, CSIAR_WRITE_CMD
| (addr
& CSIAR_ADDR_MASK
) |
4664 CSIAR_BYTE_ENABLE
<< CSIAR_BYTE_ENABLE_SHIFT
|
4667 rtl_udelay_loop_wait_low(tp
, &rtl_csiar_cond
, 10, 100);
4670 static u32
r8402_csi_read(struct rtl8169_private
*tp
, int addr
)
4672 void __iomem
*ioaddr
= tp
->mmio_addr
;
4674 RTL_W32(CSIAR
, (addr
& CSIAR_ADDR_MASK
) | CSIAR_FUNC_NIC
|
4675 CSIAR_BYTE_ENABLE
<< CSIAR_BYTE_ENABLE_SHIFT
);
4677 return rtl_udelay_loop_wait_high(tp
, &rtl_csiar_cond
, 10, 100) ?
4678 RTL_R32(CSIDR
) : ~0;
4681 static void __devinit
rtl_init_csi_ops(struct rtl8169_private
*tp
)
4683 struct csi_ops
*ops
= &tp
->csi_ops
;
4685 switch (tp
->mac_version
) {
4686 case RTL_GIGA_MAC_VER_01
:
4687 case RTL_GIGA_MAC_VER_02
:
4688 case RTL_GIGA_MAC_VER_03
:
4689 case RTL_GIGA_MAC_VER_04
:
4690 case RTL_GIGA_MAC_VER_05
:
4691 case RTL_GIGA_MAC_VER_06
:
4692 case RTL_GIGA_MAC_VER_10
:
4693 case RTL_GIGA_MAC_VER_11
:
4694 case RTL_GIGA_MAC_VER_12
:
4695 case RTL_GIGA_MAC_VER_13
:
4696 case RTL_GIGA_MAC_VER_14
:
4697 case RTL_GIGA_MAC_VER_15
:
4698 case RTL_GIGA_MAC_VER_16
:
4699 case RTL_GIGA_MAC_VER_17
:
4704 case RTL_GIGA_MAC_VER_37
:
4705 case RTL_GIGA_MAC_VER_38
:
4706 ops
->write
= r8402_csi_write
;
4707 ops
->read
= r8402_csi_read
;
4711 ops
->write
= r8169_csi_write
;
4712 ops
->read
= r8169_csi_read
;
4718 unsigned int offset
;
4723 static void rtl_ephy_init(struct rtl8169_private
*tp
, const struct ephy_info
*e
,
4729 w
= (rtl_ephy_read(tp
, e
->offset
) & ~e
->mask
) | e
->bits
;
4730 rtl_ephy_write(tp
, e
->offset
, w
);
4735 static void rtl_disable_clock_request(struct pci_dev
*pdev
)
4737 pcie_capability_clear_word(pdev
, PCI_EXP_LNKCTL
,
4738 PCI_EXP_LNKCTL_CLKREQ_EN
);
4741 static void rtl_enable_clock_request(struct pci_dev
*pdev
)
4743 pcie_capability_set_word(pdev
, PCI_EXP_LNKCTL
,
4744 PCI_EXP_LNKCTL_CLKREQ_EN
);
4747 #define R8168_CPCMD_QUIRK_MASK (\
4758 static void rtl_hw_start_8168bb(struct rtl8169_private
*tp
)
4760 void __iomem
*ioaddr
= tp
->mmio_addr
;
4761 struct pci_dev
*pdev
= tp
->pci_dev
;
4763 RTL_W8(Config3
, RTL_R8(Config3
) & ~Beacon_en
);
4765 RTL_W16(CPlusCmd
, RTL_R16(CPlusCmd
) & ~R8168_CPCMD_QUIRK_MASK
);
4767 rtl_tx_performance_tweak(pdev
,
4768 (0x5 << MAX_READ_REQUEST_SHIFT
) | PCI_EXP_DEVCTL_NOSNOOP_EN
);
4771 static void rtl_hw_start_8168bef(struct rtl8169_private
*tp
)
4773 void __iomem
*ioaddr
= tp
->mmio_addr
;
4775 rtl_hw_start_8168bb(tp
);
4777 RTL_W8(MaxTxPacketSize
, TxPacketMax
);
4779 RTL_W8(Config4
, RTL_R8(Config4
) & ~(1 << 0));
4782 static void __rtl_hw_start_8168cp(struct rtl8169_private
*tp
)
4784 void __iomem
*ioaddr
= tp
->mmio_addr
;
4785 struct pci_dev
*pdev
= tp
->pci_dev
;
4787 RTL_W8(Config1
, RTL_R8(Config1
) | Speed_down
);
4789 RTL_W8(Config3
, RTL_R8(Config3
) & ~Beacon_en
);
4791 rtl_tx_performance_tweak(pdev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
4793 rtl_disable_clock_request(pdev
);
4795 RTL_W16(CPlusCmd
, RTL_R16(CPlusCmd
) & ~R8168_CPCMD_QUIRK_MASK
);
4798 static void rtl_hw_start_8168cp_1(struct rtl8169_private
*tp
)
4800 static const struct ephy_info e_info_8168cp
[] = {
4801 { 0x01, 0, 0x0001 },
4802 { 0x02, 0x0800, 0x1000 },
4803 { 0x03, 0, 0x0042 },
4804 { 0x06, 0x0080, 0x0000 },
4808 rtl_csi_access_enable_2(tp
);
4810 rtl_ephy_init(tp
, e_info_8168cp
, ARRAY_SIZE(e_info_8168cp
));
4812 __rtl_hw_start_8168cp(tp
);
4815 static void rtl_hw_start_8168cp_2(struct rtl8169_private
*tp
)
4817 void __iomem
*ioaddr
= tp
->mmio_addr
;
4818 struct pci_dev
*pdev
= tp
->pci_dev
;
4820 rtl_csi_access_enable_2(tp
);
4822 RTL_W8(Config3
, RTL_R8(Config3
) & ~Beacon_en
);
4824 rtl_tx_performance_tweak(pdev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
4826 RTL_W16(CPlusCmd
, RTL_R16(CPlusCmd
) & ~R8168_CPCMD_QUIRK_MASK
);
4829 static void rtl_hw_start_8168cp_3(struct rtl8169_private
*tp
)
4831 void __iomem
*ioaddr
= tp
->mmio_addr
;
4832 struct pci_dev
*pdev
= tp
->pci_dev
;
4834 rtl_csi_access_enable_2(tp
);
4836 RTL_W8(Config3
, RTL_R8(Config3
) & ~Beacon_en
);
4839 RTL_W8(DBG_REG
, 0x20);
4841 RTL_W8(MaxTxPacketSize
, TxPacketMax
);
4843 rtl_tx_performance_tweak(pdev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
4845 RTL_W16(CPlusCmd
, RTL_R16(CPlusCmd
) & ~R8168_CPCMD_QUIRK_MASK
);
4848 static void rtl_hw_start_8168c_1(struct rtl8169_private
*tp
)
4850 void __iomem
*ioaddr
= tp
->mmio_addr
;
4851 static const struct ephy_info e_info_8168c_1
[] = {
4852 { 0x02, 0x0800, 0x1000 },
4853 { 0x03, 0, 0x0002 },
4854 { 0x06, 0x0080, 0x0000 }
4857 rtl_csi_access_enable_2(tp
);
4859 RTL_W8(DBG_REG
, 0x06 | FIX_NAK_1
| FIX_NAK_2
);
4861 rtl_ephy_init(tp
, e_info_8168c_1
, ARRAY_SIZE(e_info_8168c_1
));
4863 __rtl_hw_start_8168cp(tp
);
4866 static void rtl_hw_start_8168c_2(struct rtl8169_private
*tp
)
4868 static const struct ephy_info e_info_8168c_2
[] = {
4869 { 0x01, 0, 0x0001 },
4870 { 0x03, 0x0400, 0x0220 }
4873 rtl_csi_access_enable_2(tp
);
4875 rtl_ephy_init(tp
, e_info_8168c_2
, ARRAY_SIZE(e_info_8168c_2
));
4877 __rtl_hw_start_8168cp(tp
);
4880 static void rtl_hw_start_8168c_3(struct rtl8169_private
*tp
)
4882 rtl_hw_start_8168c_2(tp
);
4885 static void rtl_hw_start_8168c_4(struct rtl8169_private
*tp
)
4887 rtl_csi_access_enable_2(tp
);
4889 __rtl_hw_start_8168cp(tp
);
4892 static void rtl_hw_start_8168d(struct rtl8169_private
*tp
)
4894 void __iomem
*ioaddr
= tp
->mmio_addr
;
4895 struct pci_dev
*pdev
= tp
->pci_dev
;
4897 rtl_csi_access_enable_2(tp
);
4899 rtl_disable_clock_request(pdev
);
4901 RTL_W8(MaxTxPacketSize
, TxPacketMax
);
4903 rtl_tx_performance_tweak(pdev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
4905 RTL_W16(CPlusCmd
, RTL_R16(CPlusCmd
) & ~R8168_CPCMD_QUIRK_MASK
);
4908 static void rtl_hw_start_8168dp(struct rtl8169_private
*tp
)
4910 void __iomem
*ioaddr
= tp
->mmio_addr
;
4911 struct pci_dev
*pdev
= tp
->pci_dev
;
4913 rtl_csi_access_enable_1(tp
);
4915 rtl_tx_performance_tweak(pdev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
4917 RTL_W8(MaxTxPacketSize
, TxPacketMax
);
4919 rtl_disable_clock_request(pdev
);
4922 static void rtl_hw_start_8168d_4(struct rtl8169_private
*tp
)
4924 void __iomem
*ioaddr
= tp
->mmio_addr
;
4925 struct pci_dev
*pdev
= tp
->pci_dev
;
4926 static const struct ephy_info e_info_8168d_4
[] = {
4928 { 0x19, 0x20, 0x50 },
4933 rtl_csi_access_enable_1(tp
);
4935 rtl_tx_performance_tweak(pdev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
4937 RTL_W8(MaxTxPacketSize
, TxPacketMax
);
4939 for (i
= 0; i
< ARRAY_SIZE(e_info_8168d_4
); i
++) {
4940 const struct ephy_info
*e
= e_info_8168d_4
+ i
;
4943 w
= rtl_ephy_read(tp
, e
->offset
);
4944 rtl_ephy_write(tp
, 0x03, (w
& e
->mask
) | e
->bits
);
4947 rtl_enable_clock_request(pdev
);
4950 static void rtl_hw_start_8168e_1(struct rtl8169_private
*tp
)
4952 void __iomem
*ioaddr
= tp
->mmio_addr
;
4953 struct pci_dev
*pdev
= tp
->pci_dev
;
4954 static const struct ephy_info e_info_8168e_1
[] = {
4955 { 0x00, 0x0200, 0x0100 },
4956 { 0x00, 0x0000, 0x0004 },
4957 { 0x06, 0x0002, 0x0001 },
4958 { 0x06, 0x0000, 0x0030 },
4959 { 0x07, 0x0000, 0x2000 },
4960 { 0x00, 0x0000, 0x0020 },
4961 { 0x03, 0x5800, 0x2000 },
4962 { 0x03, 0x0000, 0x0001 },
4963 { 0x01, 0x0800, 0x1000 },
4964 { 0x07, 0x0000, 0x4000 },
4965 { 0x1e, 0x0000, 0x2000 },
4966 { 0x19, 0xffff, 0xfe6c },
4967 { 0x0a, 0x0000, 0x0040 }
4970 rtl_csi_access_enable_2(tp
);
4972 rtl_ephy_init(tp
, e_info_8168e_1
, ARRAY_SIZE(e_info_8168e_1
));
4974 rtl_tx_performance_tweak(pdev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
4976 RTL_W8(MaxTxPacketSize
, TxPacketMax
);
4978 rtl_disable_clock_request(pdev
);
4980 /* Reset tx FIFO pointer */
4981 RTL_W32(MISC
, RTL_R32(MISC
) | TXPLA_RST
);
4982 RTL_W32(MISC
, RTL_R32(MISC
) & ~TXPLA_RST
);
4984 RTL_W8(Config5
, RTL_R8(Config5
) & ~Spi_en
);
4987 static void rtl_hw_start_8168e_2(struct rtl8169_private
*tp
)
4989 void __iomem
*ioaddr
= tp
->mmio_addr
;
4990 struct pci_dev
*pdev
= tp
->pci_dev
;
4991 static const struct ephy_info e_info_8168e_2
[] = {
4992 { 0x09, 0x0000, 0x0080 },
4993 { 0x19, 0x0000, 0x0224 }
4996 rtl_csi_access_enable_1(tp
);
4998 rtl_ephy_init(tp
, e_info_8168e_2
, ARRAY_SIZE(e_info_8168e_2
));
5000 rtl_tx_performance_tweak(pdev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
5002 rtl_eri_write(tp
, 0xc0, ERIAR_MASK_0011
, 0x0000, ERIAR_EXGMAC
);
5003 rtl_eri_write(tp
, 0xb8, ERIAR_MASK_0011
, 0x0000, ERIAR_EXGMAC
);
5004 rtl_eri_write(tp
, 0xc8, ERIAR_MASK_1111
, 0x00100002, ERIAR_EXGMAC
);
5005 rtl_eri_write(tp
, 0xe8, ERIAR_MASK_1111
, 0x00100006, ERIAR_EXGMAC
);
5006 rtl_eri_write(tp
, 0xcc, ERIAR_MASK_1111
, 0x00000050, ERIAR_EXGMAC
);
5007 rtl_eri_write(tp
, 0xd0, ERIAR_MASK_1111
, 0x07ff0060, ERIAR_EXGMAC
);
5008 rtl_w1w0_eri(tp
, 0x1b0, ERIAR_MASK_0001
, 0x10, 0x00, ERIAR_EXGMAC
);
5009 rtl_w1w0_eri(tp
, 0x0d4, ERIAR_MASK_0011
, 0x0c00, 0xff00, ERIAR_EXGMAC
);
5011 RTL_W8(MaxTxPacketSize
, EarlySize
);
5013 rtl_disable_clock_request(pdev
);
5015 RTL_W32(TxConfig
, RTL_R32(TxConfig
) | TXCFG_AUTO_FIFO
);
5016 RTL_W8(MCU
, RTL_R8(MCU
) & ~NOW_IS_OOB
);
5018 /* Adjust EEE LED frequency */
5019 RTL_W8(EEE_LED
, RTL_R8(EEE_LED
) & ~0x07);
5021 RTL_W8(DLLPR
, RTL_R8(DLLPR
) | PFM_EN
);
5022 RTL_W32(MISC
, RTL_R32(MISC
) | PWM_EN
);
5023 RTL_W8(Config5
, RTL_R8(Config5
) & ~Spi_en
);
5026 static void rtl_hw_start_8168f(struct rtl8169_private
*tp
)
5028 void __iomem
*ioaddr
= tp
->mmio_addr
;
5029 struct pci_dev
*pdev
= tp
->pci_dev
;
5031 rtl_csi_access_enable_2(tp
);
5033 rtl_tx_performance_tweak(pdev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
5035 rtl_eri_write(tp
, 0xc0, ERIAR_MASK_0011
, 0x0000, ERIAR_EXGMAC
);
5036 rtl_eri_write(tp
, 0xb8, ERIAR_MASK_0011
, 0x0000, ERIAR_EXGMAC
);
5037 rtl_eri_write(tp
, 0xc8, ERIAR_MASK_1111
, 0x00100002, ERIAR_EXGMAC
);
5038 rtl_eri_write(tp
, 0xe8, ERIAR_MASK_1111
, 0x00100006, ERIAR_EXGMAC
);
5039 rtl_w1w0_eri(tp
, 0xdc, ERIAR_MASK_0001
, 0x00, 0x01, ERIAR_EXGMAC
);
5040 rtl_w1w0_eri(tp
, 0xdc, ERIAR_MASK_0001
, 0x01, 0x00, ERIAR_EXGMAC
);
5041 rtl_w1w0_eri(tp
, 0x1b0, ERIAR_MASK_0001
, 0x10, 0x00, ERIAR_EXGMAC
);
5042 rtl_w1w0_eri(tp
, 0x1d0, ERIAR_MASK_0001
, 0x10, 0x00, ERIAR_EXGMAC
);
5043 rtl_eri_write(tp
, 0xcc, ERIAR_MASK_1111
, 0x00000050, ERIAR_EXGMAC
);
5044 rtl_eri_write(tp
, 0xd0, ERIAR_MASK_1111
, 0x00000060, ERIAR_EXGMAC
);
5046 RTL_W8(MaxTxPacketSize
, EarlySize
);
5048 rtl_disable_clock_request(pdev
);
5050 RTL_W32(TxConfig
, RTL_R32(TxConfig
) | TXCFG_AUTO_FIFO
);
5051 RTL_W8(MCU
, RTL_R8(MCU
) & ~NOW_IS_OOB
);
5052 RTL_W8(DLLPR
, RTL_R8(DLLPR
) | PFM_EN
);
5053 RTL_W32(MISC
, RTL_R32(MISC
) | PWM_EN
);
5054 RTL_W8(Config5
, RTL_R8(Config5
) & ~Spi_en
);
5057 static void rtl_hw_start_8168f_1(struct rtl8169_private
*tp
)
5059 void __iomem
*ioaddr
= tp
->mmio_addr
;
5060 static const struct ephy_info e_info_8168f_1
[] = {
5061 { 0x06, 0x00c0, 0x0020 },
5062 { 0x08, 0x0001, 0x0002 },
5063 { 0x09, 0x0000, 0x0080 },
5064 { 0x19, 0x0000, 0x0224 }
5067 rtl_hw_start_8168f(tp
);
5069 rtl_ephy_init(tp
, e_info_8168f_1
, ARRAY_SIZE(e_info_8168f_1
));
5071 rtl_w1w0_eri(tp
, 0x0d4, ERIAR_MASK_0011
, 0x0c00, 0xff00, ERIAR_EXGMAC
);
5073 /* Adjust EEE LED frequency */
5074 RTL_W8(EEE_LED
, RTL_R8(EEE_LED
) & ~0x07);
5077 static void rtl_hw_start_8411(struct rtl8169_private
*tp
)
5079 static const struct ephy_info e_info_8168f_1
[] = {
5080 { 0x06, 0x00c0, 0x0020 },
5081 { 0x0f, 0xffff, 0x5200 },
5082 { 0x1e, 0x0000, 0x4000 },
5083 { 0x19, 0x0000, 0x0224 }
5086 rtl_hw_start_8168f(tp
);
5088 rtl_ephy_init(tp
, e_info_8168f_1
, ARRAY_SIZE(e_info_8168f_1
));
5090 rtl_w1w0_eri(tp
, 0x0d4, ERIAR_MASK_0011
, 0x0c00, 0x0000, ERIAR_EXGMAC
);
5093 static void rtl_hw_start_8168g_1(struct rtl8169_private
*tp
)
5095 void __iomem
*ioaddr
= tp
->mmio_addr
;
5096 struct pci_dev
*pdev
= tp
->pci_dev
;
5098 rtl_eri_write(tp
, 0xc8, ERIAR_MASK_0101
, 0x080002, ERIAR_EXGMAC
);
5099 rtl_eri_write(tp
, 0xcc, ERIAR_MASK_0001
, 0x38, ERIAR_EXGMAC
);
5100 rtl_eri_write(tp
, 0xd0, ERIAR_MASK_0001
, 0x48, ERIAR_EXGMAC
);
5101 rtl_eri_write(tp
, 0xe8, ERIAR_MASK_1111
, 0x00100006, ERIAR_EXGMAC
);
5103 rtl_csi_access_enable_1(tp
);
5105 rtl_tx_performance_tweak(pdev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
5107 rtl_w1w0_eri(tp
, 0xdc, ERIAR_MASK_0001
, 0x00, 0x01, ERIAR_EXGMAC
);
5108 rtl_w1w0_eri(tp
, 0xdc, ERIAR_MASK_0001
, 0x01, 0x00, ERIAR_EXGMAC
);
5110 RTL_W8(ChipCmd
, CmdTxEnb
| CmdRxEnb
);
5111 RTL_W32(MISC
, RTL_R32(MISC
) & ~RXDV_GATED_EN
);
5112 RTL_W8(MaxTxPacketSize
, EarlySize
);
5114 rtl_eri_write(tp
, 0xc0, ERIAR_MASK_0011
, 0x0000, ERIAR_EXGMAC
);
5115 rtl_eri_write(tp
, 0xb8, ERIAR_MASK_0011
, 0x0000, ERIAR_EXGMAC
);
5117 /* Adjust EEE LED frequency */
5118 RTL_W8(EEE_LED
, RTL_R8(EEE_LED
) & ~0x07);
5120 rtl_w1w0_eri(tp
, 0x2fc, ERIAR_MASK_0001
, 0x01, 0x02, ERIAR_EXGMAC
);
5123 static void rtl_hw_start_8168(struct net_device
*dev
)
5125 struct rtl8169_private
*tp
= netdev_priv(dev
);
5126 void __iomem
*ioaddr
= tp
->mmio_addr
;
5128 RTL_W8(Cfg9346
, Cfg9346_Unlock
);
5130 RTL_W8(MaxTxPacketSize
, TxPacketMax
);
5132 rtl_set_rx_max_size(ioaddr
, rx_buf_sz
);
5134 tp
->cp_cmd
|= RTL_R16(CPlusCmd
) | PktCntrDisable
| INTT_1
;
5136 RTL_W16(CPlusCmd
, tp
->cp_cmd
);
5138 RTL_W16(IntrMitigate
, 0x5151);
5140 /* Work around for RxFIFO overflow. */
5141 if (tp
->mac_version
== RTL_GIGA_MAC_VER_11
) {
5142 tp
->event_slow
|= RxFIFOOver
| PCSTimeout
;
5143 tp
->event_slow
&= ~RxOverflow
;
5146 rtl_set_rx_tx_desc_registers(tp
, ioaddr
);
5148 rtl_set_rx_mode(dev
);
5150 RTL_W32(TxConfig
, (TX_DMA_BURST
<< TxDMAShift
) |
5151 (InterFrameGap
<< TxInterFrameGapShift
));
5155 switch (tp
->mac_version
) {
5156 case RTL_GIGA_MAC_VER_11
:
5157 rtl_hw_start_8168bb(tp
);
5160 case RTL_GIGA_MAC_VER_12
:
5161 case RTL_GIGA_MAC_VER_17
:
5162 rtl_hw_start_8168bef(tp
);
5165 case RTL_GIGA_MAC_VER_18
:
5166 rtl_hw_start_8168cp_1(tp
);
5169 case RTL_GIGA_MAC_VER_19
:
5170 rtl_hw_start_8168c_1(tp
);
5173 case RTL_GIGA_MAC_VER_20
:
5174 rtl_hw_start_8168c_2(tp
);
5177 case RTL_GIGA_MAC_VER_21
:
5178 rtl_hw_start_8168c_3(tp
);
5181 case RTL_GIGA_MAC_VER_22
:
5182 rtl_hw_start_8168c_4(tp
);
5185 case RTL_GIGA_MAC_VER_23
:
5186 rtl_hw_start_8168cp_2(tp
);
5189 case RTL_GIGA_MAC_VER_24
:
5190 rtl_hw_start_8168cp_3(tp
);
5193 case RTL_GIGA_MAC_VER_25
:
5194 case RTL_GIGA_MAC_VER_26
:
5195 case RTL_GIGA_MAC_VER_27
:
5196 rtl_hw_start_8168d(tp
);
5199 case RTL_GIGA_MAC_VER_28
:
5200 rtl_hw_start_8168d_4(tp
);
5203 case RTL_GIGA_MAC_VER_31
:
5204 rtl_hw_start_8168dp(tp
);
5207 case RTL_GIGA_MAC_VER_32
:
5208 case RTL_GIGA_MAC_VER_33
:
5209 rtl_hw_start_8168e_1(tp
);
5211 case RTL_GIGA_MAC_VER_34
:
5212 rtl_hw_start_8168e_2(tp
);
5215 case RTL_GIGA_MAC_VER_35
:
5216 case RTL_GIGA_MAC_VER_36
:
5217 rtl_hw_start_8168f_1(tp
);
5220 case RTL_GIGA_MAC_VER_38
:
5221 rtl_hw_start_8411(tp
);
5224 case RTL_GIGA_MAC_VER_40
:
5225 case RTL_GIGA_MAC_VER_41
:
5226 rtl_hw_start_8168g_1(tp
);
5230 printk(KERN_ERR PFX
"%s: unknown chipset (mac_version = %d).\n",
5231 dev
->name
, tp
->mac_version
);
5235 RTL_W8(ChipCmd
, CmdTxEnb
| CmdRxEnb
);
5237 RTL_W8(Cfg9346
, Cfg9346_Lock
);
5239 RTL_W16(MultiIntr
, RTL_R16(MultiIntr
) & 0xF000);
5242 #define R810X_CPCMD_QUIRK_MASK (\
5253 static void rtl_hw_start_8102e_1(struct rtl8169_private
*tp
)
5255 void __iomem
*ioaddr
= tp
->mmio_addr
;
5256 struct pci_dev
*pdev
= tp
->pci_dev
;
5257 static const struct ephy_info e_info_8102e_1
[] = {
5258 { 0x01, 0, 0x6e65 },
5259 { 0x02, 0, 0x091f },
5260 { 0x03, 0, 0xc2f9 },
5261 { 0x06, 0, 0xafb5 },
5262 { 0x07, 0, 0x0e00 },
5263 { 0x19, 0, 0xec80 },
5264 { 0x01, 0, 0x2e65 },
5269 rtl_csi_access_enable_2(tp
);
5271 RTL_W8(DBG_REG
, FIX_NAK_1
);
5273 rtl_tx_performance_tweak(pdev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
5276 LEDS1
| LEDS0
| Speed_down
| MEMMAP
| IOMAP
| VPD
| PMEnable
);
5277 RTL_W8(Config3
, RTL_R8(Config3
) & ~Beacon_en
);
5279 cfg1
= RTL_R8(Config1
);
5280 if ((cfg1
& LEDS0
) && (cfg1
& LEDS1
))
5281 RTL_W8(Config1
, cfg1
& ~LEDS0
);
5283 rtl_ephy_init(tp
, e_info_8102e_1
, ARRAY_SIZE(e_info_8102e_1
));
5286 static void rtl_hw_start_8102e_2(struct rtl8169_private
*tp
)
5288 void __iomem
*ioaddr
= tp
->mmio_addr
;
5289 struct pci_dev
*pdev
= tp
->pci_dev
;
5291 rtl_csi_access_enable_2(tp
);
5293 rtl_tx_performance_tweak(pdev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
5295 RTL_W8(Config1
, MEMMAP
| IOMAP
| VPD
| PMEnable
);
5296 RTL_W8(Config3
, RTL_R8(Config3
) & ~Beacon_en
);
5299 static void rtl_hw_start_8102e_3(struct rtl8169_private
*tp
)
5301 rtl_hw_start_8102e_2(tp
);
5303 rtl_ephy_write(tp
, 0x03, 0xc2f9);
5306 static void rtl_hw_start_8105e_1(struct rtl8169_private
*tp
)
5308 void __iomem
*ioaddr
= tp
->mmio_addr
;
5309 static const struct ephy_info e_info_8105e_1
[] = {
5310 { 0x07, 0, 0x4000 },
5311 { 0x19, 0, 0x0200 },
5312 { 0x19, 0, 0x0020 },
5313 { 0x1e, 0, 0x2000 },
5314 { 0x03, 0, 0x0001 },
5315 { 0x19, 0, 0x0100 },
5316 { 0x19, 0, 0x0004 },
5320 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5321 RTL_W32(FuncEvent
, RTL_R32(FuncEvent
) | 0x002800);
5323 /* Disable Early Tally Counter */
5324 RTL_W32(FuncEvent
, RTL_R32(FuncEvent
) & ~0x010000);
5326 RTL_W8(MCU
, RTL_R8(MCU
) | EN_NDP
| EN_OOB_RESET
);
5327 RTL_W8(DLLPR
, RTL_R8(DLLPR
) | PFM_EN
);
5329 rtl_ephy_init(tp
, e_info_8105e_1
, ARRAY_SIZE(e_info_8105e_1
));
5332 static void rtl_hw_start_8105e_2(struct rtl8169_private
*tp
)
5334 rtl_hw_start_8105e_1(tp
);
5335 rtl_ephy_write(tp
, 0x1e, rtl_ephy_read(tp
, 0x1e) | 0x8000);
5338 static void rtl_hw_start_8402(struct rtl8169_private
*tp
)
5340 void __iomem
*ioaddr
= tp
->mmio_addr
;
5341 static const struct ephy_info e_info_8402
[] = {
5342 { 0x19, 0xffff, 0xff64 },
5346 rtl_csi_access_enable_2(tp
);
5348 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5349 RTL_W32(FuncEvent
, RTL_R32(FuncEvent
) | 0x002800);
5351 RTL_W32(TxConfig
, RTL_R32(TxConfig
) | TXCFG_AUTO_FIFO
);
5352 RTL_W8(MCU
, RTL_R8(MCU
) & ~NOW_IS_OOB
);
5354 rtl_ephy_init(tp
, e_info_8402
, ARRAY_SIZE(e_info_8402
));
5356 rtl_tx_performance_tweak(tp
->pci_dev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
5358 rtl_eri_write(tp
, 0xc8, ERIAR_MASK_1111
, 0x00000002, ERIAR_EXGMAC
);
5359 rtl_eri_write(tp
, 0xe8, ERIAR_MASK_1111
, 0x00000006, ERIAR_EXGMAC
);
5360 rtl_w1w0_eri(tp
, 0xdc, ERIAR_MASK_0001
, 0x00, 0x01, ERIAR_EXGMAC
);
5361 rtl_w1w0_eri(tp
, 0xdc, ERIAR_MASK_0001
, 0x01, 0x00, ERIAR_EXGMAC
);
5362 rtl_eri_write(tp
, 0xc0, ERIAR_MASK_0011
, 0x0000, ERIAR_EXGMAC
);
5363 rtl_eri_write(tp
, 0xb8, ERIAR_MASK_0011
, 0x0000, ERIAR_EXGMAC
);
5364 rtl_w1w0_eri(tp
, 0x0d4, ERIAR_MASK_0011
, 0x0e00, 0xff00, ERIAR_EXGMAC
);
5367 static void rtl_hw_start_8106(struct rtl8169_private
*tp
)
5369 void __iomem
*ioaddr
= tp
->mmio_addr
;
5371 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5372 RTL_W32(FuncEvent
, RTL_R32(FuncEvent
) | 0x002800);
5374 RTL_W32(MISC
, (RTL_R32(MISC
) | DISABLE_LAN_EN
) & ~EARLY_TALLY_EN
);
5375 RTL_W8(MCU
, RTL_R8(MCU
) | EN_NDP
| EN_OOB_RESET
);
5376 RTL_W8(DLLPR
, RTL_R8(DLLPR
) & ~PFM_EN
);
5379 static void rtl_hw_start_8101(struct net_device
*dev
)
5381 struct rtl8169_private
*tp
= netdev_priv(dev
);
5382 void __iomem
*ioaddr
= tp
->mmio_addr
;
5383 struct pci_dev
*pdev
= tp
->pci_dev
;
5385 if (tp
->mac_version
>= RTL_GIGA_MAC_VER_30
)
5386 tp
->event_slow
&= ~RxFIFOOver
;
5388 if (tp
->mac_version
== RTL_GIGA_MAC_VER_13
||
5389 tp
->mac_version
== RTL_GIGA_MAC_VER_16
)
5390 pcie_capability_set_word(pdev
, PCI_EXP_DEVCTL
,
5391 PCI_EXP_DEVCTL_NOSNOOP_EN
);
5393 RTL_W8(Cfg9346
, Cfg9346_Unlock
);
5395 switch (tp
->mac_version
) {
5396 case RTL_GIGA_MAC_VER_07
:
5397 rtl_hw_start_8102e_1(tp
);
5400 case RTL_GIGA_MAC_VER_08
:
5401 rtl_hw_start_8102e_3(tp
);
5404 case RTL_GIGA_MAC_VER_09
:
5405 rtl_hw_start_8102e_2(tp
);
5408 case RTL_GIGA_MAC_VER_29
:
5409 rtl_hw_start_8105e_1(tp
);
5411 case RTL_GIGA_MAC_VER_30
:
5412 rtl_hw_start_8105e_2(tp
);
5415 case RTL_GIGA_MAC_VER_37
:
5416 rtl_hw_start_8402(tp
);
5419 case RTL_GIGA_MAC_VER_39
:
5420 rtl_hw_start_8106(tp
);
5424 RTL_W8(Cfg9346
, Cfg9346_Lock
);
5426 RTL_W8(MaxTxPacketSize
, TxPacketMax
);
5428 rtl_set_rx_max_size(ioaddr
, rx_buf_sz
);
5430 tp
->cp_cmd
&= ~R810X_CPCMD_QUIRK_MASK
;
5431 RTL_W16(CPlusCmd
, tp
->cp_cmd
);
5433 RTL_W16(IntrMitigate
, 0x0000);
5435 rtl_set_rx_tx_desc_registers(tp
, ioaddr
);
5437 RTL_W8(ChipCmd
, CmdTxEnb
| CmdRxEnb
);
5438 rtl_set_rx_tx_config_registers(tp
);
5442 rtl_set_rx_mode(dev
);
5444 RTL_W16(MultiIntr
, RTL_R16(MultiIntr
) & 0xf000);
5447 static int rtl8169_change_mtu(struct net_device
*dev
, int new_mtu
)
5449 struct rtl8169_private
*tp
= netdev_priv(dev
);
5451 if (new_mtu
< ETH_ZLEN
||
5452 new_mtu
> rtl_chip_infos
[tp
->mac_version
].jumbo_max
)
5455 if (new_mtu
> ETH_DATA_LEN
)
5456 rtl_hw_jumbo_enable(tp
);
5458 rtl_hw_jumbo_disable(tp
);
5461 netdev_update_features(dev
);
5466 static inline void rtl8169_make_unusable_by_asic(struct RxDesc
*desc
)
5468 desc
->addr
= cpu_to_le64(0x0badbadbadbadbadull
);
5469 desc
->opts1
&= ~cpu_to_le32(DescOwn
| RsvdMask
);
5472 static void rtl8169_free_rx_databuff(struct rtl8169_private
*tp
,
5473 void **data_buff
, struct RxDesc
*desc
)
5475 dma_unmap_single(&tp
->pci_dev
->dev
, le64_to_cpu(desc
->addr
), rx_buf_sz
,
5480 rtl8169_make_unusable_by_asic(desc
);
5483 static inline void rtl8169_mark_to_asic(struct RxDesc
*desc
, u32 rx_buf_sz
)
5485 u32 eor
= le32_to_cpu(desc
->opts1
) & RingEnd
;
5487 desc
->opts1
= cpu_to_le32(DescOwn
| eor
| rx_buf_sz
);
5490 static inline void rtl8169_map_to_asic(struct RxDesc
*desc
, dma_addr_t mapping
,
5493 desc
->addr
= cpu_to_le64(mapping
);
5495 rtl8169_mark_to_asic(desc
, rx_buf_sz
);
5498 static inline void *rtl8169_align(void *data
)
5500 return (void *)ALIGN((long)data
, 16);
5503 static struct sk_buff
*rtl8169_alloc_rx_data(struct rtl8169_private
*tp
,
5504 struct RxDesc
*desc
)
5508 struct device
*d
= &tp
->pci_dev
->dev
;
5509 struct net_device
*dev
= tp
->dev
;
5510 int node
= dev
->dev
.parent
? dev_to_node(dev
->dev
.parent
) : -1;
5512 data
= kmalloc_node(rx_buf_sz
, GFP_KERNEL
, node
);
5516 if (rtl8169_align(data
) != data
) {
5518 data
= kmalloc_node(rx_buf_sz
+ 15, GFP_KERNEL
, node
);
5523 mapping
= dma_map_single(d
, rtl8169_align(data
), rx_buf_sz
,
5525 if (unlikely(dma_mapping_error(d
, mapping
))) {
5526 if (net_ratelimit())
5527 netif_err(tp
, drv
, tp
->dev
, "Failed to map RX DMA!\n");
5531 rtl8169_map_to_asic(desc
, mapping
, rx_buf_sz
);
5539 static void rtl8169_rx_clear(struct rtl8169_private
*tp
)
5543 for (i
= 0; i
< NUM_RX_DESC
; i
++) {
5544 if (tp
->Rx_databuff
[i
]) {
5545 rtl8169_free_rx_databuff(tp
, tp
->Rx_databuff
+ i
,
5546 tp
->RxDescArray
+ i
);
5551 static inline void rtl8169_mark_as_last_descriptor(struct RxDesc
*desc
)
5553 desc
->opts1
|= cpu_to_le32(RingEnd
);
5556 static int rtl8169_rx_fill(struct rtl8169_private
*tp
)
5560 for (i
= 0; i
< NUM_RX_DESC
; i
++) {
5563 if (tp
->Rx_databuff
[i
])
5566 data
= rtl8169_alloc_rx_data(tp
, tp
->RxDescArray
+ i
);
5568 rtl8169_make_unusable_by_asic(tp
->RxDescArray
+ i
);
5571 tp
->Rx_databuff
[i
] = data
;
5574 rtl8169_mark_as_last_descriptor(tp
->RxDescArray
+ NUM_RX_DESC
- 1);
5578 rtl8169_rx_clear(tp
);
5582 static int rtl8169_init_ring(struct net_device
*dev
)
5584 struct rtl8169_private
*tp
= netdev_priv(dev
);
5586 rtl8169_init_ring_indexes(tp
);
5588 memset(tp
->tx_skb
, 0x0, NUM_TX_DESC
* sizeof(struct ring_info
));
5589 memset(tp
->Rx_databuff
, 0x0, NUM_RX_DESC
* sizeof(void *));
5591 return rtl8169_rx_fill(tp
);
5594 static void rtl8169_unmap_tx_skb(struct device
*d
, struct ring_info
*tx_skb
,
5595 struct TxDesc
*desc
)
5597 unsigned int len
= tx_skb
->len
;
5599 dma_unmap_single(d
, le64_to_cpu(desc
->addr
), len
, DMA_TO_DEVICE
);
5607 static void rtl8169_tx_clear_range(struct rtl8169_private
*tp
, u32 start
,
5612 for (i
= 0; i
< n
; i
++) {
5613 unsigned int entry
= (start
+ i
) % NUM_TX_DESC
;
5614 struct ring_info
*tx_skb
= tp
->tx_skb
+ entry
;
5615 unsigned int len
= tx_skb
->len
;
5618 struct sk_buff
*skb
= tx_skb
->skb
;
5620 rtl8169_unmap_tx_skb(&tp
->pci_dev
->dev
, tx_skb
,
5621 tp
->TxDescArray
+ entry
);
5623 tp
->dev
->stats
.tx_dropped
++;
5631 static void rtl8169_tx_clear(struct rtl8169_private
*tp
)
5633 rtl8169_tx_clear_range(tp
, tp
->dirty_tx
, NUM_TX_DESC
);
5634 tp
->cur_tx
= tp
->dirty_tx
= 0;
5637 static void rtl_reset_work(struct rtl8169_private
*tp
)
5639 struct net_device
*dev
= tp
->dev
;
5642 napi_disable(&tp
->napi
);
5643 netif_stop_queue(dev
);
5644 synchronize_sched();
5646 rtl8169_hw_reset(tp
);
5648 for (i
= 0; i
< NUM_RX_DESC
; i
++)
5649 rtl8169_mark_to_asic(tp
->RxDescArray
+ i
, rx_buf_sz
);
5651 rtl8169_tx_clear(tp
);
5652 rtl8169_init_ring_indexes(tp
);
5654 napi_enable(&tp
->napi
);
5656 netif_wake_queue(dev
);
5657 rtl8169_check_link_status(dev
, tp
, tp
->mmio_addr
);
5660 static void rtl8169_tx_timeout(struct net_device
*dev
)
5662 struct rtl8169_private
*tp
= netdev_priv(dev
);
5664 rtl_schedule_task(tp
, RTL_FLAG_TASK_RESET_PENDING
);
5667 static int rtl8169_xmit_frags(struct rtl8169_private
*tp
, struct sk_buff
*skb
,
5670 struct skb_shared_info
*info
= skb_shinfo(skb
);
5671 unsigned int cur_frag
, entry
;
5672 struct TxDesc
* uninitialized_var(txd
);
5673 struct device
*d
= &tp
->pci_dev
->dev
;
5676 for (cur_frag
= 0; cur_frag
< info
->nr_frags
; cur_frag
++) {
5677 const skb_frag_t
*frag
= info
->frags
+ cur_frag
;
5682 entry
= (entry
+ 1) % NUM_TX_DESC
;
5684 txd
= tp
->TxDescArray
+ entry
;
5685 len
= skb_frag_size(frag
);
5686 addr
= skb_frag_address(frag
);
5687 mapping
= dma_map_single(d
, addr
, len
, DMA_TO_DEVICE
);
5688 if (unlikely(dma_mapping_error(d
, mapping
))) {
5689 if (net_ratelimit())
5690 netif_err(tp
, drv
, tp
->dev
,
5691 "Failed to map TX fragments DMA!\n");
5695 /* Anti gcc 2.95.3 bugware (sic) */
5696 status
= opts
[0] | len
|
5697 (RingEnd
* !((entry
+ 1) % NUM_TX_DESC
));
5699 txd
->opts1
= cpu_to_le32(status
);
5700 txd
->opts2
= cpu_to_le32(opts
[1]);
5701 txd
->addr
= cpu_to_le64(mapping
);
5703 tp
->tx_skb
[entry
].len
= len
;
5707 tp
->tx_skb
[entry
].skb
= skb
;
5708 txd
->opts1
|= cpu_to_le32(LastFrag
);
5714 rtl8169_tx_clear_range(tp
, tp
->cur_tx
+ 1, cur_frag
);
5718 static inline void rtl8169_tso_csum(struct rtl8169_private
*tp
,
5719 struct sk_buff
*skb
, u32
*opts
)
5721 const struct rtl_tx_desc_info
*info
= tx_desc_info
+ tp
->txd_version
;
5722 u32 mss
= skb_shinfo(skb
)->gso_size
;
5723 int offset
= info
->opts_offset
;
5727 opts
[offset
] |= min(mss
, TD_MSS_MAX
) << info
->mss_shift
;
5728 } else if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
5729 const struct iphdr
*ip
= ip_hdr(skb
);
5731 if (ip
->protocol
== IPPROTO_TCP
)
5732 opts
[offset
] |= info
->checksum
.tcp
;
5733 else if (ip
->protocol
== IPPROTO_UDP
)
5734 opts
[offset
] |= info
->checksum
.udp
;
5740 static netdev_tx_t
rtl8169_start_xmit(struct sk_buff
*skb
,
5741 struct net_device
*dev
)
5743 struct rtl8169_private
*tp
= netdev_priv(dev
);
5744 unsigned int entry
= tp
->cur_tx
% NUM_TX_DESC
;
5745 struct TxDesc
*txd
= tp
->TxDescArray
+ entry
;
5746 void __iomem
*ioaddr
= tp
->mmio_addr
;
5747 struct device
*d
= &tp
->pci_dev
->dev
;
5753 if (unlikely(!TX_FRAGS_READY_FOR(tp
, skb_shinfo(skb
)->nr_frags
))) {
5754 netif_err(tp
, drv
, dev
, "BUG! Tx Ring full when queue awake!\n");
5758 if (unlikely(le32_to_cpu(txd
->opts1
) & DescOwn
))
5761 len
= skb_headlen(skb
);
5762 mapping
= dma_map_single(d
, skb
->data
, len
, DMA_TO_DEVICE
);
5763 if (unlikely(dma_mapping_error(d
, mapping
))) {
5764 if (net_ratelimit())
5765 netif_err(tp
, drv
, dev
, "Failed to map TX DMA!\n");
5769 tp
->tx_skb
[entry
].len
= len
;
5770 txd
->addr
= cpu_to_le64(mapping
);
5772 opts
[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp
, skb
));
5775 rtl8169_tso_csum(tp
, skb
, opts
);
5777 frags
= rtl8169_xmit_frags(tp
, skb
, opts
);
5781 opts
[0] |= FirstFrag
;
5783 opts
[0] |= FirstFrag
| LastFrag
;
5784 tp
->tx_skb
[entry
].skb
= skb
;
5787 txd
->opts2
= cpu_to_le32(opts
[1]);
5789 skb_tx_timestamp(skb
);
5793 /* Anti gcc 2.95.3 bugware (sic) */
5794 status
= opts
[0] | len
| (RingEnd
* !((entry
+ 1) % NUM_TX_DESC
));
5795 txd
->opts1
= cpu_to_le32(status
);
5797 tp
->cur_tx
+= frags
+ 1;
5801 RTL_W8(TxPoll
, NPQ
);
5805 if (!TX_FRAGS_READY_FOR(tp
, MAX_SKB_FRAGS
)) {
5806 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
5807 * not miss a ring update when it notices a stopped queue.
5810 netif_stop_queue(dev
);
5811 /* Sync with rtl_tx:
5812 * - publish queue status and cur_tx ring index (write barrier)
5813 * - refresh dirty_tx ring index (read barrier).
5814 * May the current thread have a pessimistic view of the ring
5815 * status and forget to wake up queue, a racing rtl_tx thread
5819 if (TX_FRAGS_READY_FOR(tp
, MAX_SKB_FRAGS
))
5820 netif_wake_queue(dev
);
5823 return NETDEV_TX_OK
;
5826 rtl8169_unmap_tx_skb(d
, tp
->tx_skb
+ entry
, txd
);
5829 dev
->stats
.tx_dropped
++;
5830 return NETDEV_TX_OK
;
5833 netif_stop_queue(dev
);
5834 dev
->stats
.tx_dropped
++;
5835 return NETDEV_TX_BUSY
;
5838 static void rtl8169_pcierr_interrupt(struct net_device
*dev
)
5840 struct rtl8169_private
*tp
= netdev_priv(dev
);
5841 struct pci_dev
*pdev
= tp
->pci_dev
;
5842 u16 pci_status
, pci_cmd
;
5844 pci_read_config_word(pdev
, PCI_COMMAND
, &pci_cmd
);
5845 pci_read_config_word(pdev
, PCI_STATUS
, &pci_status
);
5847 netif_err(tp
, intr
, dev
, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
5848 pci_cmd
, pci_status
);
5851 * The recovery sequence below admits a very elaborated explanation:
5852 * - it seems to work;
5853 * - I did not see what else could be done;
5854 * - it makes iop3xx happy.
5856 * Feel free to adjust to your needs.
5858 if (pdev
->broken_parity_status
)
5859 pci_cmd
&= ~PCI_COMMAND_PARITY
;
5861 pci_cmd
|= PCI_COMMAND_SERR
| PCI_COMMAND_PARITY
;
5863 pci_write_config_word(pdev
, PCI_COMMAND
, pci_cmd
);
5865 pci_write_config_word(pdev
, PCI_STATUS
,
5866 pci_status
& (PCI_STATUS_DETECTED_PARITY
|
5867 PCI_STATUS_SIG_SYSTEM_ERROR
| PCI_STATUS_REC_MASTER_ABORT
|
5868 PCI_STATUS_REC_TARGET_ABORT
| PCI_STATUS_SIG_TARGET_ABORT
));
5870 /* The infamous DAC f*ckup only happens at boot time */
5871 if ((tp
->cp_cmd
& PCIDAC
) && !tp
->dirty_rx
&& !tp
->cur_rx
) {
5872 void __iomem
*ioaddr
= tp
->mmio_addr
;
5874 netif_info(tp
, intr
, dev
, "disabling PCI DAC\n");
5875 tp
->cp_cmd
&= ~PCIDAC
;
5876 RTL_W16(CPlusCmd
, tp
->cp_cmd
);
5877 dev
->features
&= ~NETIF_F_HIGHDMA
;
5880 rtl8169_hw_reset(tp
);
5882 rtl_schedule_task(tp
, RTL_FLAG_TASK_RESET_PENDING
);
5885 static void rtl_tx(struct net_device
*dev
, struct rtl8169_private
*tp
)
5887 unsigned int dirty_tx
, tx_left
;
5889 dirty_tx
= tp
->dirty_tx
;
5891 tx_left
= tp
->cur_tx
- dirty_tx
;
5893 while (tx_left
> 0) {
5894 unsigned int entry
= dirty_tx
% NUM_TX_DESC
;
5895 struct ring_info
*tx_skb
= tp
->tx_skb
+ entry
;
5899 status
= le32_to_cpu(tp
->TxDescArray
[entry
].opts1
);
5900 if (status
& DescOwn
)
5903 rtl8169_unmap_tx_skb(&tp
->pci_dev
->dev
, tx_skb
,
5904 tp
->TxDescArray
+ entry
);
5905 if (status
& LastFrag
) {
5906 u64_stats_update_begin(&tp
->tx_stats
.syncp
);
5907 tp
->tx_stats
.packets
++;
5908 tp
->tx_stats
.bytes
+= tx_skb
->skb
->len
;
5909 u64_stats_update_end(&tp
->tx_stats
.syncp
);
5910 dev_kfree_skb(tx_skb
->skb
);
5917 if (tp
->dirty_tx
!= dirty_tx
) {
5918 tp
->dirty_tx
= dirty_tx
;
5919 /* Sync with rtl8169_start_xmit:
5920 * - publish dirty_tx ring index (write barrier)
5921 * - refresh cur_tx ring index and queue status (read barrier)
5922 * May the current thread miss the stopped queue condition,
5923 * a racing xmit thread can only have a right view of the
5927 if (netif_queue_stopped(dev
) &&
5928 TX_FRAGS_READY_FOR(tp
, MAX_SKB_FRAGS
)) {
5929 netif_wake_queue(dev
);
5932 * 8168 hack: TxPoll requests are lost when the Tx packets are
5933 * too close. Let's kick an extra TxPoll request when a burst
5934 * of start_xmit activity is detected (if it is not detected,
5935 * it is slow enough). -- FR
5937 if (tp
->cur_tx
!= dirty_tx
) {
5938 void __iomem
*ioaddr
= tp
->mmio_addr
;
5940 RTL_W8(TxPoll
, NPQ
);
5945 static inline int rtl8169_fragmented_frame(u32 status
)
5947 return (status
& (FirstFrag
| LastFrag
)) != (FirstFrag
| LastFrag
);
5950 static inline void rtl8169_rx_csum(struct sk_buff
*skb
, u32 opts1
)
5952 u32 status
= opts1
& RxProtoMask
;
5954 if (((status
== RxProtoTCP
) && !(opts1
& TCPFail
)) ||
5955 ((status
== RxProtoUDP
) && !(opts1
& UDPFail
)))
5956 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
5958 skb_checksum_none_assert(skb
);
5961 static struct sk_buff
*rtl8169_try_rx_copy(void *data
,
5962 struct rtl8169_private
*tp
,
5966 struct sk_buff
*skb
;
5967 struct device
*d
= &tp
->pci_dev
->dev
;
5969 data
= rtl8169_align(data
);
5970 dma_sync_single_for_cpu(d
, addr
, pkt_size
, DMA_FROM_DEVICE
);
5972 skb
= netdev_alloc_skb_ip_align(tp
->dev
, pkt_size
);
5974 memcpy(skb
->data
, data
, pkt_size
);
5975 dma_sync_single_for_device(d
, addr
, pkt_size
, DMA_FROM_DEVICE
);
5980 static int rtl_rx(struct net_device
*dev
, struct rtl8169_private
*tp
, u32 budget
)
5982 unsigned int cur_rx
, rx_left
;
5985 cur_rx
= tp
->cur_rx
;
5986 rx_left
= NUM_RX_DESC
+ tp
->dirty_rx
- cur_rx
;
5987 rx_left
= min(rx_left
, budget
);
5989 for (; rx_left
> 0; rx_left
--, cur_rx
++) {
5990 unsigned int entry
= cur_rx
% NUM_RX_DESC
;
5991 struct RxDesc
*desc
= tp
->RxDescArray
+ entry
;
5995 status
= le32_to_cpu(desc
->opts1
) & tp
->opts1_mask
;
5997 if (status
& DescOwn
)
5999 if (unlikely(status
& RxRES
)) {
6000 netif_info(tp
, rx_err
, dev
, "Rx ERROR. status = %08x\n",
6002 dev
->stats
.rx_errors
++;
6003 if (status
& (RxRWT
| RxRUNT
))
6004 dev
->stats
.rx_length_errors
++;
6006 dev
->stats
.rx_crc_errors
++;
6007 if (status
& RxFOVF
) {
6008 rtl_schedule_task(tp
, RTL_FLAG_TASK_RESET_PENDING
);
6009 dev
->stats
.rx_fifo_errors
++;
6011 if ((status
& (RxRUNT
| RxCRC
)) &&
6012 !(status
& (RxRWT
| RxFOVF
)) &&
6013 (dev
->features
& NETIF_F_RXALL
))
6016 rtl8169_mark_to_asic(desc
, rx_buf_sz
);
6018 struct sk_buff
*skb
;
6023 addr
= le64_to_cpu(desc
->addr
);
6024 if (likely(!(dev
->features
& NETIF_F_RXFCS
)))
6025 pkt_size
= (status
& 0x00003fff) - 4;
6027 pkt_size
= status
& 0x00003fff;
6030 * The driver does not support incoming fragmented
6031 * frames. They are seen as a symptom of over-mtu
6034 if (unlikely(rtl8169_fragmented_frame(status
))) {
6035 dev
->stats
.rx_dropped
++;
6036 dev
->stats
.rx_length_errors
++;
6037 rtl8169_mark_to_asic(desc
, rx_buf_sz
);
6041 skb
= rtl8169_try_rx_copy(tp
->Rx_databuff
[entry
],
6042 tp
, pkt_size
, addr
);
6043 rtl8169_mark_to_asic(desc
, rx_buf_sz
);
6045 dev
->stats
.rx_dropped
++;
6049 rtl8169_rx_csum(skb
, status
);
6050 skb_put(skb
, pkt_size
);
6051 skb
->protocol
= eth_type_trans(skb
, dev
);
6053 rtl8169_rx_vlan_tag(desc
, skb
);
6055 napi_gro_receive(&tp
->napi
, skb
);
6057 u64_stats_update_begin(&tp
->rx_stats
.syncp
);
6058 tp
->rx_stats
.packets
++;
6059 tp
->rx_stats
.bytes
+= pkt_size
;
6060 u64_stats_update_end(&tp
->rx_stats
.syncp
);
6063 /* Work around for AMD plateform. */
6064 if ((desc
->opts2
& cpu_to_le32(0xfffe000)) &&
6065 (tp
->mac_version
== RTL_GIGA_MAC_VER_05
)) {
6071 count
= cur_rx
- tp
->cur_rx
;
6072 tp
->cur_rx
= cur_rx
;
6074 tp
->dirty_rx
+= count
;
6079 static irqreturn_t
rtl8169_interrupt(int irq
, void *dev_instance
)
6081 struct net_device
*dev
= dev_instance
;
6082 struct rtl8169_private
*tp
= netdev_priv(dev
);
6086 status
= rtl_get_events(tp
);
6087 if (status
&& status
!= 0xffff) {
6088 status
&= RTL_EVENT_NAPI
| tp
->event_slow
;
6092 rtl_irq_disable(tp
);
6093 napi_schedule(&tp
->napi
);
6096 return IRQ_RETVAL(handled
);
6100 * Workqueue context.
6102 static void rtl_slow_event_work(struct rtl8169_private
*tp
)
6104 struct net_device
*dev
= tp
->dev
;
6107 status
= rtl_get_events(tp
) & tp
->event_slow
;
6108 rtl_ack_events(tp
, status
);
6110 if (unlikely(status
& RxFIFOOver
)) {
6111 switch (tp
->mac_version
) {
6112 /* Work around for rx fifo overflow */
6113 case RTL_GIGA_MAC_VER_11
:
6114 netif_stop_queue(dev
);
6115 /* XXX - Hack alert. See rtl_task(). */
6116 set_bit(RTL_FLAG_TASK_RESET_PENDING
, tp
->wk
.flags
);
6122 if (unlikely(status
& SYSErr
))
6123 rtl8169_pcierr_interrupt(dev
);
6125 if (status
& LinkChg
)
6126 __rtl8169_check_link_status(dev
, tp
, tp
->mmio_addr
, true);
6128 rtl_irq_enable_all(tp
);
6131 static void rtl_task(struct work_struct
*work
)
6133 static const struct {
6135 void (*action
)(struct rtl8169_private
*);
6137 /* XXX - keep rtl_slow_event_work() as first element. */
6138 { RTL_FLAG_TASK_SLOW_PENDING
, rtl_slow_event_work
},
6139 { RTL_FLAG_TASK_RESET_PENDING
, rtl_reset_work
},
6140 { RTL_FLAG_TASK_PHY_PENDING
, rtl_phy_work
}
6142 struct rtl8169_private
*tp
=
6143 container_of(work
, struct rtl8169_private
, wk
.work
);
6144 struct net_device
*dev
= tp
->dev
;
6149 if (!netif_running(dev
) ||
6150 !test_bit(RTL_FLAG_TASK_ENABLED
, tp
->wk
.flags
))
6153 for (i
= 0; i
< ARRAY_SIZE(rtl_work
); i
++) {
6156 pending
= test_and_clear_bit(rtl_work
[i
].bitnr
, tp
->wk
.flags
);
6158 rtl_work
[i
].action(tp
);
6162 rtl_unlock_work(tp
);
6165 static int rtl8169_poll(struct napi_struct
*napi
, int budget
)
6167 struct rtl8169_private
*tp
= container_of(napi
, struct rtl8169_private
, napi
);
6168 struct net_device
*dev
= tp
->dev
;
6169 u16 enable_mask
= RTL_EVENT_NAPI
| tp
->event_slow
;
6173 status
= rtl_get_events(tp
);
6174 rtl_ack_events(tp
, status
& ~tp
->event_slow
);
6176 if (status
& RTL_EVENT_NAPI_RX
)
6177 work_done
= rtl_rx(dev
, tp
, (u32
) budget
);
6179 if (status
& RTL_EVENT_NAPI_TX
)
6182 if (status
& tp
->event_slow
) {
6183 enable_mask
&= ~tp
->event_slow
;
6185 rtl_schedule_task(tp
, RTL_FLAG_TASK_SLOW_PENDING
);
6188 if (work_done
< budget
) {
6189 napi_complete(napi
);
6191 rtl_irq_enable(tp
, enable_mask
);
6198 static void rtl8169_rx_missed(struct net_device
*dev
, void __iomem
*ioaddr
)
6200 struct rtl8169_private
*tp
= netdev_priv(dev
);
6202 if (tp
->mac_version
> RTL_GIGA_MAC_VER_06
)
6205 dev
->stats
.rx_missed_errors
+= (RTL_R32(RxMissed
) & 0xffffff);
6206 RTL_W32(RxMissed
, 0);
6209 static void rtl8169_down(struct net_device
*dev
)
6211 struct rtl8169_private
*tp
= netdev_priv(dev
);
6212 void __iomem
*ioaddr
= tp
->mmio_addr
;
6214 del_timer_sync(&tp
->timer
);
6216 napi_disable(&tp
->napi
);
6217 netif_stop_queue(dev
);
6219 rtl8169_hw_reset(tp
);
6221 * At this point device interrupts can not be enabled in any function,
6222 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
6223 * and napi is disabled (rtl8169_poll).
6225 rtl8169_rx_missed(dev
, ioaddr
);
6227 /* Give a racing hard_start_xmit a few cycles to complete. */
6228 synchronize_sched();
6230 rtl8169_tx_clear(tp
);
6232 rtl8169_rx_clear(tp
);
6234 rtl_pll_power_down(tp
);
6237 static int rtl8169_close(struct net_device
*dev
)
6239 struct rtl8169_private
*tp
= netdev_priv(dev
);
6240 struct pci_dev
*pdev
= tp
->pci_dev
;
6242 pm_runtime_get_sync(&pdev
->dev
);
6244 /* Update counters before going down */
6245 rtl8169_update_counters(dev
);
6248 clear_bit(RTL_FLAG_TASK_ENABLED
, tp
->wk
.flags
);
6251 rtl_unlock_work(tp
);
6253 free_irq(pdev
->irq
, dev
);
6255 dma_free_coherent(&pdev
->dev
, R8169_RX_RING_BYTES
, tp
->RxDescArray
,
6257 dma_free_coherent(&pdev
->dev
, R8169_TX_RING_BYTES
, tp
->TxDescArray
,
6259 tp
->TxDescArray
= NULL
;
6260 tp
->RxDescArray
= NULL
;
6262 pm_runtime_put_sync(&pdev
->dev
);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polled-IO path (netconsole etc.): run the IRQ handler synchronously. */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(tp->pci_dev->irq, dev);
}
#endif
6276 static int rtl_open(struct net_device
*dev
)
6278 struct rtl8169_private
*tp
= netdev_priv(dev
);
6279 void __iomem
*ioaddr
= tp
->mmio_addr
;
6280 struct pci_dev
*pdev
= tp
->pci_dev
;
6281 int retval
= -ENOMEM
;
6283 pm_runtime_get_sync(&pdev
->dev
);
6286 * Rx and Tx descriptors needs 256 bytes alignment.
6287 * dma_alloc_coherent provides more.
6289 tp
->TxDescArray
= dma_alloc_coherent(&pdev
->dev
, R8169_TX_RING_BYTES
,
6290 &tp
->TxPhyAddr
, GFP_KERNEL
);
6291 if (!tp
->TxDescArray
)
6292 goto err_pm_runtime_put
;
6294 tp
->RxDescArray
= dma_alloc_coherent(&pdev
->dev
, R8169_RX_RING_BYTES
,
6295 &tp
->RxPhyAddr
, GFP_KERNEL
);
6296 if (!tp
->RxDescArray
)
6299 retval
= rtl8169_init_ring(dev
);
6303 INIT_WORK(&tp
->wk
.work
, rtl_task
);
6307 rtl_request_firmware(tp
);
6309 retval
= request_irq(pdev
->irq
, rtl8169_interrupt
,
6310 (tp
->features
& RTL_FEATURE_MSI
) ? 0 : IRQF_SHARED
,
6313 goto err_release_fw_2
;
6317 set_bit(RTL_FLAG_TASK_ENABLED
, tp
->wk
.flags
);
6319 napi_enable(&tp
->napi
);
6321 rtl8169_init_phy(dev
, tp
);
6323 __rtl8169_set_features(dev
, dev
->features
);
6325 rtl_pll_power_up(tp
);
6329 netif_start_queue(dev
);
6331 rtl_unlock_work(tp
);
6333 tp
->saved_wolopts
= 0;
6334 pm_runtime_put_noidle(&pdev
->dev
);
6336 rtl8169_check_link_status(dev
, tp
, ioaddr
);
6341 rtl_release_firmware(tp
);
6342 rtl8169_rx_clear(tp
);
6344 dma_free_coherent(&pdev
->dev
, R8169_RX_RING_BYTES
, tp
->RxDescArray
,
6346 tp
->RxDescArray
= NULL
;
6348 dma_free_coherent(&pdev
->dev
, R8169_TX_RING_BYTES
, tp
->TxDescArray
,
6350 tp
->TxDescArray
= NULL
;
6352 pm_runtime_put_noidle(&pdev
->dev
);
6356 static struct rtnl_link_stats64
*
6357 rtl8169_get_stats64(struct net_device
*dev
, struct rtnl_link_stats64
*stats
)
6359 struct rtl8169_private
*tp
= netdev_priv(dev
);
6360 void __iomem
*ioaddr
= tp
->mmio_addr
;
6363 if (netif_running(dev
))
6364 rtl8169_rx_missed(dev
, ioaddr
);
6367 start
= u64_stats_fetch_begin_bh(&tp
->rx_stats
.syncp
);
6368 stats
->rx_packets
= tp
->rx_stats
.packets
;
6369 stats
->rx_bytes
= tp
->rx_stats
.bytes
;
6370 } while (u64_stats_fetch_retry_bh(&tp
->rx_stats
.syncp
, start
));
6374 start
= u64_stats_fetch_begin_bh(&tp
->tx_stats
.syncp
);
6375 stats
->tx_packets
= tp
->tx_stats
.packets
;
6376 stats
->tx_bytes
= tp
->tx_stats
.bytes
;
6377 } while (u64_stats_fetch_retry_bh(&tp
->tx_stats
.syncp
, start
));
6379 stats
->rx_dropped
= dev
->stats
.rx_dropped
;
6380 stats
->tx_dropped
= dev
->stats
.tx_dropped
;
6381 stats
->rx_length_errors
= dev
->stats
.rx_length_errors
;
6382 stats
->rx_errors
= dev
->stats
.rx_errors
;
6383 stats
->rx_crc_errors
= dev
->stats
.rx_crc_errors
;
6384 stats
->rx_fifo_errors
= dev
->stats
.rx_fifo_errors
;
6385 stats
->rx_missed_errors
= dev
->stats
.rx_missed_errors
;
6390 static void rtl8169_net_suspend(struct net_device
*dev
)
6392 struct rtl8169_private
*tp
= netdev_priv(dev
);
6394 if (!netif_running(dev
))
6397 netif_device_detach(dev
);
6398 netif_stop_queue(dev
);
6401 napi_disable(&tp
->napi
);
6402 clear_bit(RTL_FLAG_TASK_ENABLED
, tp
->wk
.flags
);
6403 rtl_unlock_work(tp
);
6405 rtl_pll_power_down(tp
);
/* dev_pm_ops system-suspend callback. */
static int rtl8169_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);

	rtl8169_net_suspend(dev);

	return 0;
}
6420 static void __rtl8169_resume(struct net_device
*dev
)
6422 struct rtl8169_private
*tp
= netdev_priv(dev
);
6424 netif_device_attach(dev
);
6426 rtl_pll_power_up(tp
);
6429 napi_enable(&tp
->napi
);
6430 set_bit(RTL_FLAG_TASK_ENABLED
, tp
->wk
.flags
);
6431 rtl_unlock_work(tp
);
6433 rtl_schedule_task(tp
, RTL_FLAG_TASK_RESET_PENDING
);
/* dev_pm_ops system-resume callback: re-init the PHY, then restart if up. */
static int rtl8169_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_phy(dev, tp);

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
6450 static int rtl8169_runtime_suspend(struct device
*device
)
6452 struct pci_dev
*pdev
= to_pci_dev(device
);
6453 struct net_device
*dev
= pci_get_drvdata(pdev
);
6454 struct rtl8169_private
*tp
= netdev_priv(dev
);
6456 if (!tp
->TxDescArray
)
6460 tp
->saved_wolopts
= __rtl8169_get_wol(tp
);
6461 __rtl8169_set_wol(tp
, WAKE_ANY
);
6462 rtl_unlock_work(tp
);
6464 rtl8169_net_suspend(dev
);
6469 static int rtl8169_runtime_resume(struct device
*device
)
6471 struct pci_dev
*pdev
= to_pci_dev(device
);
6472 struct net_device
*dev
= pci_get_drvdata(pdev
);
6473 struct rtl8169_private
*tp
= netdev_priv(dev
);
6475 if (!tp
->TxDescArray
)
6479 __rtl8169_set_wol(tp
, tp
->saved_wolopts
);
6480 tp
->saved_wolopts
= 0;
6481 rtl_unlock_work(tp
);
6483 rtl8169_init_phy(dev
, tp
);
6485 __rtl8169_resume(dev
);
6490 static int rtl8169_runtime_idle(struct device
*device
)
6492 struct pci_dev
*pdev
= to_pci_dev(device
);
6493 struct net_device
*dev
= pci_get_drvdata(pdev
);
6494 struct rtl8169_private
*tp
= netdev_priv(dev
);
6496 return tp
->TxDescArray
? -EBUSY
: 0;
6499 static const struct dev_pm_ops rtl8169_pm_ops
= {
6500 .suspend
= rtl8169_suspend
,
6501 .resume
= rtl8169_resume
,
6502 .freeze
= rtl8169_suspend
,
6503 .thaw
= rtl8169_resume
,
6504 .poweroff
= rtl8169_suspend
,
6505 .restore
= rtl8169_resume
,
6506 .runtime_suspend
= rtl8169_runtime_suspend
,
6507 .runtime_resume
= rtl8169_runtime_resume
,
6508 .runtime_idle
= rtl8169_runtime_idle
,
6511 #define RTL8169_PM_OPS (&rtl8169_pm_ops)
6513 #else /* !CONFIG_PM */
6515 #define RTL8169_PM_OPS NULL
6517 #endif /* !CONFIG_PM */
6519 static void rtl_wol_shutdown_quirk(struct rtl8169_private
*tp
)
6521 void __iomem
*ioaddr
= tp
->mmio_addr
;
6523 /* WoL fails with 8168b when the receiver is disabled. */
6524 switch (tp
->mac_version
) {
6525 case RTL_GIGA_MAC_VER_11
:
6526 case RTL_GIGA_MAC_VER_12
:
6527 case RTL_GIGA_MAC_VER_17
:
6528 pci_clear_master(tp
->pci_dev
);
6530 RTL_W8(ChipCmd
, CmdRxEnb
);
6539 static void rtl_shutdown(struct pci_dev
*pdev
)
6541 struct net_device
*dev
= pci_get_drvdata(pdev
);
6542 struct rtl8169_private
*tp
= netdev_priv(dev
);
6543 struct device
*d
= &pdev
->dev
;
6545 pm_runtime_get_sync(d
);
6547 rtl8169_net_suspend(dev
);
6549 /* Restore original MAC address */
6550 rtl_rar_set(tp
, dev
->perm_addr
);
6552 rtl8169_hw_reset(tp
);
6554 if (system_state
== SYSTEM_POWER_OFF
) {
6555 if (__rtl8169_get_wol(tp
) & WAKE_ANY
) {
6556 rtl_wol_suspend_quirk(tp
);
6557 rtl_wol_shutdown_quirk(tp
);
6560 pci_wake_from_d3(pdev
, true);
6561 pci_set_power_state(pdev
, PCI_D3hot
);
6564 pm_runtime_put_noidle(d
);
6567 static void __devexit
rtl_remove_one(struct pci_dev
*pdev
)
6569 struct net_device
*dev
= pci_get_drvdata(pdev
);
6570 struct rtl8169_private
*tp
= netdev_priv(dev
);
6572 if (tp
->mac_version
== RTL_GIGA_MAC_VER_27
||
6573 tp
->mac_version
== RTL_GIGA_MAC_VER_28
||
6574 tp
->mac_version
== RTL_GIGA_MAC_VER_31
) {
6575 rtl8168_driver_stop(tp
);
6578 cancel_work_sync(&tp
->wk
.work
);
6580 netif_napi_del(&tp
->napi
);
6582 unregister_netdev(dev
);
6584 rtl_release_firmware(tp
);
6586 if (pci_dev_run_wake(pdev
))
6587 pm_runtime_get_noresume(&pdev
->dev
);
6589 /* restore original MAC address */
6590 rtl_rar_set(tp
, dev
->perm_addr
);
6592 rtl_disable_msi(pdev
, tp
);
6593 rtl8169_release_board(pdev
, dev
, tp
->mmio_addr
);
6594 pci_set_drvdata(pdev
, NULL
);
6597 static const struct net_device_ops rtl_netdev_ops
= {
6598 .ndo_open
= rtl_open
,
6599 .ndo_stop
= rtl8169_close
,
6600 .ndo_get_stats64
= rtl8169_get_stats64
,
6601 .ndo_start_xmit
= rtl8169_start_xmit
,
6602 .ndo_tx_timeout
= rtl8169_tx_timeout
,
6603 .ndo_validate_addr
= eth_validate_addr
,
6604 .ndo_change_mtu
= rtl8169_change_mtu
,
6605 .ndo_fix_features
= rtl8169_fix_features
,
6606 .ndo_set_features
= rtl8169_set_features
,
6607 .ndo_set_mac_address
= rtl_set_mac_address
,
6608 .ndo_do_ioctl
= rtl8169_ioctl
,
6609 .ndo_set_rx_mode
= rtl_set_rx_mode
,
6610 #ifdef CONFIG_NET_POLL_CONTROLLER
6611 .ndo_poll_controller
= rtl8169_netpoll
,
6616 static const struct rtl_cfg_info
{
6617 void (*hw_start
)(struct net_device
*);
6618 unsigned int region
;
6623 } rtl_cfg_infos
[] = {
6625 .hw_start
= rtl_hw_start_8169
,
6628 .event_slow
= SYSErr
| LinkChg
| RxOverflow
| RxFIFOOver
,
6629 .features
= RTL_FEATURE_GMII
,
6630 .default_ver
= RTL_GIGA_MAC_VER_01
,
6633 .hw_start
= rtl_hw_start_8168
,
6636 .event_slow
= SYSErr
| LinkChg
| RxOverflow
,
6637 .features
= RTL_FEATURE_GMII
| RTL_FEATURE_MSI
,
6638 .default_ver
= RTL_GIGA_MAC_VER_11
,
6641 .hw_start
= rtl_hw_start_8101
,
6644 .event_slow
= SYSErr
| LinkChg
| RxOverflow
| RxFIFOOver
|
6646 .features
= RTL_FEATURE_MSI
,
6647 .default_ver
= RTL_GIGA_MAC_VER_13
,
6651 /* Cfg9346_Unlock assumed. */
6652 static unsigned rtl_try_msi(struct rtl8169_private
*tp
,
6653 const struct rtl_cfg_info
*cfg
)
6655 void __iomem
*ioaddr
= tp
->mmio_addr
;
6659 cfg2
= RTL_R8(Config2
) & ~MSIEnable
;
6660 if (cfg
->features
& RTL_FEATURE_MSI
) {
6661 if (pci_enable_msi(tp
->pci_dev
)) {
6662 netif_info(tp
, hw
, tp
->dev
, "no MSI. Back to INTx.\n");
6665 msi
= RTL_FEATURE_MSI
;
6668 if (tp
->mac_version
<= RTL_GIGA_MAC_VER_06
)
6669 RTL_W8(Config2
, cfg2
);
6673 DECLARE_RTL_COND(rtl_link_list_ready_cond
)
6675 void __iomem
*ioaddr
= tp
->mmio_addr
;
6677 return RTL_R8(MCU
) & LINK_LIST_RDY
;
6680 DECLARE_RTL_COND(rtl_rxtx_empty_cond
)
6682 void __iomem
*ioaddr
= tp
->mmio_addr
;
6684 return (RTL_R8(MCU
) & RXTX_EMPTY
) == RXTX_EMPTY
;
6687 static void __devinit
rtl_hw_init_8168g(struct rtl8169_private
*tp
)
6689 void __iomem
*ioaddr
= tp
->mmio_addr
;
6692 tp
->ocp_base
= OCP_STD_PHY_BASE
;
6694 RTL_W32(MISC
, RTL_R32(MISC
) | RXDV_GATED_EN
);
6696 if (!rtl_udelay_loop_wait_high(tp
, &rtl_txcfg_empty_cond
, 100, 42))
6699 if (!rtl_udelay_loop_wait_high(tp
, &rtl_rxtx_empty_cond
, 100, 42))
6702 RTL_W8(ChipCmd
, RTL_R8(ChipCmd
) & ~(CmdTxEnb
| CmdRxEnb
));
6704 RTL_W8(MCU
, RTL_R8(MCU
) & ~NOW_IS_OOB
);
6706 data
= r8168_mac_ocp_read(tp
, 0xe8de);
6708 r8168_mac_ocp_write(tp
, 0xe8de, data
);
6710 if (!rtl_udelay_loop_wait_high(tp
, &rtl_link_list_ready_cond
, 100, 42))
6713 data
= r8168_mac_ocp_read(tp
, 0xe8de);
6715 r8168_mac_ocp_write(tp
, 0xe8de, data
);
6717 if (!rtl_udelay_loop_wait_high(tp
, &rtl_link_list_ready_cond
, 100, 42))
6721 static void __devinit
rtl_hw_initialize(struct rtl8169_private
*tp
)
6723 switch (tp
->mac_version
) {
6724 case RTL_GIGA_MAC_VER_40
:
6725 case RTL_GIGA_MAC_VER_41
:
6726 rtl_hw_init_8168g(tp
);
6734 static int __devinit
6735 rtl_init_one(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
6737 const struct rtl_cfg_info
*cfg
= rtl_cfg_infos
+ ent
->driver_data
;
6738 const unsigned int region
= cfg
->region
;
6739 struct rtl8169_private
*tp
;
6740 struct mii_if_info
*mii
;
6741 struct net_device
*dev
;
6742 void __iomem
*ioaddr
;
6746 if (netif_msg_drv(&debug
)) {
6747 printk(KERN_INFO
"%s Gigabit Ethernet driver %s loaded\n",
6748 MODULENAME
, RTL8169_VERSION
);
6751 dev
= alloc_etherdev(sizeof (*tp
));
6757 SET_NETDEV_DEV(dev
, &pdev
->dev
);
6758 dev
->netdev_ops
= &rtl_netdev_ops
;
6759 tp
= netdev_priv(dev
);
6762 tp
->msg_enable
= netif_msg_init(debug
.msg_enable
, R8169_MSG_DEFAULT
);
6766 mii
->mdio_read
= rtl_mdio_read
;
6767 mii
->mdio_write
= rtl_mdio_write
;
6768 mii
->phy_id_mask
= 0x1f;
6769 mii
->reg_num_mask
= 0x1f;
6770 mii
->supports_gmii
= !!(cfg
->features
& RTL_FEATURE_GMII
);
6772 /* disable ASPM completely as that cause random device stop working
6773 * problems as well as full system hangs for some PCIe devices users */
6774 pci_disable_link_state(pdev
, PCIE_LINK_STATE_L0S
| PCIE_LINK_STATE_L1
|
6775 PCIE_LINK_STATE_CLKPM
);
6777 /* enable device (incl. PCI PM wakeup and hotplug setup) */
6778 rc
= pci_enable_device(pdev
);
6780 netif_err(tp
, probe
, dev
, "enable failure\n");
6781 goto err_out_free_dev_1
;
6784 if (pci_set_mwi(pdev
) < 0)
6785 netif_info(tp
, probe
, dev
, "Mem-Wr-Inval unavailable\n");
6787 /* make sure PCI base addr 1 is MMIO */
6788 if (!(pci_resource_flags(pdev
, region
) & IORESOURCE_MEM
)) {
6789 netif_err(tp
, probe
, dev
,
6790 "region #%d not an MMIO resource, aborting\n",
6796 /* check for weird/broken PCI region reporting */
6797 if (pci_resource_len(pdev
, region
) < R8169_REGS_SIZE
) {
6798 netif_err(tp
, probe
, dev
,
6799 "Invalid PCI region size(s), aborting\n");
6804 rc
= pci_request_regions(pdev
, MODULENAME
);
6806 netif_err(tp
, probe
, dev
, "could not request regions\n");
6810 tp
->cp_cmd
= RxChkSum
;
6812 if ((sizeof(dma_addr_t
) > 4) &&
6813 !pci_set_dma_mask(pdev
, DMA_BIT_MASK(64)) && use_dac
) {
6814 tp
->cp_cmd
|= PCIDAC
;
6815 dev
->features
|= NETIF_F_HIGHDMA
;
6817 rc
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
6819 netif_err(tp
, probe
, dev
, "DMA configuration failed\n");
6820 goto err_out_free_res_3
;
6824 /* ioremap MMIO region */
6825 ioaddr
= ioremap(pci_resource_start(pdev
, region
), R8169_REGS_SIZE
);
6827 netif_err(tp
, probe
, dev
, "cannot remap MMIO, aborting\n");
6829 goto err_out_free_res_3
;
6831 tp
->mmio_addr
= ioaddr
;
6833 if (!pci_is_pcie(pdev
))
6834 netif_info(tp
, probe
, dev
, "not PCI Express\n");
6836 /* Identify chip attached to board */
6837 rtl8169_get_mac_version(tp
, dev
, cfg
->default_ver
);
6841 rtl_irq_disable(tp
);
6843 rtl_hw_initialize(tp
);
6847 rtl_ack_events(tp
, 0xffff);
6849 pci_set_master(pdev
);
6852 * Pretend we are using VLANs; This bypasses a nasty bug where
6853 * Interrupts stop flowing on high load on 8110SCd controllers.
6855 if (tp
->mac_version
== RTL_GIGA_MAC_VER_05
)
6856 tp
->cp_cmd
|= RxVlan
;
6858 rtl_init_mdio_ops(tp
);
6859 rtl_init_pll_power_ops(tp
);
6860 rtl_init_jumbo_ops(tp
);
6861 rtl_init_csi_ops(tp
);
6863 rtl8169_print_mac_version(tp
);
6865 chipset
= tp
->mac_version
;
6866 tp
->txd_version
= rtl_chip_infos
[chipset
].txd_version
;
6868 RTL_W8(Cfg9346
, Cfg9346_Unlock
);
6869 RTL_W8(Config1
, RTL_R8(Config1
) | PMEnable
);
6870 RTL_W8(Config5
, RTL_R8(Config5
) & PMEStatus
);
6871 if ((RTL_R8(Config3
) & (LinkUp
| MagicPacket
)) != 0)
6872 tp
->features
|= RTL_FEATURE_WOL
;
6873 if ((RTL_R8(Config5
) & (UWF
| BWF
| MWF
)) != 0)
6874 tp
->features
|= RTL_FEATURE_WOL
;
6875 tp
->features
|= rtl_try_msi(tp
, cfg
);
6876 RTL_W8(Cfg9346
, Cfg9346_Lock
);
6878 if (rtl_tbi_enabled(tp
)) {
6879 tp
->set_speed
= rtl8169_set_speed_tbi
;
6880 tp
->get_settings
= rtl8169_gset_tbi
;
6881 tp
->phy_reset_enable
= rtl8169_tbi_reset_enable
;
6882 tp
->phy_reset_pending
= rtl8169_tbi_reset_pending
;
6883 tp
->link_ok
= rtl8169_tbi_link_ok
;
6884 tp
->do_ioctl
= rtl_tbi_ioctl
;
6886 tp
->set_speed
= rtl8169_set_speed_xmii
;
6887 tp
->get_settings
= rtl8169_gset_xmii
;
6888 tp
->phy_reset_enable
= rtl8169_xmii_reset_enable
;
6889 tp
->phy_reset_pending
= rtl8169_xmii_reset_pending
;
6890 tp
->link_ok
= rtl8169_xmii_link_ok
;
6891 tp
->do_ioctl
= rtl_xmii_ioctl
;
6894 mutex_init(&tp
->wk
.mutex
);
6896 /* Get MAC address */
6897 for (i
= 0; i
< ETH_ALEN
; i
++)
6898 dev
->dev_addr
[i
] = RTL_R8(MAC0
+ i
);
6899 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
6901 SET_ETHTOOL_OPS(dev
, &rtl8169_ethtool_ops
);
6902 dev
->watchdog_timeo
= RTL8169_TX_TIMEOUT
;
6904 netif_napi_add(dev
, &tp
->napi
, rtl8169_poll
, R8169_NAPI_WEIGHT
);
6906 /* don't enable SG, IP_CSUM and TSO by default - it might not work
6907 * properly for all devices */
6908 dev
->features
|= NETIF_F_RXCSUM
|
6909 NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
6911 dev
->hw_features
= NETIF_F_SG
| NETIF_F_IP_CSUM
| NETIF_F_TSO
|
6912 NETIF_F_RXCSUM
| NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
6913 dev
->vlan_features
= NETIF_F_SG
| NETIF_F_IP_CSUM
| NETIF_F_TSO
|
6916 if (tp
->mac_version
== RTL_GIGA_MAC_VER_05
)
6917 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6918 dev
->hw_features
&= ~NETIF_F_HW_VLAN_RX
;
6920 dev
->hw_features
|= NETIF_F_RXALL
;
6921 dev
->hw_features
|= NETIF_F_RXFCS
;
6923 tp
->hw_start
= cfg
->hw_start
;
6924 tp
->event_slow
= cfg
->event_slow
;
6926 tp
->opts1_mask
= (tp
->mac_version
!= RTL_GIGA_MAC_VER_01
) ?
6927 ~(RxBOVF
| RxFOVF
) : ~0;
6929 init_timer(&tp
->timer
);
6930 tp
->timer
.data
= (unsigned long) dev
;
6931 tp
->timer
.function
= rtl8169_phy_timer
;
6933 tp
->rtl_fw
= RTL_FIRMWARE_UNKNOWN
;
6935 rc
= register_netdev(dev
);
6939 pci_set_drvdata(pdev
, dev
);
6941 netif_info(tp
, probe
, dev
, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6942 rtl_chip_infos
[chipset
].name
, ioaddr
, dev
->dev_addr
,
6943 (u32
)(RTL_R32(TxConfig
) & 0x9cf0f8ff), pdev
->irq
);
6944 if (rtl_chip_infos
[chipset
].jumbo_max
!= JUMBO_1K
) {
6945 netif_info(tp
, probe
, dev
, "jumbo features [frames: %d bytes, "
6946 "tx checksumming: %s]\n",
6947 rtl_chip_infos
[chipset
].jumbo_max
,
6948 rtl_chip_infos
[chipset
].jumbo_tx_csum
? "ok" : "ko");
6951 if (tp
->mac_version
== RTL_GIGA_MAC_VER_27
||
6952 tp
->mac_version
== RTL_GIGA_MAC_VER_28
||
6953 tp
->mac_version
== RTL_GIGA_MAC_VER_31
) {
6954 rtl8168_driver_start(tp
);
6957 device_set_wakeup_enable(&pdev
->dev
, tp
->features
& RTL_FEATURE_WOL
);
6959 if (pci_dev_run_wake(pdev
))
6960 pm_runtime_put_noidle(&pdev
->dev
);
6962 netif_carrier_off(dev
);
6968 netif_napi_del(&tp
->napi
);
6969 rtl_disable_msi(pdev
, tp
);
6972 pci_release_regions(pdev
);
6974 pci_clear_mwi(pdev
);
6975 pci_disable_device(pdev
);
6981 static struct pci_driver rtl8169_pci_driver
= {
6983 .id_table
= rtl8169_pci_tbl
,
6984 .probe
= rtl_init_one
,
6985 .remove
= __devexit_p(rtl_remove_one
),
6986 .shutdown
= rtl_shutdown
,
6987 .driver
.pm
= RTL8169_PM_OPS
,
6990 static int __init
rtl8169_init_module(void)
6992 return pci_register_driver(&rtl8169_pci_driver
);
6995 static void __exit
rtl8169_cleanup_module(void)
6997 pci_unregister_driver(&rtl8169_pci_driver
);
7000 module_init(rtl8169_init_module
);
7001 module_exit(rtl8169_cleanup_module
);