2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
8 * See MAINTAINERS file for support contact information.
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/clk.h>
17 #include <linux/delay.h>
18 #include <linux/ethtool.h>
19 #include <linux/phy.h>
20 #include <linux/if_vlan.h>
21 #include <linux/crc32.h>
25 #include <linux/tcp.h>
26 #include <linux/interrupt.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/firmware.h>
30 #include <linux/prefetch.h>
31 #include <linux/pci-aspm.h>
32 #include <linux/ipv6.h>
33 #include <net/ip6_checksum.h>
35 #define MODULENAME "r8169"
37 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
38 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
39 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
40 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
41 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
42 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
43 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
44 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
45 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
46 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
47 #define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw"
48 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
49 #define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw"
50 #define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw"
51 #define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw"
52 #define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw"
53 #define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw"
54 #define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
55 #define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
57 #define R8169_MSG_DEFAULT \
58 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
60 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
61 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
62 static const int multicast_filter_limit
= 32;
64 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
65 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
67 #define R8169_REGS_SIZE 256
68 #define R8169_RX_BUF_SIZE (SZ_16K - 1)
69 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
70 #define NUM_RX_DESC 256U /* Number of Rx descriptor registers */
71 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
72 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
74 /* write/read MMIO register */
75 #define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg))
76 #define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg))
77 #define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg))
78 #define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg))
79 #define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg))
80 #define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg))
83 RTL_GIGA_MAC_VER_01
= 0,
134 RTL_GIGA_MAC_NONE
= 0xff,
137 #define JUMBO_1K ETH_DATA_LEN
138 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
139 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
140 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
141 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
143 static const struct {
146 } rtl_chip_infos
[] = {
148 [RTL_GIGA_MAC_VER_01
] = {"RTL8169" },
149 [RTL_GIGA_MAC_VER_02
] = {"RTL8169s" },
150 [RTL_GIGA_MAC_VER_03
] = {"RTL8110s" },
151 [RTL_GIGA_MAC_VER_04
] = {"RTL8169sb/8110sb" },
152 [RTL_GIGA_MAC_VER_05
] = {"RTL8169sc/8110sc" },
153 [RTL_GIGA_MAC_VER_06
] = {"RTL8169sc/8110sc" },
155 [RTL_GIGA_MAC_VER_07
] = {"RTL8102e" },
156 [RTL_GIGA_MAC_VER_08
] = {"RTL8102e" },
157 [RTL_GIGA_MAC_VER_09
] = {"RTL8102e" },
158 [RTL_GIGA_MAC_VER_10
] = {"RTL8101e" },
159 [RTL_GIGA_MAC_VER_11
] = {"RTL8168b/8111b" },
160 [RTL_GIGA_MAC_VER_12
] = {"RTL8168b/8111b" },
161 [RTL_GIGA_MAC_VER_13
] = {"RTL8101e" },
162 [RTL_GIGA_MAC_VER_14
] = {"RTL8100e" },
163 [RTL_GIGA_MAC_VER_15
] = {"RTL8100e" },
164 [RTL_GIGA_MAC_VER_16
] = {"RTL8101e" },
165 [RTL_GIGA_MAC_VER_17
] = {"RTL8168b/8111b" },
166 [RTL_GIGA_MAC_VER_18
] = {"RTL8168cp/8111cp" },
167 [RTL_GIGA_MAC_VER_19
] = {"RTL8168c/8111c" },
168 [RTL_GIGA_MAC_VER_20
] = {"RTL8168c/8111c" },
169 [RTL_GIGA_MAC_VER_21
] = {"RTL8168c/8111c" },
170 [RTL_GIGA_MAC_VER_22
] = {"RTL8168c/8111c" },
171 [RTL_GIGA_MAC_VER_23
] = {"RTL8168cp/8111cp" },
172 [RTL_GIGA_MAC_VER_24
] = {"RTL8168cp/8111cp" },
173 [RTL_GIGA_MAC_VER_25
] = {"RTL8168d/8111d", FIRMWARE_8168D_1
},
174 [RTL_GIGA_MAC_VER_26
] = {"RTL8168d/8111d", FIRMWARE_8168D_2
},
175 [RTL_GIGA_MAC_VER_27
] = {"RTL8168dp/8111dp" },
176 [RTL_GIGA_MAC_VER_28
] = {"RTL8168dp/8111dp" },
177 [RTL_GIGA_MAC_VER_29
] = {"RTL8105e", FIRMWARE_8105E_1
},
178 [RTL_GIGA_MAC_VER_30
] = {"RTL8105e", FIRMWARE_8105E_1
},
179 [RTL_GIGA_MAC_VER_31
] = {"RTL8168dp/8111dp" },
180 [RTL_GIGA_MAC_VER_32
] = {"RTL8168e/8111e", FIRMWARE_8168E_1
},
181 [RTL_GIGA_MAC_VER_33
] = {"RTL8168e/8111e", FIRMWARE_8168E_2
},
182 [RTL_GIGA_MAC_VER_34
] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3
},
183 [RTL_GIGA_MAC_VER_35
] = {"RTL8168f/8111f", FIRMWARE_8168F_1
},
184 [RTL_GIGA_MAC_VER_36
] = {"RTL8168f/8111f", FIRMWARE_8168F_2
},
185 [RTL_GIGA_MAC_VER_37
] = {"RTL8402", FIRMWARE_8402_1
},
186 [RTL_GIGA_MAC_VER_38
] = {"RTL8411", FIRMWARE_8411_1
},
187 [RTL_GIGA_MAC_VER_39
] = {"RTL8106e", FIRMWARE_8106E_1
},
188 [RTL_GIGA_MAC_VER_40
] = {"RTL8168g/8111g", FIRMWARE_8168G_2
},
189 [RTL_GIGA_MAC_VER_41
] = {"RTL8168g/8111g" },
190 [RTL_GIGA_MAC_VER_42
] = {"RTL8168g/8111g", FIRMWARE_8168G_3
},
191 [RTL_GIGA_MAC_VER_43
] = {"RTL8106e", FIRMWARE_8106E_2
},
192 [RTL_GIGA_MAC_VER_44
] = {"RTL8411", FIRMWARE_8411_2
},
193 [RTL_GIGA_MAC_VER_45
] = {"RTL8168h/8111h", FIRMWARE_8168H_1
},
194 [RTL_GIGA_MAC_VER_46
] = {"RTL8168h/8111h", FIRMWARE_8168H_2
},
195 [RTL_GIGA_MAC_VER_47
] = {"RTL8107e", FIRMWARE_8107E_1
},
196 [RTL_GIGA_MAC_VER_48
] = {"RTL8107e", FIRMWARE_8107E_2
},
197 [RTL_GIGA_MAC_VER_49
] = {"RTL8168ep/8111ep" },
198 [RTL_GIGA_MAC_VER_50
] = {"RTL8168ep/8111ep" },
199 [RTL_GIGA_MAC_VER_51
] = {"RTL8168ep/8111ep" },
208 static const struct pci_device_id rtl8169_pci_tbl
[] = {
209 { PCI_VDEVICE(REALTEK
, 0x2502), RTL_CFG_1
},
210 { PCI_VDEVICE(REALTEK
, 0x2600), RTL_CFG_1
},
211 { PCI_VDEVICE(REALTEK
, 0x8129), RTL_CFG_0
},
212 { PCI_VDEVICE(REALTEK
, 0x8136), RTL_CFG_2
},
213 { PCI_VDEVICE(REALTEK
, 0x8161), RTL_CFG_1
},
214 { PCI_VDEVICE(REALTEK
, 0x8167), RTL_CFG_0
},
215 { PCI_VDEVICE(REALTEK
, 0x8168), RTL_CFG_1
},
216 { PCI_VDEVICE(NCUBE
, 0x8168), RTL_CFG_1
},
217 { PCI_VDEVICE(REALTEK
, 0x8169), RTL_CFG_0
},
218 { PCI_VENDOR_ID_DLINK
, 0x4300,
219 PCI_VENDOR_ID_DLINK
, 0x4b10, 0, 0, RTL_CFG_1
},
220 { PCI_VDEVICE(DLINK
, 0x4300), RTL_CFG_0
},
221 { PCI_VDEVICE(DLINK
, 0x4302), RTL_CFG_0
},
222 { PCI_VDEVICE(AT
, 0xc107), RTL_CFG_0
},
223 { PCI_VDEVICE(USR
, 0x0116), RTL_CFG_0
},
224 { PCI_VENDOR_ID_LINKSYS
, 0x1032,
225 PCI_ANY_ID
, 0x0024, 0, 0, RTL_CFG_0
},
227 PCI_ANY_ID
, 0x2410, 0, 0, RTL_CFG_2
},
231 MODULE_DEVICE_TABLE(pci
, rtl8169_pci_tbl
);
238 MAC0
= 0, /* Ethernet hardware address. */
240 MAR0
= 8, /* Multicast filter. */
241 CounterAddrLow
= 0x10,
242 CounterAddrHigh
= 0x14,
243 TxDescStartAddrLow
= 0x20,
244 TxDescStartAddrHigh
= 0x24,
245 TxHDescStartAddrLow
= 0x28,
246 TxHDescStartAddrHigh
= 0x2c,
255 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
256 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
259 #define RX128_INT_EN (1 << 15) /* 8111c and later */
260 #define RX_MULTI_EN (1 << 14) /* 8111c only */
261 #define RXCFG_FIFO_SHIFT 13
262 /* No threshold before first PCI xfer */
263 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
264 #define RX_EARLY_OFF (1 << 11)
265 #define RXCFG_DMA_SHIFT 8
266 /* Unlimited maximum PCI burst. */
267 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
274 #define PME_SIGNAL (1 << 5) /* 8168c and later */
286 #define RTL_COALESCE_MASK 0x0f
287 #define RTL_COALESCE_SHIFT 4
288 #define RTL_COALESCE_T_MAX (RTL_COALESCE_MASK)
289 #define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_MASK << 2)
291 RxDescAddrLow
= 0xe4,
292 RxDescAddrHigh
= 0xe8,
293 EarlyTxThres
= 0xec, /* 8169. Unit of 32 bytes. */
295 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
297 MaxTxPacketSize
= 0xec, /* 8101/8168. Unit of 128 bytes. */
299 #define TxPacketMax (8064 >> 7)
300 #define EarlySize 0x27
303 FuncEventMask
= 0xf4,
304 FuncPresetState
= 0xf8,
309 FuncForceEvent
= 0xfc,
312 enum rtl8168_8101_registers
{
315 #define CSIAR_FLAG 0x80000000
316 #define CSIAR_WRITE_CMD 0x80000000
317 #define CSIAR_BYTE_ENABLE 0x0000f000
318 #define CSIAR_ADDR_MASK 0x00000fff
321 #define EPHYAR_FLAG 0x80000000
322 #define EPHYAR_WRITE_CMD 0x80000000
323 #define EPHYAR_REG_MASK 0x1f
324 #define EPHYAR_REG_SHIFT 16
325 #define EPHYAR_DATA_MASK 0xffff
327 #define PFM_EN (1 << 6)
328 #define TX_10M_PS_EN (1 << 7)
330 #define FIX_NAK_1 (1 << 4)
331 #define FIX_NAK_2 (1 << 3)
334 #define NOW_IS_OOB (1 << 7)
335 #define TX_EMPTY (1 << 5)
336 #define RX_EMPTY (1 << 4)
337 #define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
338 #define EN_NDP (1 << 3)
339 #define EN_OOB_RESET (1 << 2)
340 #define LINK_LIST_RDY (1 << 1)
342 #define EFUSEAR_FLAG 0x80000000
343 #define EFUSEAR_WRITE_CMD 0x80000000
344 #define EFUSEAR_READ_CMD 0x00000000
345 #define EFUSEAR_REG_MASK 0x03ff
346 #define EFUSEAR_REG_SHIFT 8
347 #define EFUSEAR_DATA_MASK 0xff
349 #define PFM_D3COLD_EN (1 << 6)
352 enum rtl8168_registers
{
357 #define ERIAR_FLAG 0x80000000
358 #define ERIAR_WRITE_CMD 0x80000000
359 #define ERIAR_READ_CMD 0x00000000
360 #define ERIAR_ADDR_BYTE_ALIGN 4
361 #define ERIAR_TYPE_SHIFT 16
362 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
363 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
364 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
365 #define ERIAR_OOB (0x02 << ERIAR_TYPE_SHIFT)
366 #define ERIAR_MASK_SHIFT 12
367 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
368 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
369 #define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT)
370 #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
371 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
372 EPHY_RXER_NUM
= 0x7c,
373 OCPDR
= 0xb0, /* OCP GPHY access */
374 #define OCPDR_WRITE_CMD 0x80000000
375 #define OCPDR_READ_CMD 0x00000000
376 #define OCPDR_REG_MASK 0x7f
377 #define OCPDR_GPHY_REG_SHIFT 16
378 #define OCPDR_DATA_MASK 0xffff
380 #define OCPAR_FLAG 0x80000000
381 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
382 #define OCPAR_GPHY_READ_CMD 0x0000f060
384 RDSAR1
= 0xd0, /* 8168c only. Undocumented on 8168dp */
385 MISC
= 0xf0, /* 8168e only. */
386 #define TXPLA_RST (1 << 29)
387 #define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
388 #define PWM_EN (1 << 22)
389 #define RXDV_GATED_EN (1 << 19)
390 #define EARLY_TALLY_EN (1 << 16)
393 enum rtl_register_content
{
394 /* InterruptStatusBits */
398 TxDescUnavail
= 0x0080,
422 /* TXPoll register p.5 */
423 HPQ
= 0x80, /* Poll cmd on the high prio queue */
424 NPQ
= 0x40, /* Poll cmd on the low prio queue */
425 FSWInt
= 0x01, /* Forced software interrupt */
429 Cfg9346_Unlock
= 0xc0,
434 AcceptBroadcast
= 0x08,
435 AcceptMulticast
= 0x04,
437 AcceptAllPhys
= 0x01,
438 #define RX_CONFIG_ACCEPT_MASK 0x3f
441 TxInterFrameGapShift
= 24,
442 TxDMAShift
= 8, /* DMA burst value (0-7) is shift this many bits */
444 /* Config1 register p.24 */
447 Speed_down
= (1 << 4),
451 PMEnable
= (1 << 0), /* Power Management Enable */
453 /* Config2 register p. 25 */
454 ClkReqEn
= (1 << 7), /* Clock Request Enable */
455 MSIEnable
= (1 << 5), /* 8169 only. Reserved in the 8168. */
456 PCI_Clock_66MHz
= 0x01,
457 PCI_Clock_33MHz
= 0x00,
459 /* Config3 register p.25 */
460 MagicPacket
= (1 << 5), /* Wake up when receives a Magic Packet */
461 LinkUp
= (1 << 4), /* Wake up when the cable connection is re-established */
462 Jumbo_En0
= (1 << 2), /* 8168 only. Reserved in the 8168b */
463 Rdy_to_L23
= (1 << 1), /* L23 Enable */
464 Beacon_en
= (1 << 0), /* 8168 only. Reserved in the 8168b */
466 /* Config4 register */
467 Jumbo_En1
= (1 << 1), /* 8168 only. Reserved in the 8168b */
469 /* Config5 register p.27 */
470 BWF
= (1 << 6), /* Accept Broadcast wakeup frame */
471 MWF
= (1 << 5), /* Accept Multicast wakeup frame */
472 UWF
= (1 << 4), /* Accept Unicast wakeup frame */
474 LanWake
= (1 << 1), /* LanWake enable/disable */
475 PMEStatus
= (1 << 0), /* PME status can be reset by PCI RST# */
476 ASPM_en
= (1 << 0), /* ASPM enable */
479 EnableBist
= (1 << 15), // 8168 8101
480 Mac_dbgo_oe
= (1 << 14), // 8168 8101
481 Normal_mode
= (1 << 13), // unused
482 Force_half_dup
= (1 << 12), // 8168 8101
483 Force_rxflow_en
= (1 << 11), // 8168 8101
484 Force_txflow_en
= (1 << 10), // 8168 8101
485 Cxpl_dbg_sel
= (1 << 9), // 8168 8101
486 ASF
= (1 << 8), // 8168 8101
487 PktCntrDisable
= (1 << 7), // 8168 8101
488 Mac_dbgo_sel
= 0x001c, // 8168
493 #define INTT_MASK GENMASK(1, 0)
495 /* rtl8169_PHYstatus */
506 TBILinkOK
= 0x02000000,
508 /* ResetCounterCommand */
511 /* DumpCounterCommand */
514 /* magic enable v2 */
515 MagicPacket_v2
= (1 << 16), /* Wake up when receives a Magic Packet */
519 /* First doubleword. */
520 DescOwn
= (1 << 31), /* Descriptor is owned by NIC */
521 RingEnd
= (1 << 30), /* End of descriptor ring */
522 FirstFrag
= (1 << 29), /* First segment of a packet */
523 LastFrag
= (1 << 28), /* Final segment of a packet */
527 enum rtl_tx_desc_bit
{
528 /* First doubleword. */
529 TD_LSO
= (1 << 27), /* Large Send Offload */
530 #define TD_MSS_MAX 0x07ffu /* MSS value */
532 /* Second doubleword. */
533 TxVlanTag
= (1 << 17), /* Add VLAN tag */
536 /* 8169, 8168b and 810x except 8102e. */
537 enum rtl_tx_desc_bit_0
{
538 /* First doubleword. */
539 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
540 TD0_TCP_CS
= (1 << 16), /* Calculate TCP/IP checksum */
541 TD0_UDP_CS
= (1 << 17), /* Calculate UDP/IP checksum */
542 TD0_IP_CS
= (1 << 18), /* Calculate IP checksum */
545 /* 8102e, 8168c and beyond. */
546 enum rtl_tx_desc_bit_1
{
547 /* First doubleword. */
548 TD1_GTSENV4
= (1 << 26), /* Giant Send for IPv4 */
549 TD1_GTSENV6
= (1 << 25), /* Giant Send for IPv6 */
550 #define GTTCPHO_SHIFT 18
551 #define GTTCPHO_MAX 0x7fU
553 /* Second doubleword. */
554 #define TCPHO_SHIFT 18
555 #define TCPHO_MAX 0x3ffU
556 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
557 TD1_IPv6_CS
= (1 << 28), /* Calculate IPv6 checksum */
558 TD1_IPv4_CS
= (1 << 29), /* Calculate IPv4 checksum */
559 TD1_TCP_CS
= (1 << 30), /* Calculate TCP/IP checksum */
560 TD1_UDP_CS
= (1 << 31), /* Calculate UDP/IP checksum */
563 enum rtl_rx_desc_bit
{
565 PID1
= (1 << 18), /* Protocol ID bit 1/2 */
566 PID0
= (1 << 17), /* Protocol ID bit 0/2 */
568 #define RxProtoUDP (PID1)
569 #define RxProtoTCP (PID0)
570 #define RxProtoIP (PID1 | PID0)
571 #define RxProtoMask RxProtoIP
573 IPFail
= (1 << 16), /* IP checksum failed */
574 UDPFail
= (1 << 15), /* UDP/IP checksum failed */
575 TCPFail
= (1 << 14), /* TCP/IP checksum failed */
576 RxVlanTag
= (1 << 16), /* VLAN tag available */
579 #define RsvdMask 0x3fffc000
580 #define CPCMD_QUIRK_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK)
599 struct rtl8169_counters
{
606 __le32 tx_one_collision
;
607 __le32 tx_multi_collision
;
615 struct rtl8169_tc_offsets
{
618 __le32 tx_multi_collision
;
623 RTL_FLAG_TASK_ENABLED
= 0,
624 RTL_FLAG_TASK_RESET_PENDING
,
628 struct rtl8169_stats
{
631 struct u64_stats_sync syncp
;
634 struct rtl8169_private
{
635 void __iomem
*mmio_addr
; /* memory map physical address */
636 struct pci_dev
*pci_dev
;
637 struct net_device
*dev
;
638 struct phy_device
*phydev
;
639 struct napi_struct napi
;
642 u32 cur_rx
; /* Index into the Rx descriptor buffer of next Rx pkt. */
643 u32 cur_tx
; /* Index into the Tx descriptor buffer of next Rx pkt. */
645 struct rtl8169_stats rx_stats
;
646 struct rtl8169_stats tx_stats
;
647 struct TxDesc
*TxDescArray
; /* 256-aligned Tx descriptor ring */
648 struct RxDesc
*RxDescArray
; /* 256-aligned Rx descriptor ring */
649 dma_addr_t TxPhyAddr
;
650 dma_addr_t RxPhyAddr
;
651 void *Rx_databuff
[NUM_RX_DESC
]; /* Rx data buffers */
652 struct ring_info tx_skb
[NUM_TX_DESC
]; /* Tx data buffers */
656 const struct rtl_coalesce_info
*coalesce_info
;
660 void (*write
)(struct rtl8169_private
*, int, int);
661 int (*read
)(struct rtl8169_private
*, int);
665 void (*enable
)(struct rtl8169_private
*);
666 void (*disable
)(struct rtl8169_private
*);
669 void (*hw_start
)(struct rtl8169_private
*tp
);
670 bool (*tso_csum
)(struct rtl8169_private
*, struct sk_buff
*, u32
*);
673 DECLARE_BITMAP(flags
, RTL_FLAG_MAX
);
675 struct work_struct work
;
678 unsigned irq_enabled
:1;
679 unsigned supports_gmii
:1;
680 dma_addr_t counters_phys_addr
;
681 struct rtl8169_counters
*counters
;
682 struct rtl8169_tc_offsets tc_offset
;
687 const struct firmware
*fw
;
689 #define RTL_VER_SIZE 32
691 char version
[RTL_VER_SIZE
];
693 struct rtl_fw_phy_action
{
702 typedef void (*rtl_generic_fct
)(struct rtl8169_private
*tp
);
704 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
705 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
706 module_param_named(debug
, debug
.msg_enable
, int, 0);
707 MODULE_PARM_DESC(debug
, "Debug verbosity level (0=none, ..., 16=all)");
708 MODULE_SOFTDEP("pre: realtek");
709 MODULE_LICENSE("GPL");
710 MODULE_FIRMWARE(FIRMWARE_8168D_1
);
711 MODULE_FIRMWARE(FIRMWARE_8168D_2
);
712 MODULE_FIRMWARE(FIRMWARE_8168E_1
);
713 MODULE_FIRMWARE(FIRMWARE_8168E_2
);
714 MODULE_FIRMWARE(FIRMWARE_8168E_3
);
715 MODULE_FIRMWARE(FIRMWARE_8105E_1
);
716 MODULE_FIRMWARE(FIRMWARE_8168F_1
);
717 MODULE_FIRMWARE(FIRMWARE_8168F_2
);
718 MODULE_FIRMWARE(FIRMWARE_8402_1
);
719 MODULE_FIRMWARE(FIRMWARE_8411_1
);
720 MODULE_FIRMWARE(FIRMWARE_8411_2
);
721 MODULE_FIRMWARE(FIRMWARE_8106E_1
);
722 MODULE_FIRMWARE(FIRMWARE_8106E_2
);
723 MODULE_FIRMWARE(FIRMWARE_8168G_2
);
724 MODULE_FIRMWARE(FIRMWARE_8168G_3
);
725 MODULE_FIRMWARE(FIRMWARE_8168H_1
);
726 MODULE_FIRMWARE(FIRMWARE_8168H_2
);
727 MODULE_FIRMWARE(FIRMWARE_8107E_1
);
728 MODULE_FIRMWARE(FIRMWARE_8107E_2
);
730 static inline struct device
*tp_to_dev(struct rtl8169_private
*tp
)
732 return &tp
->pci_dev
->dev
;
/* Serialize runtime reconfiguration against the deferred work handler. */
static void rtl_lock_work(struct rtl8169_private *tp)
{
	mutex_lock(&tp->wk.mutex);
}

static void rtl_unlock_work(struct rtl8169_private *tp)
{
	mutex_unlock(&tp->wk.mutex);
}
/* Put 9346CR back in normal mode, write-protecting the Config registers. */
static void rtl_lock_config_regs(struct rtl8169_private *tp)
{
	RTL_W8(tp, Cfg9346, Cfg9346_Lock);
}

/* Unlock the Config register space (9346CR config mode) for writing. */
static void rtl_unlock_config_regs(struct rtl8169_private *tp)
{
	RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
}
/*
 * Program the PCIe Max_Read_Request_Size field for transmit performance.
 * @force: PCI_EXP_DEVCTL_READRQ_* encoding to set after clearing the field.
 */
static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force)
{
	pcie_capability_clear_and_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_READRQ, force);
}
762 bool (*check
)(struct rtl8169_private
*);
/*
 * udelay() wrapper with the plain-function signature that rtl_loop_wait()
 * takes as its delay callback (udelay itself may be a macro).
 */
static void rtl_udelay(unsigned int d)
{
	udelay(d);
}
/*
 * Poll a hardware condition until it reaches the expected level.
 * @c:     condition descriptor (check callback plus message for logging)
 * @delay: per-iteration delay routine (rtl_udelay or msleep)
 * @d:     argument passed to @delay on every iteration
 * @n:     maximum number of polls before giving up
 * @high:  level the condition must reach to succeed
 * Returns true when the condition was met, false on timeout (logged).
 */
static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
			  void (*delay)(unsigned int), unsigned int d, int n,
			  bool high)
{
	int i;

	for (i = 0; i < n; i++) {
		if (c->check(tp) == high)
			return true;
		delay(d);
	}
	netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
		  c->msg, !high, n, d);
	return false;
}
/* Poll @c every @d microseconds, up to @n times, until it goes high. */
static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
}

/* Poll @c every @d microseconds, up to @n times, until it goes low. */
static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
}

/* Poll @c every @d milliseconds, up to @n times, until it goes high. */
static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, true);
}

/* Poll @c every @d milliseconds, up to @n times, until it goes low. */
static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, false);
}
/*
 * Declare a named poll condition for rtl_loop_wait(): forward-declares
 * name_check(), defines the const struct rtl_cond instance, then opens
 * the definition of name_check() (caller supplies the body).
 */
#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check	= name ## _check,			\
	.msg	= #name					\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)
/* Reject OCP register addresses that are odd or exceed 16 bits. */
static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
{
	if (reg & 0xffff0001) {
		netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
		return true;
	}
	return false;
}
/* GPHY OCP busy flag: set while an access is still in flight. */
DECLARE_RTL_COND(rtl_ocp_gphy_cond)
{
	return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG;
}

/* Write @data to GPHY OCP register @reg and wait for completion. */
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	if (rtl_ocp_reg_failure(tp, reg))
		return;

	/* Register address occupies bits 30:15 of the GPHY_OCP command. */
	RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);

	rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}

/* Read GPHY OCP register @reg; returns ~0 (0xffff as u16) on timeout. */
static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(tp, GPHY_OCP, reg << 15);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
		(RTL_R32(tp, GPHY_OCP) & 0xffff) : ~0;
}
/* Write @data to MAC OCP register @reg via OCPDR (no completion poll). */
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data);
}

/* Read MAC OCP register @reg; the u16 return keeps the low 16 data bits. */
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(tp, OCPDR, reg << 15);

	return RTL_R32(tp, OCPDR);
}
878 #define OCP_STD_PHY_BASE 0xa400
/*
 * MDIO write for 8168g and later: a write to the page-select register
 * (0x1f) only updates the cached OCP base; other registers are mapped
 * into the GPHY OCP window at ocp_base + reg * 2.
 */
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	if (reg == 0x1f) {
		/* Page 0 maps to the standard PHY register base. */
		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
		return;
	}

	if (tp->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
}

/* MDIO read counterpart of r8168g_mdio_write(). */
static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
{
	if (tp->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
}

/* MAC-MCU access: page select via reg 0x1f, data via MAC OCP registers. */
static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
{
	if (reg == 0x1f) {
		tp->ocp_base = value << 4;
		return;
	}

	r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
}

static int mac_mcu_read(struct rtl8169_private *tp, int reg)
{
	return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
}
/* PHYAR bit 31: command-in-flight / data-valid indicator. */
DECLARE_RTL_COND(rtl_phyar_cond)
{
	return RTL_R32(tp, PHYAR) & 0x80000000;
}

/* Classic 8169 MDIO write through the PHYAR register. */
static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));

	rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
	/*
	 * According to hardware specs a 20us delay is required after write
	 * complete indication, but before sending next command.
	 */
	udelay(20);
}

/* Classic 8169 MDIO read through the PHYAR register; ~0 on timeout. */
static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
{
	int value;

	RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16);

	value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
		RTL_R32(tp, PHYAR) & 0xffff : ~0;

	/*
	 * According to hardware specs a 20us delay is required after read
	 * complete indication, but before sending next command.
	 */
	udelay(20);

	return value;
}
/* OCPAR busy flag for the 8168dp indirect GPHY access. */
DECLARE_RTL_COND(rtl_ocpar_cond)
{
	return RTL_R32(tp, OCPAR) & OCPAR_FLAG;
}

/* Issue one 8168dp GPHY access cycle (write command or read trigger). */
static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
{
	RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
	RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD);
	RTL_W32(tp, EPHY_RXER_NUM, 0);

	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}

static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	r8168dp_1_mdio_access(tp, reg,
			      OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
}

/* Two-phase 8168dp read: trigger, settle, then issue the read command. */
static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
{
	r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);

	mdelay(1);
	RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD);
	RTL_W32(tp, EPHY_RXER_NUM, 0);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
		RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : ~0;
}
983 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
/* Clear the MDIO access bit at 0xd0 to hand the MDIO bus to the driver. */
static void r8168dp_2_mdio_start(struct rtl8169_private *tp)
{
	RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
}

/* Set the MDIO access bit again once the driver is done. */
static void r8168dp_2_mdio_stop(struct rtl8169_private *tp)
{
	RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
}

/* 8168dp v2: bracket a classic PHYAR write with bus start/stop. */
static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	r8168dp_2_mdio_start(tp);

	r8169_mdio_write(tp, reg, value);

	r8168dp_2_mdio_stop(tp);
}

/* 8168dp v2: bracket a classic PHYAR read with bus start/stop. */
static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
{
	int value;

	r8168dp_2_mdio_start(tp);

	value = r8169_mdio_read(tp, reg);

	r8168dp_2_mdio_stop(tp);

	return value;
}
/* Dispatch a PHY register write through the chip-specific mdio ops. */
static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
{
	tp->mdio_ops.write(tp, location, val);
}

/* Dispatch a PHY register read through the chip-specific mdio ops. */
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
	return tp->mdio_ops.read(tp, location);
}

/* Read-modify-write: OR @value into PHY register @reg_addr. */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
}

/* Read-modify-write: clear bits @m, then set bits @p in @reg_addr. */
static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	int val;

	val = rtl_readphy(tp, reg_addr);
	rtl_writephy(tp, reg_addr, (val & ~m) | p);
}
/* EPHYAR busy flag. */
DECLARE_RTL_COND(rtl_ephyar_cond)
{
	return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
}

/* Write @value to PCIe PHY (EPHY) register @reg_addr, then settle. */
static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
{
	RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);

	udelay(10);
}

/* Read PCIe PHY (EPHY) register @reg_addr; ~0 on timeout. */
static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
	RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
		RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
/* ERIAR busy flag. */
DECLARE_RTL_COND(rtl_eriar_cond)
{
	return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
}

/*
 * Write @val to extended (ERI) register @addr in the @type window.
 * @addr must be dword-aligned; @mask (ERIAR_MASK_*) selects the bytes
 * that are actually updated.
 */
static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
			   u32 val, int type)
{
	BUG_ON((addr & 3) || (mask == 0));
	RTL_W32(tp, ERIDR, val);
	RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr);

	rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}

/* ERI write against the default EXGMAC window. */
static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
			  u32 val)
{
	_rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
}

/* Read the full dword of ERI register @addr; ~0 on timeout. */
static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
	RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);

	return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
		RTL_R32(tp, ERIDR) : ~0;
}

/* ERI read against the default EXGMAC window. */
static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
{
	return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
}

/* Read-modify-write an ERI register: clear bits @m, then set bits @p. */
static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
			 u32 m)
{
	u32 val;

	val = rtl_eri_read(tp, addr);
	rtl_eri_write(tp, addr, mask, (val & ~m) | p);
}

static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 mask,
			     u32 p)
{
	rtl_w0w1_eri(tp, addr, mask, p, 0);
}

static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 mask,
			       u32 m)
{
	rtl_w0w1_eri(tp, addr, mask, 0, m);
}
/* 8168dp OOB (DASH) register read via OCPAR/OCPDR; ~0 on timeout. */
static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
	RTL_W32(tp, OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
		RTL_R32(tp, OCPDR) : ~0;
}

/* 8168ep OOB register read goes through the ERI OOB window instead. */
static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
	return _rtl_eri_read(tp, reg, ERIAR_OOB);
}

/* 8168dp OOB register write via OCPAR/OCPDR. */
static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
			      u32 data)
{
	RTL_W32(tp, OCPDR, data);
	RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
}

/* 8168ep OOB register write through the ERI OOB window. */
static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
			      u32 data)
{
	_rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT,
		       data, ERIAR_OOB);
}

/* Post @cmd to the DASH firmware mailbox and ring its doorbell. */
static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd);

	r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001);
}
1152 #define OOB_CMD_RESET 0x00
1153 #define OOB_CMD_DRIVER_START 0x05
1154 #define OOB_CMD_DRIVER_STOP 0x06
/* DASH status register offset differs on RTL8168dp (mac version 31). */
static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
{
	return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
}

/* 8168dp DASH firmware-ready indication. */
DECLARE_RTL_COND(rtl_dp_ocp_read_cond)
{
	u16 reg;

	reg = rtl8168_get_ocp_reg(tp);

	return r8168dp_ocp_read(tp, 0x0f, reg) & 0x00000800;
}

/* 8168ep DASH firmware-ready indication. */
DECLARE_RTL_COND(rtl_ep_ocp_read_cond)
{
	return r8168ep_ocp_read(tp, 0x0f, 0x124) & 0x00000001;
}

/* CMAC transmit-done indication (IBISR0 bit 5). */
DECLARE_RTL_COND(rtl_ocp_tx_cond)
{
	return RTL_R8(tp, IBISR0) & 0x20;
}
/* Quiesce the 8168ep CMAC: stop TX, wait for drain, ack, then disable. */
static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
{
	RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01);
	rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
	RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20);
	RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
}
/* Tell 8168dp DASH firmware the driver is starting; wait for ready. */
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
	rtl_msleep_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10, 10);
}

/* Tell 8168ep DASH firmware the driver is starting; wait for ready. */
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
	r8168ep_ocp_write(tp, 0x01, 0x30,
			  r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
	rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10);
}

/* Dispatch the DASH "driver start" handshake per chip version. */
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl8168dp_driver_start(tp);
		break;
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		rtl8168ep_driver_start(tp);
		break;
	default:
		/* Only called for chips known to carry DASH firmware. */
		BUG();
		break;
	}
}
/* Tell 8168dp DASH firmware the driver is stopping; wait for idle. */
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
	rtl_msleep_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10, 10);
}

/* Quiesce the CMAC first, then tell 8168ep DASH firmware to stop. */
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
{
	rtl8168ep_stop_cmac(tp);
	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
	r8168ep_ocp_write(tp, 0x01, 0x30,
			  r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
	rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10);
}

/* Dispatch the DASH "driver stop" handshake per chip version. */
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl8168dp_driver_stop(tp);
		break;
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		rtl8168ep_driver_stop(tp);
		break;
	default:
		/* Only called for chips known to carry DASH firmware. */
		BUG();
		break;
	}
}
/* Is DASH management firmware enabled on an 8168dp? */
static bool r8168dp_check_dash(struct rtl8169_private *tp)
{
	u16 reg = rtl8168_get_ocp_reg(tp);

	return !!(r8168dp_ocp_read(tp, 0x0f, reg) & 0x00008000);
}

/* Is DASH management firmware enabled on an 8168ep? */
static bool r8168ep_check_dash(struct rtl8169_private *tp)
{
	return !!(r8168ep_ocp_read(tp, 0x0f, 0x128) & 0x00000001);
}

/* Chip-version dispatch for DASH detection; false for non-DASH chips. */
static bool r8168_check_dash(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		return r8168dp_check_dash(tp);
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		return r8168ep_check_dash(tp);
	default:
		return false;
	}
}
/* Pulse ERI 0xdc bit 0 low then high to reset the RX packet filter. */
static void rtl_reset_packet_filter(struct rtl8169_private *tp)
{
	rtl_eri_clear_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
	rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
}

/* EFUSE access busy flag. */
DECLARE_RTL_COND(rtl_efusear_cond)
{
	return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG;
}

/* Read one byte from the 8168d efuse; ~0 (0xff as u8) on timeout. */
static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
	RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
		RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
1302 static void rtl_ack_events(struct rtl8169_private
*tp
, u16 bits
)
1304 RTL_W16(tp
, IntrStatus
, bits
);
1307 static void rtl_irq_disable(struct rtl8169_private
*tp
)
1309 RTL_W16(tp
, IntrMask
, 0);
1310 tp
->irq_enabled
= 0;
1313 #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1314 #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1315 #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1317 static void rtl_irq_enable(struct rtl8169_private
*tp
)
1319 tp
->irq_enabled
= 1;
1320 RTL_W16(tp
, IntrMask
, tp
->irq_mask
);
1323 static void rtl8169_irq_mask_and_ack(struct rtl8169_private
*tp
)
1325 rtl_irq_disable(tp
);
1326 rtl_ack_events(tp
, 0xffff);
1328 RTL_R8(tp
, ChipCmd
);
1331 static void rtl_link_chg_patch(struct rtl8169_private
*tp
)
1333 struct net_device
*dev
= tp
->dev
;
1334 struct phy_device
*phydev
= tp
->phydev
;
1336 if (!netif_running(dev
))
1339 if (tp
->mac_version
== RTL_GIGA_MAC_VER_34
||
1340 tp
->mac_version
== RTL_GIGA_MAC_VER_38
) {
1341 if (phydev
->speed
== SPEED_1000
) {
1342 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x00000011);
1343 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x00000005);
1344 } else if (phydev
->speed
== SPEED_100
) {
1345 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x0000001f);
1346 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x00000005);
1348 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x0000001f);
1349 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x0000003f);
1351 rtl_reset_packet_filter(tp
);
1352 } else if (tp
->mac_version
== RTL_GIGA_MAC_VER_35
||
1353 tp
->mac_version
== RTL_GIGA_MAC_VER_36
) {
1354 if (phydev
->speed
== SPEED_1000
) {
1355 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x00000011);
1356 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x00000005);
1358 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x0000001f);
1359 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x0000003f);
1361 } else if (tp
->mac_version
== RTL_GIGA_MAC_VER_37
) {
1362 if (phydev
->speed
== SPEED_10
) {
1363 rtl_eri_write(tp
, 0x1d0, ERIAR_MASK_0011
, 0x4d02);
1364 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_0011
, 0x0060a);
1366 rtl_eri_write(tp
, 0x1d0, ERIAR_MASK_0011
, 0x0000);
1371 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1373 static void rtl8169_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
1375 struct rtl8169_private
*tp
= netdev_priv(dev
);
1378 wol
->supported
= WAKE_ANY
;
1379 wol
->wolopts
= tp
->saved_wolopts
;
1380 rtl_unlock_work(tp
);
1383 static void __rtl8169_set_wol(struct rtl8169_private
*tp
, u32 wolopts
)
1385 unsigned int i
, tmp
;
1386 static const struct {
1391 { WAKE_PHY
, Config3
, LinkUp
},
1392 { WAKE_UCAST
, Config5
, UWF
},
1393 { WAKE_BCAST
, Config5
, BWF
},
1394 { WAKE_MCAST
, Config5
, MWF
},
1395 { WAKE_ANY
, Config5
, LanWake
},
1396 { WAKE_MAGIC
, Config3
, MagicPacket
}
1400 rtl_unlock_config_regs(tp
);
1402 switch (tp
->mac_version
) {
1403 case RTL_GIGA_MAC_VER_34
... RTL_GIGA_MAC_VER_38
:
1404 case RTL_GIGA_MAC_VER_40
... RTL_GIGA_MAC_VER_51
:
1405 tmp
= ARRAY_SIZE(cfg
) - 1;
1406 if (wolopts
& WAKE_MAGIC
)
1407 rtl_eri_set_bits(tp
, 0x0dc, ERIAR_MASK_0100
,
1410 rtl_eri_clear_bits(tp
, 0x0dc, ERIAR_MASK_0100
,
1414 tmp
= ARRAY_SIZE(cfg
);
1418 for (i
= 0; i
< tmp
; i
++) {
1419 options
= RTL_R8(tp
, cfg
[i
].reg
) & ~cfg
[i
].mask
;
1420 if (wolopts
& cfg
[i
].opt
)
1421 options
|= cfg
[i
].mask
;
1422 RTL_W8(tp
, cfg
[i
].reg
, options
);
1425 switch (tp
->mac_version
) {
1426 case RTL_GIGA_MAC_VER_01
... RTL_GIGA_MAC_VER_17
:
1427 options
= RTL_R8(tp
, Config1
) & ~PMEnable
;
1429 options
|= PMEnable
;
1430 RTL_W8(tp
, Config1
, options
);
1433 options
= RTL_R8(tp
, Config2
) & ~PME_SIGNAL
;
1435 options
|= PME_SIGNAL
;
1436 RTL_W8(tp
, Config2
, options
);
1440 rtl_lock_config_regs(tp
);
1442 device_set_wakeup_enable(tp_to_dev(tp
), wolopts
);
1445 static int rtl8169_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
1447 struct rtl8169_private
*tp
= netdev_priv(dev
);
1448 struct device
*d
= tp_to_dev(tp
);
1450 if (wol
->wolopts
& ~WAKE_ANY
)
1453 pm_runtime_get_noresume(d
);
1457 tp
->saved_wolopts
= wol
->wolopts
;
1459 if (pm_runtime_active(d
))
1460 __rtl8169_set_wol(tp
, tp
->saved_wolopts
);
1462 rtl_unlock_work(tp
);
1464 pm_runtime_put_noidle(d
);
1469 static void rtl8169_get_drvinfo(struct net_device
*dev
,
1470 struct ethtool_drvinfo
*info
)
1472 struct rtl8169_private
*tp
= netdev_priv(dev
);
1473 struct rtl_fw
*rtl_fw
= tp
->rtl_fw
;
1475 strlcpy(info
->driver
, MODULENAME
, sizeof(info
->driver
));
1476 strlcpy(info
->bus_info
, pci_name(tp
->pci_dev
), sizeof(info
->bus_info
));
1477 BUILD_BUG_ON(sizeof(info
->fw_version
) < sizeof(rtl_fw
->version
));
1479 strlcpy(info
->fw_version
, rtl_fw
->version
,
1480 sizeof(info
->fw_version
));
1483 static int rtl8169_get_regs_len(struct net_device
*dev
)
1485 return R8169_REGS_SIZE
;
1488 static netdev_features_t
rtl8169_fix_features(struct net_device
*dev
,
1489 netdev_features_t features
)
1491 struct rtl8169_private
*tp
= netdev_priv(dev
);
1493 if (dev
->mtu
> TD_MSS_MAX
)
1494 features
&= ~NETIF_F_ALL_TSO
;
1496 if (dev
->mtu
> JUMBO_1K
&&
1497 tp
->mac_version
> RTL_GIGA_MAC_VER_06
)
1498 features
&= ~NETIF_F_IP_CSUM
;
1503 static int rtl8169_set_features(struct net_device
*dev
,
1504 netdev_features_t features
)
1506 struct rtl8169_private
*tp
= netdev_priv(dev
);
1511 rx_config
= RTL_R32(tp
, RxConfig
);
1512 if (features
& NETIF_F_RXALL
)
1513 rx_config
|= (AcceptErr
| AcceptRunt
);
1515 rx_config
&= ~(AcceptErr
| AcceptRunt
);
1517 RTL_W32(tp
, RxConfig
, rx_config
);
1519 if (features
& NETIF_F_RXCSUM
)
1520 tp
->cp_cmd
|= RxChkSum
;
1522 tp
->cp_cmd
&= ~RxChkSum
;
1524 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
1525 tp
->cp_cmd
|= RxVlan
;
1527 tp
->cp_cmd
&= ~RxVlan
;
1529 RTL_W16(tp
, CPlusCmd
, tp
->cp_cmd
);
1530 RTL_R16(tp
, CPlusCmd
);
1532 rtl_unlock_work(tp
);
1537 static inline u32
rtl8169_tx_vlan_tag(struct sk_buff
*skb
)
1539 return (skb_vlan_tag_present(skb
)) ?
1540 TxVlanTag
| swab16(skb_vlan_tag_get(skb
)) : 0x00;
1543 static void rtl8169_rx_vlan_tag(struct RxDesc
*desc
, struct sk_buff
*skb
)
1545 u32 opts2
= le32_to_cpu(desc
->opts2
);
1547 if (opts2
& RxVlanTag
)
1548 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
), swab16(opts2
& 0xffff));
1551 static void rtl8169_get_regs(struct net_device
*dev
, struct ethtool_regs
*regs
,
1554 struct rtl8169_private
*tp
= netdev_priv(dev
);
1555 u32 __iomem
*data
= tp
->mmio_addr
;
1560 for (i
= 0; i
< R8169_REGS_SIZE
; i
+= 4)
1561 memcpy_fromio(dw
++, data
++, 4);
1562 rtl_unlock_work(tp
);
1565 static u32
rtl8169_get_msglevel(struct net_device
*dev
)
1567 struct rtl8169_private
*tp
= netdev_priv(dev
);
1569 return tp
->msg_enable
;
1572 static void rtl8169_set_msglevel(struct net_device
*dev
, u32 value
)
1574 struct rtl8169_private
*tp
= netdev_priv(dev
);
1576 tp
->msg_enable
= value
;
1579 static const char rtl8169_gstrings
[][ETH_GSTRING_LEN
] = {
1586 "tx_single_collisions",
1587 "tx_multi_collisions",
1595 static int rtl8169_get_sset_count(struct net_device
*dev
, int sset
)
1599 return ARRAY_SIZE(rtl8169_gstrings
);
1605 DECLARE_RTL_COND(rtl_counters_cond
)
1607 return RTL_R32(tp
, CounterAddrLow
) & (CounterReset
| CounterDump
);
1610 static bool rtl8169_do_counters(struct rtl8169_private
*tp
, u32 counter_cmd
)
1612 dma_addr_t paddr
= tp
->counters_phys_addr
;
1615 RTL_W32(tp
, CounterAddrHigh
, (u64
)paddr
>> 32);
1616 RTL_R32(tp
, CounterAddrHigh
);
1617 cmd
= (u64
)paddr
& DMA_BIT_MASK(32);
1618 RTL_W32(tp
, CounterAddrLow
, cmd
);
1619 RTL_W32(tp
, CounterAddrLow
, cmd
| counter_cmd
);
1621 return rtl_udelay_loop_wait_low(tp
, &rtl_counters_cond
, 10, 1000);
1624 static bool rtl8169_reset_counters(struct rtl8169_private
*tp
)
1627 * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
1630 if (tp
->mac_version
< RTL_GIGA_MAC_VER_19
)
1633 return rtl8169_do_counters(tp
, CounterReset
);
1636 static bool rtl8169_update_counters(struct rtl8169_private
*tp
)
1638 u8 val
= RTL_R8(tp
, ChipCmd
);
1641 * Some chips are unable to dump tally counters when the receiver
1642 * is disabled. If 0xff chip may be in a PCI power-save state.
1644 if (!(val
& CmdRxEnb
) || val
== 0xff)
1647 return rtl8169_do_counters(tp
, CounterDump
);
1650 static bool rtl8169_init_counter_offsets(struct rtl8169_private
*tp
)
1652 struct rtl8169_counters
*counters
= tp
->counters
;
1656 * rtl8169_init_counter_offsets is called from rtl_open. On chip
1657 * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only
1658 * reset by a power cycle, while the counter values collected by the
1659 * driver are reset at every driver unload/load cycle.
1661 * To make sure the HW values returned by @get_stats64 match the SW
1662 * values, we collect the initial values at first open(*) and use them
1663 * as offsets to normalize the values returned by @get_stats64.
1665 * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one
1666 * for the reason stated in rtl8169_update_counters; CmdRxEnb is only
1667 * set at open time by rtl_hw_start.
1670 if (tp
->tc_offset
.inited
)
1673 /* If both, reset and update fail, propagate to caller. */
1674 if (rtl8169_reset_counters(tp
))
1677 if (rtl8169_update_counters(tp
))
1680 tp
->tc_offset
.tx_errors
= counters
->tx_errors
;
1681 tp
->tc_offset
.tx_multi_collision
= counters
->tx_multi_collision
;
1682 tp
->tc_offset
.tx_aborted
= counters
->tx_aborted
;
1683 tp
->tc_offset
.inited
= true;
1688 static void rtl8169_get_ethtool_stats(struct net_device
*dev
,
1689 struct ethtool_stats
*stats
, u64
*data
)
1691 struct rtl8169_private
*tp
= netdev_priv(dev
);
1692 struct device
*d
= tp_to_dev(tp
);
1693 struct rtl8169_counters
*counters
= tp
->counters
;
1697 pm_runtime_get_noresume(d
);
1699 if (pm_runtime_active(d
))
1700 rtl8169_update_counters(tp
);
1702 pm_runtime_put_noidle(d
);
1704 data
[0] = le64_to_cpu(counters
->tx_packets
);
1705 data
[1] = le64_to_cpu(counters
->rx_packets
);
1706 data
[2] = le64_to_cpu(counters
->tx_errors
);
1707 data
[3] = le32_to_cpu(counters
->rx_errors
);
1708 data
[4] = le16_to_cpu(counters
->rx_missed
);
1709 data
[5] = le16_to_cpu(counters
->align_errors
);
1710 data
[6] = le32_to_cpu(counters
->tx_one_collision
);
1711 data
[7] = le32_to_cpu(counters
->tx_multi_collision
);
1712 data
[8] = le64_to_cpu(counters
->rx_unicast
);
1713 data
[9] = le64_to_cpu(counters
->rx_broadcast
);
1714 data
[10] = le32_to_cpu(counters
->rx_multicast
);
1715 data
[11] = le16_to_cpu(counters
->tx_aborted
);
1716 data
[12] = le16_to_cpu(counters
->tx_underun
);
1719 static void rtl8169_get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
1723 memcpy(data
, *rtl8169_gstrings
, sizeof(rtl8169_gstrings
));
1729 * Interrupt coalescing
1731 * > 1 - the availability of the IntrMitigate (0xe2) register through the
1732 * > 8169, 8168 and 810x line of chipsets
1734 * 8169, 8168, and 8136(810x) serial chipsets support it.
1736 * > 2 - the Tx timer unit at gigabit speed
1738 * The unit of the timer depends on both the speed and the setting of CPlusCmd
1739 * (0xe0) bit 1 and bit 0.
1742 * bit[1:0] \ speed 1000M 100M 10M
1743 * 0 0 320ns 2.56us 40.96us
1744 * 0 1 2.56us 20.48us 327.7us
1745 * 1 0 5.12us 40.96us 655.4us
1746 * 1 1 10.24us 81.92us 1.31ms
1749 * bit[1:0] \ speed 1000M 100M 10M
1750 * 0 0 5us 2.56us 40.96us
1751 * 0 1 40us 20.48us 327.7us
1752 * 1 0 80us 40.96us 655.4us
1753 * 1 1 160us 81.92us 1.31ms
1756 /* rx/tx scale factors for one particular CPlusCmd[0:1] value */
1757 struct rtl_coalesce_scale
{
1762 /* rx/tx scale factors for all CPlusCmd[0:1] cases */
1763 struct rtl_coalesce_info
{
1765 struct rtl_coalesce_scale scalev
[4]; /* each CPlusCmd[0:1] case */
1768 /* produce (r,t) pairs with each being in series of *1, *8, *8*2, *8*2*2 */
1769 #define rxtx_x1822(r, t) { \
1772 {{(r)*8*2, (t)*8*2}}, \
1773 {{(r)*8*2*2, (t)*8*2*2}}, \
1775 static const struct rtl_coalesce_info rtl_coalesce_info_8169
[] = {
1776 /* speed delays: rx00 tx00 */
1777 { SPEED_10
, rxtx_x1822(40960, 40960) },
1778 { SPEED_100
, rxtx_x1822( 2560, 2560) },
1779 { SPEED_1000
, rxtx_x1822( 320, 320) },
1783 static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136
[] = {
1784 /* speed delays: rx00 tx00 */
1785 { SPEED_10
, rxtx_x1822(40960, 40960) },
1786 { SPEED_100
, rxtx_x1822( 2560, 2560) },
1787 { SPEED_1000
, rxtx_x1822( 5000, 5000) },
1792 /* get rx/tx scale vector corresponding to current speed */
1793 static const struct rtl_coalesce_info
*rtl_coalesce_info(struct net_device
*dev
)
1795 struct rtl8169_private
*tp
= netdev_priv(dev
);
1796 struct ethtool_link_ksettings ecmd
;
1797 const struct rtl_coalesce_info
*ci
;
1800 rc
= phy_ethtool_get_link_ksettings(dev
, &ecmd
);
1804 for (ci
= tp
->coalesce_info
; ci
->speed
!= 0; ci
++) {
1805 if (ecmd
.base
.speed
== ci
->speed
) {
1810 return ERR_PTR(-ELNRNG
);
1813 static int rtl_get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
1815 struct rtl8169_private
*tp
= netdev_priv(dev
);
1816 const struct rtl_coalesce_info
*ci
;
1817 const struct rtl_coalesce_scale
*scale
;
1821 } coal_settings
[] = {
1822 { &ec
->rx_max_coalesced_frames
, &ec
->rx_coalesce_usecs
},
1823 { &ec
->tx_max_coalesced_frames
, &ec
->tx_coalesce_usecs
}
1824 }, *p
= coal_settings
;
1828 memset(ec
, 0, sizeof(*ec
));
1830 /* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */
1831 ci
= rtl_coalesce_info(dev
);
1835 scale
= &ci
->scalev
[tp
->cp_cmd
& INTT_MASK
];
1837 /* read IntrMitigate and adjust according to scale */
1838 for (w
= RTL_R16(tp
, IntrMitigate
); w
; w
>>= RTL_COALESCE_SHIFT
, p
++) {
1839 *p
->max_frames
= (w
& RTL_COALESCE_MASK
) << 2;
1840 w
>>= RTL_COALESCE_SHIFT
;
1841 *p
->usecs
= w
& RTL_COALESCE_MASK
;
1844 for (i
= 0; i
< 2; i
++) {
1845 p
= coal_settings
+ i
;
1846 *p
->usecs
= (*p
->usecs
* scale
->nsecs
[i
]) / 1000;
1849 * ethtool_coalesce says it is illegal to set both usecs and
1852 if (!*p
->usecs
&& !*p
->max_frames
)
1859 /* choose appropriate scale factor and CPlusCmd[0:1] for (speed, nsec) */
1860 static const struct rtl_coalesce_scale
*rtl_coalesce_choose_scale(
1861 struct net_device
*dev
, u32 nsec
, u16
*cp01
)
1863 const struct rtl_coalesce_info
*ci
;
1866 ci
= rtl_coalesce_info(dev
);
1868 return ERR_CAST(ci
);
1870 for (i
= 0; i
< 4; i
++) {
1871 u32 rxtx_maxscale
= max(ci
->scalev
[i
].nsecs
[0],
1872 ci
->scalev
[i
].nsecs
[1]);
1873 if (nsec
<= rxtx_maxscale
* RTL_COALESCE_T_MAX
) {
1875 return &ci
->scalev
[i
];
1879 return ERR_PTR(-EINVAL
);
1882 static int rtl_set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
1884 struct rtl8169_private
*tp
= netdev_priv(dev
);
1885 const struct rtl_coalesce_scale
*scale
;
1889 } coal_settings
[] = {
1890 { ec
->rx_max_coalesced_frames
, ec
->rx_coalesce_usecs
},
1891 { ec
->tx_max_coalesced_frames
, ec
->tx_coalesce_usecs
}
1892 }, *p
= coal_settings
;
1896 scale
= rtl_coalesce_choose_scale(dev
,
1897 max(p
[0].usecs
, p
[1].usecs
) * 1000, &cp01
);
1899 return PTR_ERR(scale
);
1901 for (i
= 0; i
< 2; i
++, p
++) {
1905 * accept max_frames=1 we returned in rtl_get_coalesce.
1906 * accept it not only when usecs=0 because of e.g. the following scenario:
1908 * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX)
1909 * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1
1910 * - then user does `ethtool -C eth0 rx-usecs 100`
1912 * since ethtool sends to kernel whole ethtool_coalesce
1913 * settings, if we do not handle rx_usecs=!0, rx_frames=1
1914 * we'll reject it below in `frames % 4 != 0`.
1916 if (p
->frames
== 1) {
1920 units
= p
->usecs
* 1000 / scale
->nsecs
[i
];
1921 if (p
->frames
> RTL_COALESCE_FRAME_MAX
|| p
->frames
% 4)
1924 w
<<= RTL_COALESCE_SHIFT
;
1926 w
<<= RTL_COALESCE_SHIFT
;
1927 w
|= p
->frames
>> 2;
1932 RTL_W16(tp
, IntrMitigate
, swab16(w
));
1934 tp
->cp_cmd
= (tp
->cp_cmd
& ~INTT_MASK
) | cp01
;
1935 RTL_W16(tp
, CPlusCmd
, tp
->cp_cmd
);
1936 RTL_R16(tp
, CPlusCmd
);
1938 rtl_unlock_work(tp
);
1943 static int rtl_get_eee_supp(struct rtl8169_private
*tp
)
1945 struct phy_device
*phydev
= tp
->phydev
;
1948 switch (tp
->mac_version
) {
1949 case RTL_GIGA_MAC_VER_34
:
1950 case RTL_GIGA_MAC_VER_35
:
1951 case RTL_GIGA_MAC_VER_36
:
1952 case RTL_GIGA_MAC_VER_38
:
1953 ret
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, MDIO_PCS_EEE_ABLE
);
1955 case RTL_GIGA_MAC_VER_40
... RTL_GIGA_MAC_VER_51
:
1956 phy_write(phydev
, 0x1f, 0x0a5c);
1957 ret
= phy_read(phydev
, 0x12);
1958 phy_write(phydev
, 0x1f, 0x0000);
1961 ret
= -EPROTONOSUPPORT
;
1968 static int rtl_get_eee_lpadv(struct rtl8169_private
*tp
)
1970 struct phy_device
*phydev
= tp
->phydev
;
1973 switch (tp
->mac_version
) {
1974 case RTL_GIGA_MAC_VER_34
:
1975 case RTL_GIGA_MAC_VER_35
:
1976 case RTL_GIGA_MAC_VER_36
:
1977 case RTL_GIGA_MAC_VER_38
:
1978 ret
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_EEE_LPABLE
);
1980 case RTL_GIGA_MAC_VER_40
... RTL_GIGA_MAC_VER_51
:
1981 phy_write(phydev
, 0x1f, 0x0a5d);
1982 ret
= phy_read(phydev
, 0x11);
1983 phy_write(phydev
, 0x1f, 0x0000);
1986 ret
= -EPROTONOSUPPORT
;
1993 static int rtl_get_eee_adv(struct rtl8169_private
*tp
)
1995 struct phy_device
*phydev
= tp
->phydev
;
1998 switch (tp
->mac_version
) {
1999 case RTL_GIGA_MAC_VER_34
:
2000 case RTL_GIGA_MAC_VER_35
:
2001 case RTL_GIGA_MAC_VER_36
:
2002 case RTL_GIGA_MAC_VER_38
:
2003 ret
= phy_read_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_EEE_ADV
);
2005 case RTL_GIGA_MAC_VER_40
... RTL_GIGA_MAC_VER_51
:
2006 phy_write(phydev
, 0x1f, 0x0a5d);
2007 ret
= phy_read(phydev
, 0x10);
2008 phy_write(phydev
, 0x1f, 0x0000);
2011 ret
= -EPROTONOSUPPORT
;
2018 static int rtl_set_eee_adv(struct rtl8169_private
*tp
, int val
)
2020 struct phy_device
*phydev
= tp
->phydev
;
2023 switch (tp
->mac_version
) {
2024 case RTL_GIGA_MAC_VER_34
:
2025 case RTL_GIGA_MAC_VER_35
:
2026 case RTL_GIGA_MAC_VER_36
:
2027 case RTL_GIGA_MAC_VER_38
:
2028 ret
= phy_write_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_EEE_ADV
, val
);
2030 case RTL_GIGA_MAC_VER_40
... RTL_GIGA_MAC_VER_51
:
2031 phy_write(phydev
, 0x1f, 0x0a5d);
2032 phy_write(phydev
, 0x10, val
);
2033 phy_write(phydev
, 0x1f, 0x0000);
2036 ret
= -EPROTONOSUPPORT
;
2043 static int rtl8169_get_eee(struct net_device
*dev
, struct ethtool_eee
*data
)
2045 struct rtl8169_private
*tp
= netdev_priv(dev
);
2046 struct device
*d
= tp_to_dev(tp
);
2049 pm_runtime_get_noresume(d
);
2051 if (!pm_runtime_active(d
)) {
2056 /* Get Supported EEE */
2057 ret
= rtl_get_eee_supp(tp
);
2060 data
->supported
= mmd_eee_cap_to_ethtool_sup_t(ret
);
2062 /* Get advertisement EEE */
2063 ret
= rtl_get_eee_adv(tp
);
2066 data
->advertised
= mmd_eee_adv_to_ethtool_adv_t(ret
);
2067 data
->eee_enabled
= !!data
->advertised
;
2069 /* Get LP advertisement EEE */
2070 ret
= rtl_get_eee_lpadv(tp
);
2073 data
->lp_advertised
= mmd_eee_adv_to_ethtool_adv_t(ret
);
2074 data
->eee_active
= !!(data
->advertised
& data
->lp_advertised
);
2076 pm_runtime_put_noidle(d
);
2077 return ret
< 0 ? ret
: 0;
2080 static int rtl8169_set_eee(struct net_device
*dev
, struct ethtool_eee
*data
)
2082 struct rtl8169_private
*tp
= netdev_priv(dev
);
2083 struct device
*d
= tp_to_dev(tp
);
2084 int old_adv
, adv
= 0, cap
, ret
;
2086 pm_runtime_get_noresume(d
);
2088 if (!dev
->phydev
|| !pm_runtime_active(d
)) {
2093 if (dev
->phydev
->autoneg
== AUTONEG_DISABLE
||
2094 dev
->phydev
->duplex
!= DUPLEX_FULL
) {
2095 ret
= -EPROTONOSUPPORT
;
2099 /* Get Supported EEE */
2100 ret
= rtl_get_eee_supp(tp
);
2105 ret
= rtl_get_eee_adv(tp
);
2110 if (data
->eee_enabled
) {
2111 adv
= !data
->advertised
? cap
:
2112 ethtool_adv_to_mmd_eee_adv_t(data
->advertised
) & cap
;
2113 /* Mask prohibited EEE modes */
2114 adv
&= ~dev
->phydev
->eee_broken_modes
;
2117 if (old_adv
!= adv
) {
2118 ret
= rtl_set_eee_adv(tp
, adv
);
2122 /* Restart autonegotiation so the new modes get sent to the
2125 ret
= phy_restart_aneg(dev
->phydev
);
2129 pm_runtime_put_noidle(d
);
2130 return ret
< 0 ? ret
: 0;
2133 static const struct ethtool_ops rtl8169_ethtool_ops
= {
2134 .get_drvinfo
= rtl8169_get_drvinfo
,
2135 .get_regs_len
= rtl8169_get_regs_len
,
2136 .get_link
= ethtool_op_get_link
,
2137 .get_coalesce
= rtl_get_coalesce
,
2138 .set_coalesce
= rtl_set_coalesce
,
2139 .get_msglevel
= rtl8169_get_msglevel
,
2140 .set_msglevel
= rtl8169_set_msglevel
,
2141 .get_regs
= rtl8169_get_regs
,
2142 .get_wol
= rtl8169_get_wol
,
2143 .set_wol
= rtl8169_set_wol
,
2144 .get_strings
= rtl8169_get_strings
,
2145 .get_sset_count
= rtl8169_get_sset_count
,
2146 .get_ethtool_stats
= rtl8169_get_ethtool_stats
,
2147 .get_ts_info
= ethtool_op_get_ts_info
,
2148 .nway_reset
= phy_ethtool_nway_reset
,
2149 .get_eee
= rtl8169_get_eee
,
2150 .set_eee
= rtl8169_set_eee
,
2151 .get_link_ksettings
= phy_ethtool_get_link_ksettings
,
2152 .set_link_ksettings
= phy_ethtool_set_link_ksettings
,
/* Advertise every EEE mode the PHY reports as supported; a non-positive
 * value from rtl_get_eee_supp means unsupported/error, so do nothing.
 */
static void rtl_enable_eee(struct rtl8169_private *tp)
{
	int supported = rtl_get_eee_supp(tp);

	if (supported > 0)
		rtl_set_eee_adv(tp, supported);
}
2163 static void rtl8169_get_mac_version(struct rtl8169_private
*tp
)
2166 * The driver currently handles the 8168Bf and the 8168Be identically
2167 * but they can be identified more specifically through the test below
2170 * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2172 * Same thing for the 8101Eb and the 8101Ec:
2174 * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2176 static const struct rtl_mac_info
{
2181 /* 8168EP family. */
2182 { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51
},
2183 { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50
},
2184 { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49
},
2187 { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46
},
2188 { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45
},
2191 { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44
},
2192 { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42
},
2193 { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41
},
2194 { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40
},
2197 { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38
},
2198 { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36
},
2199 { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35
},
2202 { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34
},
2203 { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32
},
2204 { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33
},
2207 { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25
},
2208 { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26
},
2210 /* 8168DP family. */
2211 { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27
},
2212 { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28
},
2213 { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31
},
2216 { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23
},
2217 { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18
},
2218 { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24
},
2219 { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19
},
2220 { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20
},
2221 { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21
},
2222 { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22
},
2225 { 0x7cf, 0x380, RTL_GIGA_MAC_VER_12
},
2226 { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17
},
2227 { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11
},
2230 { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39
},
2231 { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37
},
2232 { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29
},
2233 { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30
},
2234 { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08
},
2235 { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08
},
2236 { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07
},
2237 { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07
},
2238 { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13
},
2239 { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10
},
2240 { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16
},
2241 { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09
},
2242 { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09
},
2243 { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16
},
2244 /* FIXME: where did these entries come from ? -- FR */
2245 { 0xfc8, 0x388, RTL_GIGA_MAC_VER_15
},
2246 { 0xfc8, 0x308, RTL_GIGA_MAC_VER_14
},
2249 { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06
},
2250 { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05
},
2251 { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04
},
2252 { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03
},
2253 { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02
},
2254 { 0xfc8, 0x000, RTL_GIGA_MAC_VER_01
},
2257 { 0x000, 0x000, RTL_GIGA_MAC_NONE
}
2259 const struct rtl_mac_info
*p
= mac_info
;
2260 u16 reg
= RTL_R32(tp
, TxConfig
) >> 20;
2262 while ((reg
& p
->mask
) != p
->val
)
2264 tp
->mac_version
= p
->mac_version
;
2266 if (tp
->mac_version
== RTL_GIGA_MAC_NONE
) {
2267 dev_err(tp_to_dev(tp
), "unknown chip XID %03x\n", reg
& 0xfcf);
2268 } else if (!tp
->supports_gmii
) {
2269 if (tp
->mac_version
== RTL_GIGA_MAC_VER_42
)
2270 tp
->mac_version
= RTL_GIGA_MAC_VER_43
;
2271 else if (tp
->mac_version
== RTL_GIGA_MAC_VER_45
)
2272 tp
->mac_version
= RTL_GIGA_MAC_VER_47
;
2273 else if (tp
->mac_version
== RTL_GIGA_MAC_VER_46
)
2274 tp
->mac_version
= RTL_GIGA_MAC_VER_48
;
2283 static void __rtl_writephy_batch(struct rtl8169_private
*tp
,
2284 const struct phy_reg
*regs
, int len
)
2287 rtl_writephy(tp
, regs
->reg
, regs
->val
);
2292 #define rtl_writephy_batch(tp, a) __rtl_writephy_batch(tp, a, ARRAY_SIZE(a))
2294 #define PHY_READ 0x00000000
2295 #define PHY_DATA_OR 0x10000000
2296 #define PHY_DATA_AND 0x20000000
2297 #define PHY_BJMPN 0x30000000
2298 #define PHY_MDIO_CHG 0x40000000
2299 #define PHY_CLEAR_READCOUNT 0x70000000
2300 #define PHY_WRITE 0x80000000
2301 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2302 #define PHY_COMP_EQ_SKIPN 0xa0000000
2303 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2304 #define PHY_WRITE_PREVIOUS 0xc0000000
2305 #define PHY_SKIPN 0xd0000000
2306 #define PHY_DELAY_MS 0xe0000000
2310 char version
[RTL_VER_SIZE
];
2316 #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
2318 static bool rtl_fw_format_ok(struct rtl8169_private
*tp
, struct rtl_fw
*rtl_fw
)
2320 const struct firmware
*fw
= rtl_fw
->fw
;
2321 struct fw_info
*fw_info
= (struct fw_info
*)fw
->data
;
2322 struct rtl_fw_phy_action
*pa
= &rtl_fw
->phy_action
;
2323 char *version
= rtl_fw
->version
;
2326 if (fw
->size
< FW_OPCODE_SIZE
)
2329 if (!fw_info
->magic
) {
2330 size_t i
, size
, start
;
2333 if (fw
->size
< sizeof(*fw_info
))
2336 for (i
= 0; i
< fw
->size
; i
++)
2337 checksum
+= fw
->data
[i
];
2341 start
= le32_to_cpu(fw_info
->fw_start
);
2342 if (start
> fw
->size
)
2345 size
= le32_to_cpu(fw_info
->fw_len
);
2346 if (size
> (fw
->size
- start
) / FW_OPCODE_SIZE
)
2349 memcpy(version
, fw_info
->version
, RTL_VER_SIZE
);
2351 pa
->code
= (__le32
*)(fw
->data
+ start
);
2354 if (fw
->size
% FW_OPCODE_SIZE
)
2357 strlcpy(version
, tp
->fw_name
, RTL_VER_SIZE
);
2359 pa
->code
= (__le32
*)fw
->data
;
2360 pa
->size
= fw
->size
/ FW_OPCODE_SIZE
;
2362 version
[RTL_VER_SIZE
- 1] = 0;
2369 static bool rtl_fw_data_ok(struct rtl8169_private
*tp
, struct net_device
*dev
,
2370 struct rtl_fw_phy_action
*pa
)
2375 for (index
= 0; index
< pa
->size
; index
++) {
2376 u32 action
= le32_to_cpu(pa
->code
[index
]);
2377 u32 regno
= (action
& 0x0fff0000) >> 16;
2379 switch(action
& 0xf0000000) {
2384 case PHY_CLEAR_READCOUNT
:
2386 case PHY_WRITE_PREVIOUS
:
2391 if (regno
> index
) {
2392 netif_err(tp
, ifup
, tp
->dev
,
2393 "Out of range of firmware\n");
2397 case PHY_READCOUNT_EQ_SKIP
:
2398 if (index
+ 2 >= pa
->size
) {
2399 netif_err(tp
, ifup
, tp
->dev
,
2400 "Out of range of firmware\n");
2404 case PHY_COMP_EQ_SKIPN
:
2405 case PHY_COMP_NEQ_SKIPN
:
2407 if (index
+ 1 + regno
>= pa
->size
) {
2408 netif_err(tp
, ifup
, tp
->dev
,
2409 "Out of range of firmware\n");
2415 netif_err(tp
, ifup
, tp
->dev
,
2416 "Invalid action 0x%08x\n", action
);
2425 static int rtl_check_firmware(struct rtl8169_private
*tp
, struct rtl_fw
*rtl_fw
)
2427 struct net_device
*dev
= tp
->dev
;
2430 if (!rtl_fw_format_ok(tp
, rtl_fw
)) {
2431 netif_err(tp
, ifup
, dev
, "invalid firmware\n");
2435 if (rtl_fw_data_ok(tp
, dev
, &rtl_fw
->phy_action
))
2441 static void rtl_phy_write_fw(struct rtl8169_private
*tp
, struct rtl_fw
*rtl_fw
)
2443 struct rtl_fw_phy_action
*pa
= &rtl_fw
->phy_action
;
2444 struct mdio_ops org
, *ops
= &tp
->mdio_ops
;
2448 predata
= count
= 0;
2449 org
.write
= ops
->write
;
2450 org
.read
= ops
->read
;
2452 for (index
= 0; index
< pa
->size
; ) {
2453 u32 action
= le32_to_cpu(pa
->code
[index
]);
2454 u32 data
= action
& 0x0000ffff;
2455 u32 regno
= (action
& 0x0fff0000) >> 16;
2460 switch(action
& 0xf0000000) {
2462 predata
= rtl_readphy(tp
, regno
);
2479 ops
->write
= org
.write
;
2480 ops
->read
= org
.read
;
2481 } else if (data
== 1) {
2482 ops
->write
= mac_mcu_write
;
2483 ops
->read
= mac_mcu_read
;
2488 case PHY_CLEAR_READCOUNT
:
2493 rtl_writephy(tp
, regno
, data
);
2496 case PHY_READCOUNT_EQ_SKIP
:
2497 index
+= (count
== data
) ? 2 : 1;
2499 case PHY_COMP_EQ_SKIPN
:
2500 if (predata
== data
)
2504 case PHY_COMP_NEQ_SKIPN
:
2505 if (predata
!= data
)
2509 case PHY_WRITE_PREVIOUS
:
2510 rtl_writephy(tp
, regno
, predata
);
2526 ops
->write
= org
.write
;
2527 ops
->read
= org
.read
;
2530 static void rtl_release_firmware(struct rtl8169_private
*tp
)
2533 release_firmware(tp
->rtl_fw
->fw
);
2539 static void rtl_apply_firmware(struct rtl8169_private
*tp
)
2541 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2543 rtl_phy_write_fw(tp
, tp
->rtl_fw
);
2546 static void rtl_apply_firmware_cond(struct rtl8169_private
*tp
, u8 reg
, u16 val
)
2548 if (rtl_readphy(tp
, reg
) != val
)
2549 netif_warn(tp
, hw
, tp
->dev
, "chipset not ready for firmware\n");
2551 rtl_apply_firmware(tp
);
2554 static void rtl8168_config_eee_mac(struct rtl8169_private
*tp
)
2556 /* Adjust EEE LED frequency */
2557 if (tp
->mac_version
!= RTL_GIGA_MAC_VER_38
)
2558 RTL_W8(tp
, EEE_LED
, RTL_R8(tp
, EEE_LED
) & ~0x07);
2560 rtl_eri_set_bits(tp
, 0x1b0, ERIAR_MASK_1111
, 0x0003);
2563 static void rtl8168f_config_eee_phy(struct rtl8169_private
*tp
)
2565 struct phy_device
*phydev
= tp
->phydev
;
2567 phy_write(phydev
, 0x1f, 0x0007);
2568 phy_write(phydev
, 0x1e, 0x0020);
2569 phy_set_bits(phydev
, 0x15, BIT(8));
2571 phy_write(phydev
, 0x1f, 0x0005);
2572 phy_write(phydev
, 0x05, 0x8b85);
2573 phy_set_bits(phydev
, 0x06, BIT(13));
2575 phy_write(phydev
, 0x1f, 0x0000);
2578 static void rtl8168g_config_eee_phy(struct rtl8169_private
*tp
)
2580 phy_write(tp
->phydev
, 0x1f, 0x0a43);
2581 phy_set_bits(tp
->phydev
, 0x11, BIT(4));
2582 phy_write(tp
->phydev
, 0x1f, 0x0000);
2585 static void rtl8169s_hw_phy_config(struct rtl8169_private
*tp
)
2587 static const struct phy_reg phy_reg_init
[] = {
2649 rtl_writephy_batch(tp
, phy_reg_init
);
2652 static void rtl8169sb_hw_phy_config(struct rtl8169_private
*tp
)
2654 static const struct phy_reg phy_reg_init
[] = {
2660 rtl_writephy_batch(tp
, phy_reg_init
);
2663 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private
*tp
)
2665 struct pci_dev
*pdev
= tp
->pci_dev
;
2667 if ((pdev
->subsystem_vendor
!= PCI_VENDOR_ID_GIGABYTE
) ||
2668 (pdev
->subsystem_device
!= 0xe000))
2671 rtl_writephy(tp
, 0x1f, 0x0001);
2672 rtl_writephy(tp
, 0x10, 0xf01b);
2673 rtl_writephy(tp
, 0x1f, 0x0000);
2676 static void rtl8169scd_hw_phy_config(struct rtl8169_private
*tp
)
2678 static const struct phy_reg phy_reg_init
[] = {
2718 rtl_writephy_batch(tp
, phy_reg_init
);
2720 rtl8169scd_hw_phy_config_quirk(tp
);
2723 static void rtl8169sce_hw_phy_config(struct rtl8169_private
*tp
)
2725 static const struct phy_reg phy_reg_init
[] = {
2773 rtl_writephy_batch(tp
, phy_reg_init
);
2776 static void rtl8168bb_hw_phy_config(struct rtl8169_private
*tp
)
2778 static const struct phy_reg phy_reg_init
[] = {
2783 rtl_writephy(tp
, 0x1f, 0x0001);
2784 rtl_patchphy(tp
, 0x16, 1 << 0);
2786 rtl_writephy_batch(tp
, phy_reg_init
);
2789 static void rtl8168bef_hw_phy_config(struct rtl8169_private
*tp
)
2791 static const struct phy_reg phy_reg_init
[] = {
2797 rtl_writephy_batch(tp
, phy_reg_init
);
2800 static void rtl8168cp_1_hw_phy_config(struct rtl8169_private
*tp
)
2802 static const struct phy_reg phy_reg_init
[] = {
2810 rtl_writephy_batch(tp
, phy_reg_init
);
2813 static void rtl8168cp_2_hw_phy_config(struct rtl8169_private
*tp
)
2815 static const struct phy_reg phy_reg_init
[] = {
2821 rtl_writephy(tp
, 0x1f, 0x0000);
2822 rtl_patchphy(tp
, 0x14, 1 << 5);
2823 rtl_patchphy(tp
, 0x0d, 1 << 5);
2825 rtl_writephy_batch(tp
, phy_reg_init
);
2828 static void rtl8168c_1_hw_phy_config(struct rtl8169_private
*tp
)
2830 static const struct phy_reg phy_reg_init
[] = {
2850 rtl_writephy_batch(tp
, phy_reg_init
);
2852 rtl_patchphy(tp
, 0x14, 1 << 5);
2853 rtl_patchphy(tp
, 0x0d, 1 << 5);
2854 rtl_writephy(tp
, 0x1f, 0x0000);
2857 static void rtl8168c_2_hw_phy_config(struct rtl8169_private
*tp
)
2859 static const struct phy_reg phy_reg_init
[] = {
2877 rtl_writephy_batch(tp
, phy_reg_init
);
2879 rtl_patchphy(tp
, 0x16, 1 << 0);
2880 rtl_patchphy(tp
, 0x14, 1 << 5);
2881 rtl_patchphy(tp
, 0x0d, 1 << 5);
2882 rtl_writephy(tp
, 0x1f, 0x0000);
2885 static void rtl8168c_3_hw_phy_config(struct rtl8169_private
*tp
)
2887 static const struct phy_reg phy_reg_init
[] = {
2899 rtl_writephy_batch(tp
, phy_reg_init
);
2901 rtl_patchphy(tp
, 0x16, 1 << 0);
2902 rtl_patchphy(tp
, 0x14, 1 << 5);
2903 rtl_patchphy(tp
, 0x0d, 1 << 5);
2904 rtl_writephy(tp
, 0x1f, 0x0000);
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	/* This revision takes the same PHY tweaks as the C rev. 3 chips. */
	rtl8168c_3_hw_phy_config(tp);
}
2912 static void rtl8168d_1_hw_phy_config(struct rtl8169_private
*tp
)
2914 static const struct phy_reg phy_reg_init_0
[] = {
2915 /* Channel Estimation */
2936 * Enhance line driver power
2945 * Can not link to 1Gbps with bad cable
2946 * Decrease SNR threshold form 21.07dB to 19.04dB
2955 rtl_writephy_batch(tp
, phy_reg_init_0
);
2959 * Fine Tune Switching regulator parameter
2961 rtl_writephy(tp
, 0x1f, 0x0002);
2962 rtl_w0w1_phy(tp
, 0x0b, 0x0010, 0x00ef);
2963 rtl_w0w1_phy(tp
, 0x0c, 0xa200, 0x5d00);
2965 if (rtl8168d_efuse_read(tp
, 0x01) == 0xb1) {
2966 static const struct phy_reg phy_reg_init
[] = {
2976 rtl_writephy_batch(tp
, phy_reg_init
);
2978 val
= rtl_readphy(tp
, 0x0d);
2980 if ((val
& 0x00ff) != 0x006c) {
2981 static const u32 set
[] = {
2982 0x0065, 0x0066, 0x0067, 0x0068,
2983 0x0069, 0x006a, 0x006b, 0x006c
2987 rtl_writephy(tp
, 0x1f, 0x0002);
2990 for (i
= 0; i
< ARRAY_SIZE(set
); i
++)
2991 rtl_writephy(tp
, 0x0d, val
| set
[i
]);
2994 static const struct phy_reg phy_reg_init
[] = {
3002 rtl_writephy_batch(tp
, phy_reg_init
);
3005 /* RSET couple improve */
3006 rtl_writephy(tp
, 0x1f, 0x0002);
3007 rtl_patchphy(tp
, 0x0d, 0x0300);
3008 rtl_patchphy(tp
, 0x0f, 0x0010);
3010 /* Fine tune PLL performance */
3011 rtl_writephy(tp
, 0x1f, 0x0002);
3012 rtl_w0w1_phy(tp
, 0x02, 0x0100, 0x0600);
3013 rtl_w0w1_phy(tp
, 0x03, 0x0000, 0xe000);
3015 rtl_writephy(tp
, 0x1f, 0x0005);
3016 rtl_writephy(tp
, 0x05, 0x001b);
3018 rtl_apply_firmware_cond(tp
, MII_EXPANSION
, 0xbf00);
3020 rtl_writephy(tp
, 0x1f, 0x0000);
3023 static void rtl8168d_2_hw_phy_config(struct rtl8169_private
*tp
)
3025 static const struct phy_reg phy_reg_init_0
[] = {
3026 /* Channel Estimation */
3047 * Enhance line driver power
3056 * Can not link to 1Gbps with bad cable
3057 * Decrease SNR threshold form 21.07dB to 19.04dB
3066 rtl_writephy_batch(tp
, phy_reg_init_0
);
3068 if (rtl8168d_efuse_read(tp
, 0x01) == 0xb1) {
3069 static const struct phy_reg phy_reg_init
[] = {
3080 rtl_writephy_batch(tp
, phy_reg_init
);
3082 val
= rtl_readphy(tp
, 0x0d);
3083 if ((val
& 0x00ff) != 0x006c) {
3084 static const u32 set
[] = {
3085 0x0065, 0x0066, 0x0067, 0x0068,
3086 0x0069, 0x006a, 0x006b, 0x006c
3090 rtl_writephy(tp
, 0x1f, 0x0002);
3093 for (i
= 0; i
< ARRAY_SIZE(set
); i
++)
3094 rtl_writephy(tp
, 0x0d, val
| set
[i
]);
3097 static const struct phy_reg phy_reg_init
[] = {
3105 rtl_writephy_batch(tp
, phy_reg_init
);
3108 /* Fine tune PLL performance */
3109 rtl_writephy(tp
, 0x1f, 0x0002);
3110 rtl_w0w1_phy(tp
, 0x02, 0x0100, 0x0600);
3111 rtl_w0w1_phy(tp
, 0x03, 0x0000, 0xe000);
3113 /* Switching regulator Slew rate */
3114 rtl_writephy(tp
, 0x1f, 0x0002);
3115 rtl_patchphy(tp
, 0x0f, 0x0017);
3117 rtl_writephy(tp
, 0x1f, 0x0005);
3118 rtl_writephy(tp
, 0x05, 0x001b);
3120 rtl_apply_firmware_cond(tp
, MII_EXPANSION
, 0xb300);
3122 rtl_writephy(tp
, 0x1f, 0x0000);
3125 static void rtl8168d_3_hw_phy_config(struct rtl8169_private
*tp
)
3127 static const struct phy_reg phy_reg_init
[] = {
3183 rtl_writephy_batch(tp
, phy_reg_init
);
3186 static void rtl8168d_4_hw_phy_config(struct rtl8169_private
*tp
)
3188 static const struct phy_reg phy_reg_init
[] = {
3198 rtl_writephy_batch(tp
, phy_reg_init
);
3199 rtl_patchphy(tp
, 0x0d, 1 << 5);
3202 static void rtl8168e_1_hw_phy_config(struct rtl8169_private
*tp
)
3204 static const struct phy_reg phy_reg_init
[] = {
3205 /* Enable Delay cap */
3211 /* Channel estimation fine tune */
3220 /* Update PFM & 10M TX idle timer */
3232 rtl_apply_firmware(tp
);
3234 rtl_writephy_batch(tp
, phy_reg_init
);
3236 /* DCO enable for 10M IDLE Power */
3237 rtl_writephy(tp
, 0x1f, 0x0007);
3238 rtl_writephy(tp
, 0x1e, 0x0023);
3239 rtl_w0w1_phy(tp
, 0x17, 0x0006, 0x0000);
3240 rtl_writephy(tp
, 0x1f, 0x0000);
3242 /* For impedance matching */
3243 rtl_writephy(tp
, 0x1f, 0x0002);
3244 rtl_w0w1_phy(tp
, 0x08, 0x8000, 0x7f00);
3245 rtl_writephy(tp
, 0x1f, 0x0000);
3247 /* PHY auto speed down */
3248 rtl_writephy(tp
, 0x1f, 0x0007);
3249 rtl_writephy(tp
, 0x1e, 0x002d);
3250 rtl_w0w1_phy(tp
, 0x18, 0x0050, 0x0000);
3251 rtl_writephy(tp
, 0x1f, 0x0000);
3252 rtl_w0w1_phy(tp
, 0x14, 0x8000, 0x0000);
3254 rtl_writephy(tp
, 0x1f, 0x0005);
3255 rtl_writephy(tp
, 0x05, 0x8b86);
3256 rtl_w0w1_phy(tp
, 0x06, 0x0001, 0x0000);
3257 rtl_writephy(tp
, 0x1f, 0x0000);
3259 rtl_writephy(tp
, 0x1f, 0x0005);
3260 rtl_writephy(tp
, 0x05, 0x8b85);
3261 rtl_w0w1_phy(tp
, 0x06, 0x0000, 0x2000);
3262 rtl_writephy(tp
, 0x1f, 0x0007);
3263 rtl_writephy(tp
, 0x1e, 0x0020);
3264 rtl_w0w1_phy(tp
, 0x15, 0x0000, 0x1100);
3265 rtl_writephy(tp
, 0x1f, 0x0006);
3266 rtl_writephy(tp
, 0x00, 0x5a00);
3267 rtl_writephy(tp
, 0x1f, 0x0000);
3268 rtl_writephy(tp
, 0x0d, 0x0007);
3269 rtl_writephy(tp
, 0x0e, 0x003c);
3270 rtl_writephy(tp
, 0x0d, 0x4007);
3271 rtl_writephy(tp
, 0x0e, 0x0000);
3272 rtl_writephy(tp
, 0x0d, 0x0000);
3275 static void rtl_rar_exgmac_set(struct rtl8169_private
*tp
, u8
*addr
)
3278 addr
[0] | (addr
[1] << 8),
3279 addr
[2] | (addr
[3] << 8),
3280 addr
[4] | (addr
[5] << 8)
3283 rtl_eri_write(tp
, 0xe0, ERIAR_MASK_1111
, w
[0] | (w
[1] << 16));
3284 rtl_eri_write(tp
, 0xe4, ERIAR_MASK_1111
, w
[2]);
3285 rtl_eri_write(tp
, 0xf0, ERIAR_MASK_1111
, w
[0] << 16);
3286 rtl_eri_write(tp
, 0xf4, ERIAR_MASK_1111
, w
[1] | (w
[2] << 16));
3289 static void rtl8168e_2_hw_phy_config(struct rtl8169_private
*tp
)
3291 static const struct phy_reg phy_reg_init
[] = {
3292 /* Enable Delay cap */
3301 /* Channel estimation fine tune */
3318 rtl_apply_firmware(tp
);
3320 rtl_writephy_batch(tp
, phy_reg_init
);
3322 /* For 4-corner performance improve */
3323 rtl_writephy(tp
, 0x1f, 0x0005);
3324 rtl_writephy(tp
, 0x05, 0x8b80);
3325 rtl_w0w1_phy(tp
, 0x17, 0x0006, 0x0000);
3326 rtl_writephy(tp
, 0x1f, 0x0000);
3328 /* PHY auto speed down */
3329 rtl_writephy(tp
, 0x1f, 0x0004);
3330 rtl_writephy(tp
, 0x1f, 0x0007);
3331 rtl_writephy(tp
, 0x1e, 0x002d);
3332 rtl_w0w1_phy(tp
, 0x18, 0x0010, 0x0000);
3333 rtl_writephy(tp
, 0x1f, 0x0002);
3334 rtl_writephy(tp
, 0x1f, 0x0000);
3335 rtl_w0w1_phy(tp
, 0x14, 0x8000, 0x0000);
3337 /* improve 10M EEE waveform */
3338 rtl_writephy(tp
, 0x1f, 0x0005);
3339 rtl_writephy(tp
, 0x05, 0x8b86);
3340 rtl_w0w1_phy(tp
, 0x06, 0x0001, 0x0000);
3341 rtl_writephy(tp
, 0x1f, 0x0000);
3343 /* Improve 2-pair detection performance */
3344 rtl_writephy(tp
, 0x1f, 0x0005);
3345 rtl_writephy(tp
, 0x05, 0x8b85);
3346 rtl_w0w1_phy(tp
, 0x06, 0x4000, 0x0000);
3347 rtl_writephy(tp
, 0x1f, 0x0000);
3349 rtl8168f_config_eee_phy(tp
);
3353 rtl_writephy(tp
, 0x1f, 0x0003);
3354 rtl_w0w1_phy(tp
, 0x19, 0x0001, 0x0000);
3355 rtl_w0w1_phy(tp
, 0x10, 0x0400, 0x0000);
3356 rtl_writephy(tp
, 0x1f, 0x0000);
3357 rtl_writephy(tp
, 0x1f, 0x0005);
3358 rtl_w0w1_phy(tp
, 0x01, 0x0100, 0x0000);
3359 rtl_writephy(tp
, 0x1f, 0x0000);
3361 /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
3362 rtl_rar_exgmac_set(tp
, tp
->dev
->dev_addr
);
/* PHY tweaks shared by all RTL8168F-family configurations. */
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w0w1_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl8168f_config_eee_phy(tp);
}
3390 static void rtl8168f_1_hw_phy_config(struct rtl8169_private
*tp
)
3392 static const struct phy_reg phy_reg_init
[] = {
3393 /* Channel estimation fine tune */
3398 /* Modify green table for giga & fnet */
3415 /* Modify green table for 10M */
3421 /* Disable hiimpedance detection (RTCT) */
3427 rtl_apply_firmware(tp
);
3429 rtl_writephy_batch(tp
, phy_reg_init
);
3431 rtl8168f_hw_phy_config(tp
);
3433 /* Improve 2-pair detection performance */
3434 rtl_writephy(tp
, 0x1f, 0x0005);
3435 rtl_writephy(tp
, 0x05, 0x8b85);
3436 rtl_w0w1_phy(tp
, 0x06, 0x4000, 0x0000);
3437 rtl_writephy(tp
, 0x1f, 0x0000);
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
{
	/* Load the PHY firmware first, then apply the common F-family tweaks. */
	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);
}
3447 static void rtl8411_hw_phy_config(struct rtl8169_private
*tp
)
3449 static const struct phy_reg phy_reg_init
[] = {
3450 /* Channel estimation fine tune */
3455 /* Modify green table for giga & fnet */
3472 /* Modify green table for 10M */
3478 /* Disable hiimpedance detection (RTCT) */
3485 rtl_apply_firmware(tp
);
3487 rtl8168f_hw_phy_config(tp
);
3489 /* Improve 2-pair detection performance */
3490 rtl_writephy(tp
, 0x1f, 0x0005);
3491 rtl_writephy(tp
, 0x05, 0x8b85);
3492 rtl_w0w1_phy(tp
, 0x06, 0x4000, 0x0000);
3493 rtl_writephy(tp
, 0x1f, 0x0000);
3495 rtl_writephy_batch(tp
, phy_reg_init
);
3497 /* Modify green table for giga */
3498 rtl_writephy(tp
, 0x1f, 0x0005);
3499 rtl_writephy(tp
, 0x05, 0x8b54);
3500 rtl_w0w1_phy(tp
, 0x06, 0x0000, 0x0800);
3501 rtl_writephy(tp
, 0x05, 0x8b5d);
3502 rtl_w0w1_phy(tp
, 0x06, 0x0000, 0x0800);
3503 rtl_writephy(tp
, 0x05, 0x8a7c);
3504 rtl_w0w1_phy(tp
, 0x06, 0x0000, 0x0100);
3505 rtl_writephy(tp
, 0x05, 0x8a7f);
3506 rtl_w0w1_phy(tp
, 0x06, 0x0100, 0x0000);
3507 rtl_writephy(tp
, 0x05, 0x8a82);
3508 rtl_w0w1_phy(tp
, 0x06, 0x0000, 0x0100);
3509 rtl_writephy(tp
, 0x05, 0x8a85);
3510 rtl_w0w1_phy(tp
, 0x06, 0x0000, 0x0100);
3511 rtl_writephy(tp
, 0x05, 0x8a88);
3512 rtl_w0w1_phy(tp
, 0x06, 0x0000, 0x0100);
3513 rtl_writephy(tp
, 0x1f, 0x0000);
3515 /* uc same-seed solution */
3516 rtl_writephy(tp
, 0x1f, 0x0005);
3517 rtl_writephy(tp
, 0x05, 0x8b85);
3518 rtl_w0w1_phy(tp
, 0x06, 0x8000, 0x0000);
3519 rtl_writephy(tp
, 0x1f, 0x0000);
3522 rtl_writephy(tp
, 0x1f, 0x0003);
3523 rtl_w0w1_phy(tp
, 0x19, 0x0000, 0x0001);
3524 rtl_w0w1_phy(tp
, 0x10, 0x0000, 0x0400);
3525 rtl_writephy(tp
, 0x1f, 0x0000);
3528 static void rtl8168g_disable_aldps(struct rtl8169_private
*tp
)
3530 phy_write(tp
->phydev
, 0x1f, 0x0a43);
3531 phy_clear_bits(tp
->phydev
, 0x10, BIT(2));
3534 static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private
*tp
)
3536 struct phy_device
*phydev
= tp
->phydev
;
3538 phy_write(phydev
, 0x1f, 0x0bcc);
3539 phy_clear_bits(phydev
, 0x14, BIT(8));
3541 phy_write(phydev
, 0x1f, 0x0a44);
3542 phy_set_bits(phydev
, 0x11, BIT(7) | BIT(6));
3544 phy_write(phydev
, 0x1f, 0x0a43);
3545 phy_write(phydev
, 0x13, 0x8084);
3546 phy_clear_bits(phydev
, 0x14, BIT(14) | BIT(13));
3547 phy_set_bits(phydev
, 0x10, BIT(12) | BIT(1) | BIT(0));
3549 phy_write(phydev
, 0x1f, 0x0000);
3552 static void rtl8168g_1_hw_phy_config(struct rtl8169_private
*tp
)
3554 rtl_apply_firmware(tp
);
3556 rtl_writephy(tp
, 0x1f, 0x0a46);
3557 if (rtl_readphy(tp
, 0x10) & 0x0100) {
3558 rtl_writephy(tp
, 0x1f, 0x0bcc);
3559 rtl_w0w1_phy(tp
, 0x12, 0x0000, 0x8000);
3561 rtl_writephy(tp
, 0x1f, 0x0bcc);
3562 rtl_w0w1_phy(tp
, 0x12, 0x8000, 0x0000);
3565 rtl_writephy(tp
, 0x1f, 0x0a46);
3566 if (rtl_readphy(tp
, 0x13) & 0x0100) {
3567 rtl_writephy(tp
, 0x1f, 0x0c41);
3568 rtl_w0w1_phy(tp
, 0x15, 0x0002, 0x0000);
3570 rtl_writephy(tp
, 0x1f, 0x0c41);
3571 rtl_w0w1_phy(tp
, 0x15, 0x0000, 0x0002);
3574 /* Enable PHY auto speed down */
3575 rtl_writephy(tp
, 0x1f, 0x0a44);
3576 rtl_w0w1_phy(tp
, 0x11, 0x000c, 0x0000);
3578 rtl8168g_phy_adjust_10m_aldps(tp
);
3580 /* EEE auto-fallback function */
3581 rtl_writephy(tp
, 0x1f, 0x0a4b);
3582 rtl_w0w1_phy(tp
, 0x11, 0x0004, 0x0000);
3584 /* Enable UC LPF tune function */
3585 rtl_writephy(tp
, 0x1f, 0x0a43);
3586 rtl_writephy(tp
, 0x13, 0x8012);
3587 rtl_w0w1_phy(tp
, 0x14, 0x8000, 0x0000);
3589 rtl_writephy(tp
, 0x1f, 0x0c42);
3590 rtl_w0w1_phy(tp
, 0x11, 0x4000, 0x2000);
3592 /* Improve SWR Efficiency */
3593 rtl_writephy(tp
, 0x1f, 0x0bcd);
3594 rtl_writephy(tp
, 0x14, 0x5065);
3595 rtl_writephy(tp
, 0x14, 0xd065);
3596 rtl_writephy(tp
, 0x1f, 0x0bc8);
3597 rtl_writephy(tp
, 0x11, 0x5655);
3598 rtl_writephy(tp
, 0x1f, 0x0bcd);
3599 rtl_writephy(tp
, 0x14, 0x1065);
3600 rtl_writephy(tp
, 0x14, 0x9065);
3601 rtl_writephy(tp
, 0x14, 0x1065);
3603 rtl8168g_disable_aldps(tp
);
3604 rtl8168g_config_eee_phy(tp
);
static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
{
	/* Firmware download plus EEE setup is all this revision needs. */
	rtl_apply_firmware(tp);
	rtl8168g_config_eee_phy(tp);
}
3615 static void rtl8168h_1_hw_phy_config(struct rtl8169_private
*tp
)
3620 rtl_apply_firmware(tp
);
3622 /* CHN EST parameters adjust - giga master */
3623 rtl_writephy(tp
, 0x1f, 0x0a43);
3624 rtl_writephy(tp
, 0x13, 0x809b);
3625 rtl_w0w1_phy(tp
, 0x14, 0x8000, 0xf800);
3626 rtl_writephy(tp
, 0x13, 0x80a2);
3627 rtl_w0w1_phy(tp
, 0x14, 0x8000, 0xff00);
3628 rtl_writephy(tp
, 0x13, 0x80a4);
3629 rtl_w0w1_phy(tp
, 0x14, 0x8500, 0xff00);
3630 rtl_writephy(tp
, 0x13, 0x809c);
3631 rtl_w0w1_phy(tp
, 0x14, 0xbd00, 0xff00);
3632 rtl_writephy(tp
, 0x1f, 0x0000);
3634 /* CHN EST parameters adjust - giga slave */
3635 rtl_writephy(tp
, 0x1f, 0x0a43);
3636 rtl_writephy(tp
, 0x13, 0x80ad);
3637 rtl_w0w1_phy(tp
, 0x14, 0x7000, 0xf800);
3638 rtl_writephy(tp
, 0x13, 0x80b4);
3639 rtl_w0w1_phy(tp
, 0x14, 0x5000, 0xff00);
3640 rtl_writephy(tp
, 0x13, 0x80ac);
3641 rtl_w0w1_phy(tp
, 0x14, 0x4000, 0xff00);
3642 rtl_writephy(tp
, 0x1f, 0x0000);
3644 /* CHN EST parameters adjust - fnet */
3645 rtl_writephy(tp
, 0x1f, 0x0a43);
3646 rtl_writephy(tp
, 0x13, 0x808e);
3647 rtl_w0w1_phy(tp
, 0x14, 0x1200, 0xff00);
3648 rtl_writephy(tp
, 0x13, 0x8090);
3649 rtl_w0w1_phy(tp
, 0x14, 0xe500, 0xff00);
3650 rtl_writephy(tp
, 0x13, 0x8092);
3651 rtl_w0w1_phy(tp
, 0x14, 0x9f00, 0xff00);
3652 rtl_writephy(tp
, 0x1f, 0x0000);
3654 /* enable R-tune & PGA-retune function */
3656 rtl_writephy(tp
, 0x1f, 0x0a46);
3657 data
= rtl_readphy(tp
, 0x13);
3660 dout_tapbin
|= data
;
3661 data
= rtl_readphy(tp
, 0x12);
3664 dout_tapbin
|= data
;
3665 dout_tapbin
= ~(dout_tapbin
^0x08);
3667 dout_tapbin
&= 0xf000;
3668 rtl_writephy(tp
, 0x1f, 0x0a43);
3669 rtl_writephy(tp
, 0x13, 0x827a);
3670 rtl_w0w1_phy(tp
, 0x14, dout_tapbin
, 0xf000);
3671 rtl_writephy(tp
, 0x13, 0x827b);
3672 rtl_w0w1_phy(tp
, 0x14, dout_tapbin
, 0xf000);
3673 rtl_writephy(tp
, 0x13, 0x827c);
3674 rtl_w0w1_phy(tp
, 0x14, dout_tapbin
, 0xf000);
3675 rtl_writephy(tp
, 0x13, 0x827d);
3676 rtl_w0w1_phy(tp
, 0x14, dout_tapbin
, 0xf000);
3678 rtl_writephy(tp
, 0x1f, 0x0a43);
3679 rtl_writephy(tp
, 0x13, 0x0811);
3680 rtl_w0w1_phy(tp
, 0x14, 0x0800, 0x0000);
3681 rtl_writephy(tp
, 0x1f, 0x0a42);
3682 rtl_w0w1_phy(tp
, 0x16, 0x0002, 0x0000);
3683 rtl_writephy(tp
, 0x1f, 0x0000);
3685 /* enable GPHY 10M */
3686 rtl_writephy(tp
, 0x1f, 0x0a44);
3687 rtl_w0w1_phy(tp
, 0x11, 0x0800, 0x0000);
3688 rtl_writephy(tp
, 0x1f, 0x0000);
3690 /* SAR ADC performance */
3691 rtl_writephy(tp
, 0x1f, 0x0bca);
3692 rtl_w0w1_phy(tp
, 0x17, 0x4000, 0x3000);
3693 rtl_writephy(tp
, 0x1f, 0x0000);
3695 rtl_writephy(tp
, 0x1f, 0x0a43);
3696 rtl_writephy(tp
, 0x13, 0x803f);
3697 rtl_w0w1_phy(tp
, 0x14, 0x0000, 0x3000);
3698 rtl_writephy(tp
, 0x13, 0x8047);
3699 rtl_w0w1_phy(tp
, 0x14, 0x0000, 0x3000);
3700 rtl_writephy(tp
, 0x13, 0x804f);
3701 rtl_w0w1_phy(tp
, 0x14, 0x0000, 0x3000);
3702 rtl_writephy(tp
, 0x13, 0x8057);
3703 rtl_w0w1_phy(tp
, 0x14, 0x0000, 0x3000);
3704 rtl_writephy(tp
, 0x13, 0x805f);
3705 rtl_w0w1_phy(tp
, 0x14, 0x0000, 0x3000);
3706 rtl_writephy(tp
, 0x13, 0x8067);
3707 rtl_w0w1_phy(tp
, 0x14, 0x0000, 0x3000);
3708 rtl_writephy(tp
, 0x13, 0x806f);
3709 rtl_w0w1_phy(tp
, 0x14, 0x0000, 0x3000);
3710 rtl_writephy(tp
, 0x1f, 0x0000);
3712 /* disable phy pfm mode */
3713 rtl_writephy(tp
, 0x1f, 0x0a44);
3714 rtl_w0w1_phy(tp
, 0x11, 0x0000, 0x0080);
3715 rtl_writephy(tp
, 0x1f, 0x0000);
3717 rtl8168g_disable_aldps(tp
);
3718 rtl8168g_config_eee_phy(tp
);
3722 static void rtl8168h_2_hw_phy_config(struct rtl8169_private
*tp
)
3724 u16 ioffset_p3
, ioffset_p2
, ioffset_p1
, ioffset_p0
;
3728 rtl_apply_firmware(tp
);
3730 /* CHIN EST parameter update */
3731 rtl_writephy(tp
, 0x1f, 0x0a43);
3732 rtl_writephy(tp
, 0x13, 0x808a);
3733 rtl_w0w1_phy(tp
, 0x14, 0x000a, 0x003f);
3734 rtl_writephy(tp
, 0x1f, 0x0000);
3736 /* enable R-tune & PGA-retune function */
3737 rtl_writephy(tp
, 0x1f, 0x0a43);
3738 rtl_writephy(tp
, 0x13, 0x0811);
3739 rtl_w0w1_phy(tp
, 0x14, 0x0800, 0x0000);
3740 rtl_writephy(tp
, 0x1f, 0x0a42);
3741 rtl_w0w1_phy(tp
, 0x16, 0x0002, 0x0000);
3742 rtl_writephy(tp
, 0x1f, 0x0000);
3744 /* enable GPHY 10M */
3745 rtl_writephy(tp
, 0x1f, 0x0a44);
3746 rtl_w0w1_phy(tp
, 0x11, 0x0800, 0x0000);
3747 rtl_writephy(tp
, 0x1f, 0x0000);
3749 r8168_mac_ocp_write(tp
, 0xdd02, 0x807d);
3750 data
= r8168_mac_ocp_read(tp
, 0xdd02);
3751 ioffset_p3
= ((data
& 0x80)>>7);
3754 data
= r8168_mac_ocp_read(tp
, 0xdd00);
3755 ioffset_p3
|= ((data
& (0xe000))>>13);
3756 ioffset_p2
= ((data
& (0x1e00))>>9);
3757 ioffset_p1
= ((data
& (0x01e0))>>5);
3758 ioffset_p0
= ((data
& 0x0010)>>4);
3760 ioffset_p0
|= (data
& (0x07));
3761 data
= (ioffset_p3
<<12)|(ioffset_p2
<<8)|(ioffset_p1
<<4)|(ioffset_p0
);
3763 if ((ioffset_p3
!= 0x0f) || (ioffset_p2
!= 0x0f) ||
3764 (ioffset_p1
!= 0x0f) || (ioffset_p0
!= 0x0f)) {
3765 rtl_writephy(tp
, 0x1f, 0x0bcf);
3766 rtl_writephy(tp
, 0x16, data
);
3767 rtl_writephy(tp
, 0x1f, 0x0000);
3770 /* Modify rlen (TX LPF corner frequency) level */
3771 rtl_writephy(tp
, 0x1f, 0x0bcd);
3772 data
= rtl_readphy(tp
, 0x16);
3777 data
= rlen
| (rlen
<<4) | (rlen
<<8) | (rlen
<<12);
3778 rtl_writephy(tp
, 0x17, data
);
3779 rtl_writephy(tp
, 0x1f, 0x0bcd);
3780 rtl_writephy(tp
, 0x1f, 0x0000);
3782 /* disable phy pfm mode */
3783 rtl_writephy(tp
, 0x1f, 0x0a44);
3784 rtl_w0w1_phy(tp
, 0x11, 0x0000, 0x0080);
3785 rtl_writephy(tp
, 0x1f, 0x0000);
3787 rtl8168g_disable_aldps(tp
);
3788 rtl8168g_config_eee_phy(tp
);
static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
{
	/* Enable PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl8168g_phy_adjust_10m_aldps(tp);

	/* Enable EEE auto-fallback function */
	rtl_writephy(tp, 0x1f, 0x0a4b);
	rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Enable UC LPF tune function */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8012);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* set rg_sel_sdm_rate */
	rtl_writephy(tp, 0x1f, 0x0c42);
	rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl8168g_disable_aldps(tp);
	rtl8168g_config_eee_phy(tp);
}
static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168g_phy_adjust_10m_aldps(tp);

	/* Enable UC LPF tune function */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8012);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Set rg_sel_sdm_rate */
	rtl_writephy(tp, 0x1f, 0x0c42);
	rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Channel estimation parameters */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x80f3);
	rtl_w0w1_phy(tp, 0x14, 0x8b00, ~0x8bff);
	rtl_writephy(tp, 0x13, 0x80f0);
	rtl_w0w1_phy(tp, 0x14, 0x3a00, ~0x3aff);
	rtl_writephy(tp, 0x13, 0x80ef);
	rtl_w0w1_phy(tp, 0x14, 0x0500, ~0x05ff);
	rtl_writephy(tp, 0x13, 0x80f6);
	rtl_w0w1_phy(tp, 0x14, 0x6e00, ~0x6eff);
	rtl_writephy(tp, 0x13, 0x80ec);
	rtl_w0w1_phy(tp, 0x14, 0x6800, ~0x68ff);
	rtl_writephy(tp, 0x13, 0x80ed);
	rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff);
	rtl_writephy(tp, 0x13, 0x80f2);
	rtl_w0w1_phy(tp, 0x14, 0xf400, ~0xf4ff);
	rtl_writephy(tp, 0x13, 0x80f4);
	rtl_w0w1_phy(tp, 0x14, 0x8500, ~0x85ff);
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8110);
	rtl_w0w1_phy(tp, 0x14, 0xa800, ~0xa8ff);
	rtl_writephy(tp, 0x13, 0x810f);
	rtl_w0w1_phy(tp, 0x14, 0x1d00, ~0x1dff);
	rtl_writephy(tp, 0x13, 0x8111);
	rtl_w0w1_phy(tp, 0x14, 0xf500, ~0xf5ff);
	rtl_writephy(tp, 0x13, 0x8113);
	rtl_w0w1_phy(tp, 0x14, 0x6100, ~0x61ff);
	rtl_writephy(tp, 0x13, 0x8115);
	rtl_w0w1_phy(tp, 0x14, 0x9200, ~0x92ff);
	rtl_writephy(tp, 0x13, 0x810e);
	rtl_w0w1_phy(tp, 0x14, 0x0400, ~0x04ff);
	rtl_writephy(tp, 0x13, 0x810c);
	rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff);
	rtl_writephy(tp, 0x13, 0x810b);
	rtl_w0w1_phy(tp, 0x14, 0x5a00, ~0x5aff);
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x80d1);
	rtl_w0w1_phy(tp, 0x14, 0xff00, ~0xffff);
	rtl_writephy(tp, 0x13, 0x80cd);
	rtl_w0w1_phy(tp, 0x14, 0x9e00, ~0x9eff);
	rtl_writephy(tp, 0x13, 0x80d3);
	rtl_w0w1_phy(tp, 0x14, 0x0e00, ~0x0eff);
	rtl_writephy(tp, 0x13, 0x80d5);
	rtl_w0w1_phy(tp, 0x14, 0xca00, ~0xcaff);
	rtl_writephy(tp, 0x13, 0x80d7);
	rtl_w0w1_phy(tp, 0x14, 0x8400, ~0x84ff);

	/* Force PWM-mode */
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x14, 0x5065);
	rtl_writephy(tp, 0x14, 0xd065);
	rtl_writephy(tp, 0x1f, 0x0bc8);
	rtl_writephy(tp, 0x12, 0x00ed);
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x14, 0x1065);
	rtl_writephy(tp, 0x14, 0x9065);
	rtl_writephy(tp, 0x14, 0x1065);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl8168g_disable_aldps(tp);
	rtl8168g_config_eee_phy(tp);
}
3901 static void rtl8102e_hw_phy_config(struct rtl8169_private
*tp
)
3903 static const struct phy_reg phy_reg_init
[] = {
3910 rtl_writephy(tp
, 0x1f, 0x0000);
3911 rtl_patchphy(tp
, 0x11, 1 << 12);
3912 rtl_patchphy(tp
, 0x19, 1 << 13);
3913 rtl_patchphy(tp
, 0x10, 1 << 15);
3915 rtl_writephy_batch(tp
, phy_reg_init
);
3918 static void rtl8105e_hw_phy_config(struct rtl8169_private
*tp
)
3920 static const struct phy_reg phy_reg_init
[] = {
3934 /* Disable ALDPS before ram code */
3935 rtl_writephy(tp
, 0x1f, 0x0000);
3936 rtl_writephy(tp
, 0x18, 0x0310);
3939 rtl_apply_firmware(tp
);
3941 rtl_writephy_batch(tp
, phy_reg_init
);
3944 static void rtl8402_hw_phy_config(struct rtl8169_private
*tp
)
3946 /* Disable ALDPS before setting firmware */
3947 rtl_writephy(tp
, 0x1f, 0x0000);
3948 rtl_writephy(tp
, 0x18, 0x0310);
3951 rtl_apply_firmware(tp
);
3954 rtl_eri_write(tp
, 0x1b0, ERIAR_MASK_0011
, 0x0000);
3955 rtl_writephy(tp
, 0x1f, 0x0004);
3956 rtl_writephy(tp
, 0x10, 0x401f);
3957 rtl_writephy(tp
, 0x19, 0x7030);
3958 rtl_writephy(tp
, 0x1f, 0x0000);
3961 static void rtl8106e_hw_phy_config(struct rtl8169_private
*tp
)
3963 static const struct phy_reg phy_reg_init
[] = {
3970 /* Disable ALDPS before ram code */
3971 rtl_writephy(tp
, 0x1f, 0x0000);
3972 rtl_writephy(tp
, 0x18, 0x0310);
3975 rtl_apply_firmware(tp
);
3977 rtl_eri_write(tp
, 0x1b0, ERIAR_MASK_0011
, 0x0000);
3978 rtl_writephy_batch(tp
, phy_reg_init
);
3980 rtl_eri_write(tp
, 0x1d0, ERIAR_MASK_0011
, 0x0000);
3983 static void rtl_hw_phy_config(struct net_device
*dev
)
3985 static const rtl_generic_fct phy_configs
[] = {
3987 [RTL_GIGA_MAC_VER_01
] = NULL
,
3988 [RTL_GIGA_MAC_VER_02
] = rtl8169s_hw_phy_config
,
3989 [RTL_GIGA_MAC_VER_03
] = rtl8169s_hw_phy_config
,
3990 [RTL_GIGA_MAC_VER_04
] = rtl8169sb_hw_phy_config
,
3991 [RTL_GIGA_MAC_VER_05
] = rtl8169scd_hw_phy_config
,
3992 [RTL_GIGA_MAC_VER_06
] = rtl8169sce_hw_phy_config
,
3993 /* PCI-E devices. */
3994 [RTL_GIGA_MAC_VER_07
] = rtl8102e_hw_phy_config
,
3995 [RTL_GIGA_MAC_VER_08
] = rtl8102e_hw_phy_config
,
3996 [RTL_GIGA_MAC_VER_09
] = rtl8102e_hw_phy_config
,
3997 [RTL_GIGA_MAC_VER_10
] = NULL
,
3998 [RTL_GIGA_MAC_VER_11
] = rtl8168bb_hw_phy_config
,
3999 [RTL_GIGA_MAC_VER_12
] = rtl8168bef_hw_phy_config
,
4000 [RTL_GIGA_MAC_VER_13
] = NULL
,
4001 [RTL_GIGA_MAC_VER_14
] = NULL
,
4002 [RTL_GIGA_MAC_VER_15
] = NULL
,
4003 [RTL_GIGA_MAC_VER_16
] = NULL
,
4004 [RTL_GIGA_MAC_VER_17
] = rtl8168bef_hw_phy_config
,
4005 [RTL_GIGA_MAC_VER_18
] = rtl8168cp_1_hw_phy_config
,
4006 [RTL_GIGA_MAC_VER_19
] = rtl8168c_1_hw_phy_config
,
4007 [RTL_GIGA_MAC_VER_20
] = rtl8168c_2_hw_phy_config
,
4008 [RTL_GIGA_MAC_VER_21
] = rtl8168c_3_hw_phy_config
,
4009 [RTL_GIGA_MAC_VER_22
] = rtl8168c_4_hw_phy_config
,
4010 [RTL_GIGA_MAC_VER_23
] = rtl8168cp_2_hw_phy_config
,
4011 [RTL_GIGA_MAC_VER_24
] = rtl8168cp_2_hw_phy_config
,
4012 [RTL_GIGA_MAC_VER_25
] = rtl8168d_1_hw_phy_config
,
4013 [RTL_GIGA_MAC_VER_26
] = rtl8168d_2_hw_phy_config
,
4014 [RTL_GIGA_MAC_VER_27
] = rtl8168d_3_hw_phy_config
,
4015 [RTL_GIGA_MAC_VER_28
] = rtl8168d_4_hw_phy_config
,
4016 [RTL_GIGA_MAC_VER_29
] = rtl8105e_hw_phy_config
,
4017 [RTL_GIGA_MAC_VER_30
] = rtl8105e_hw_phy_config
,
4018 [RTL_GIGA_MAC_VER_31
] = NULL
,
4019 [RTL_GIGA_MAC_VER_32
] = rtl8168e_1_hw_phy_config
,
4020 [RTL_GIGA_MAC_VER_33
] = rtl8168e_1_hw_phy_config
,
4021 [RTL_GIGA_MAC_VER_34
] = rtl8168e_2_hw_phy_config
,
4022 [RTL_GIGA_MAC_VER_35
] = rtl8168f_1_hw_phy_config
,
4023 [RTL_GIGA_MAC_VER_36
] = rtl8168f_2_hw_phy_config
,
4024 [RTL_GIGA_MAC_VER_37
] = rtl8402_hw_phy_config
,
4025 [RTL_GIGA_MAC_VER_38
] = rtl8411_hw_phy_config
,
4026 [RTL_GIGA_MAC_VER_39
] = rtl8106e_hw_phy_config
,
4027 [RTL_GIGA_MAC_VER_40
] = rtl8168g_1_hw_phy_config
,
4028 [RTL_GIGA_MAC_VER_41
] = NULL
,
4029 [RTL_GIGA_MAC_VER_42
] = rtl8168g_2_hw_phy_config
,
4030 [RTL_GIGA_MAC_VER_43
] = rtl8168g_2_hw_phy_config
,
4031 [RTL_GIGA_MAC_VER_44
] = rtl8168g_2_hw_phy_config
,
4032 [RTL_GIGA_MAC_VER_45
] = rtl8168h_1_hw_phy_config
,
4033 [RTL_GIGA_MAC_VER_46
] = rtl8168h_2_hw_phy_config
,
4034 [RTL_GIGA_MAC_VER_47
] = rtl8168h_1_hw_phy_config
,
4035 [RTL_GIGA_MAC_VER_48
] = rtl8168h_2_hw_phy_config
,
4036 [RTL_GIGA_MAC_VER_49
] = rtl8168ep_1_hw_phy_config
,
4037 [RTL_GIGA_MAC_VER_50
] = rtl8168ep_2_hw_phy_config
,
4038 [RTL_GIGA_MAC_VER_51
] = rtl8168ep_2_hw_phy_config
,
4040 struct rtl8169_private
*tp
= netdev_priv(dev
);
4042 if (phy_configs
[tp
->mac_version
])
4043 phy_configs
[tp
->mac_version
](tp
);
4046 static void rtl_schedule_task(struct rtl8169_private
*tp
, enum rtl_flag flag
)
4048 if (!test_and_set_bit(flag
, tp
->wk
.flags
))
4049 schedule_work(&tp
->wk
.work
);
4052 static bool rtl_tbi_enabled(struct rtl8169_private
*tp
)
4054 return (tp
->mac_version
== RTL_GIGA_MAC_VER_01
) &&
4055 (RTL_R8(tp
, PHYstatus
) & TBI_Enable
);
4058 static void rtl8169_init_phy(struct net_device
*dev
, struct rtl8169_private
*tp
)
4060 rtl_hw_phy_config(dev
);
4062 if (tp
->mac_version
<= RTL_GIGA_MAC_VER_06
) {
4063 pci_write_config_byte(tp
->pci_dev
, PCI_LATENCY_TIMER
, 0x40);
4064 pci_write_config_byte(tp
->pci_dev
, PCI_CACHE_LINE_SIZE
, 0x08);
4065 netif_dbg(tp
, drv
, dev
,
4066 "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
4067 RTL_W8(tp
, 0x82, 0x01);
4070 /* We may have called phy_speed_down before */
4071 phy_speed_up(tp
->phydev
);
4073 genphy_soft_reset(tp
->phydev
);
4076 static void rtl_rar_set(struct rtl8169_private
*tp
, u8
*addr
)
4080 rtl_unlock_config_regs(tp
);
4082 RTL_W32(tp
, MAC4
, addr
[4] | addr
[5] << 8);
4085 RTL_W32(tp
, MAC0
, addr
[0] | addr
[1] << 8 | addr
[2] << 16 | addr
[3] << 24);
4088 if (tp
->mac_version
== RTL_GIGA_MAC_VER_34
)
4089 rtl_rar_exgmac_set(tp
, addr
);
4091 rtl_lock_config_regs(tp
);
4093 rtl_unlock_work(tp
);
4096 static int rtl_set_mac_address(struct net_device
*dev
, void *p
)
4098 struct rtl8169_private
*tp
= netdev_priv(dev
);
4099 struct device
*d
= tp_to_dev(tp
);
4102 ret
= eth_mac_addr(dev
, p
);
4106 pm_runtime_get_noresume(d
);
4108 if (pm_runtime_active(d
))
4109 rtl_rar_set(tp
, dev
->dev_addr
);
4111 pm_runtime_put_noidle(d
);
4116 static int rtl8169_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
4118 struct rtl8169_private
*tp
= netdev_priv(dev
);
4120 if (!netif_running(dev
))
4123 return phy_mii_ioctl(tp
->phydev
, ifr
, cmd
);
4126 static void rtl_init_mdio_ops(struct rtl8169_private
*tp
)
4128 struct mdio_ops
*ops
= &tp
->mdio_ops
;
4130 switch (tp
->mac_version
) {
4131 case RTL_GIGA_MAC_VER_27
:
4132 ops
->write
= r8168dp_1_mdio_write
;
4133 ops
->read
= r8168dp_1_mdio_read
;
4135 case RTL_GIGA_MAC_VER_28
:
4136 case RTL_GIGA_MAC_VER_31
:
4137 ops
->write
= r8168dp_2_mdio_write
;
4138 ops
->read
= r8168dp_2_mdio_read
;
4140 case RTL_GIGA_MAC_VER_40
... RTL_GIGA_MAC_VER_51
:
4141 ops
->write
= r8168g_mdio_write
;
4142 ops
->read
= r8168g_mdio_read
;
4145 ops
->write
= r8169_mdio_write
;
4146 ops
->read
= r8169_mdio_read
;
4151 static void rtl_wol_suspend_quirk(struct rtl8169_private
*tp
)
4153 switch (tp
->mac_version
) {
4154 case RTL_GIGA_MAC_VER_25
:
4155 case RTL_GIGA_MAC_VER_26
:
4156 case RTL_GIGA_MAC_VER_29
:
4157 case RTL_GIGA_MAC_VER_30
:
4158 case RTL_GIGA_MAC_VER_32
:
4159 case RTL_GIGA_MAC_VER_33
:
4160 case RTL_GIGA_MAC_VER_34
:
4161 case RTL_GIGA_MAC_VER_37
... RTL_GIGA_MAC_VER_51
:
4162 RTL_W32(tp
, RxConfig
, RTL_R32(tp
, RxConfig
) |
4163 AcceptBroadcast
| AcceptMulticast
| AcceptMyPhys
);
4170 static void r8168_pll_power_down(struct rtl8169_private
*tp
)
4172 if (r8168_check_dash(tp
))
4175 if (tp
->mac_version
== RTL_GIGA_MAC_VER_32
||
4176 tp
->mac_version
== RTL_GIGA_MAC_VER_33
)
4177 rtl_ephy_write(tp
, 0x19, 0xff64);
4179 if (device_may_wakeup(tp_to_dev(tp
))) {
4180 phy_speed_down(tp
->phydev
, false);
4181 rtl_wol_suspend_quirk(tp
);
4185 switch (tp
->mac_version
) {
4186 case RTL_GIGA_MAC_VER_25
... RTL_GIGA_MAC_VER_33
:
4187 case RTL_GIGA_MAC_VER_37
:
4188 case RTL_GIGA_MAC_VER_39
:
4189 case RTL_GIGA_MAC_VER_43
:
4190 case RTL_GIGA_MAC_VER_44
:
4191 case RTL_GIGA_MAC_VER_45
:
4192 case RTL_GIGA_MAC_VER_46
:
4193 case RTL_GIGA_MAC_VER_47
:
4194 case RTL_GIGA_MAC_VER_48
:
4195 case RTL_GIGA_MAC_VER_50
:
4196 case RTL_GIGA_MAC_VER_51
:
4197 RTL_W8(tp
, PMCH
, RTL_R8(tp
, PMCH
) & ~0x80);
4199 case RTL_GIGA_MAC_VER_40
:
4200 case RTL_GIGA_MAC_VER_41
:
4201 case RTL_GIGA_MAC_VER_49
:
4202 rtl_eri_clear_bits(tp
, 0x1a8, ERIAR_MASK_1111
, 0xfc000000);
4203 RTL_W8(tp
, PMCH
, RTL_R8(tp
, PMCH
) & ~0x80);
4208 static void r8168_pll_power_up(struct rtl8169_private
*tp
)
4210 switch (tp
->mac_version
) {
4211 case RTL_GIGA_MAC_VER_25
... RTL_GIGA_MAC_VER_33
:
4212 case RTL_GIGA_MAC_VER_37
:
4213 case RTL_GIGA_MAC_VER_39
:
4214 case RTL_GIGA_MAC_VER_43
:
4215 RTL_W8(tp
, PMCH
, RTL_R8(tp
, PMCH
) | 0x80);
4217 case RTL_GIGA_MAC_VER_44
:
4218 case RTL_GIGA_MAC_VER_45
:
4219 case RTL_GIGA_MAC_VER_46
:
4220 case RTL_GIGA_MAC_VER_47
:
4221 case RTL_GIGA_MAC_VER_48
:
4222 case RTL_GIGA_MAC_VER_50
:
4223 case RTL_GIGA_MAC_VER_51
:
4224 RTL_W8(tp
, PMCH
, RTL_R8(tp
, PMCH
) | 0xc0);
4226 case RTL_GIGA_MAC_VER_40
:
4227 case RTL_GIGA_MAC_VER_41
:
4228 case RTL_GIGA_MAC_VER_49
:
4229 RTL_W8(tp
, PMCH
, RTL_R8(tp
, PMCH
) | 0xc0);
4230 rtl_eri_set_bits(tp
, 0x1a8, ERIAR_MASK_1111
, 0xfc000000);
4234 phy_resume(tp
->phydev
);
4235 /* give MAC/PHY some time to resume */
4239 static void rtl_pll_power_down(struct rtl8169_private
*tp
)
4241 switch (tp
->mac_version
) {
4242 case RTL_GIGA_MAC_VER_01
... RTL_GIGA_MAC_VER_06
:
4243 case RTL_GIGA_MAC_VER_13
... RTL_GIGA_MAC_VER_15
:
4246 r8168_pll_power_down(tp
);
4250 static void rtl_pll_power_up(struct rtl8169_private
*tp
)
4252 switch (tp
->mac_version
) {
4253 case RTL_GIGA_MAC_VER_01
... RTL_GIGA_MAC_VER_06
:
4254 case RTL_GIGA_MAC_VER_13
... RTL_GIGA_MAC_VER_15
:
4257 r8168_pll_power_up(tp
);
4261 static void rtl_init_rxcfg(struct rtl8169_private
*tp
)
4263 switch (tp
->mac_version
) {
4264 case RTL_GIGA_MAC_VER_01
... RTL_GIGA_MAC_VER_06
:
4265 case RTL_GIGA_MAC_VER_10
... RTL_GIGA_MAC_VER_17
:
4266 RTL_W32(tp
, RxConfig
, RX_FIFO_THRESH
| RX_DMA_BURST
);
4268 case RTL_GIGA_MAC_VER_18
... RTL_GIGA_MAC_VER_24
:
4269 case RTL_GIGA_MAC_VER_34
... RTL_GIGA_MAC_VER_36
:
4270 case RTL_GIGA_MAC_VER_38
:
4271 RTL_W32(tp
, RxConfig
, RX128_INT_EN
| RX_MULTI_EN
| RX_DMA_BURST
);
4273 case RTL_GIGA_MAC_VER_40
... RTL_GIGA_MAC_VER_51
:
4274 RTL_W32(tp
, RxConfig
, RX128_INT_EN
| RX_MULTI_EN
| RX_DMA_BURST
| RX_EARLY_OFF
);
4277 RTL_W32(tp
, RxConfig
, RX128_INT_EN
| RX_DMA_BURST
);
4282 static void rtl8169_init_ring_indexes(struct rtl8169_private
*tp
)
4284 tp
->dirty_tx
= tp
->cur_tx
= tp
->cur_rx
= 0;
4287 static void rtl_hw_jumbo_enable(struct rtl8169_private
*tp
)
4289 if (tp
->jumbo_ops
.enable
) {
4290 rtl_unlock_config_regs(tp
);
4291 tp
->jumbo_ops
.enable(tp
);
4292 rtl_lock_config_regs(tp
);
4296 static void rtl_hw_jumbo_disable(struct rtl8169_private
*tp
)
4298 if (tp
->jumbo_ops
.disable
) {
4299 rtl_unlock_config_regs(tp
);
4300 tp
->jumbo_ops
.disable(tp
);
4301 rtl_lock_config_regs(tp
);
4305 static void r8168c_hw_jumbo_enable(struct rtl8169_private
*tp
)
4307 RTL_W8(tp
, Config3
, RTL_R8(tp
, Config3
) | Jumbo_En0
);
4308 RTL_W8(tp
, Config4
, RTL_R8(tp
, Config4
) | Jumbo_En1
);
4309 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_512B
);
4312 static void r8168c_hw_jumbo_disable(struct rtl8169_private
*tp
)
4314 RTL_W8(tp
, Config3
, RTL_R8(tp
, Config3
) & ~Jumbo_En0
);
4315 RTL_W8(tp
, Config4
, RTL_R8(tp
, Config4
) & ~Jumbo_En1
);
4316 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
4319 static void r8168dp_hw_jumbo_enable(struct rtl8169_private
*tp
)
4321 RTL_W8(tp
, Config3
, RTL_R8(tp
, Config3
) | Jumbo_En0
);
4324 static void r8168dp_hw_jumbo_disable(struct rtl8169_private
*tp
)
4326 RTL_W8(tp
, Config3
, RTL_R8(tp
, Config3
) & ~Jumbo_En0
);
4329 static void r8168e_hw_jumbo_enable(struct rtl8169_private
*tp
)
4331 RTL_W8(tp
, MaxTxPacketSize
, 0x3f);
4332 RTL_W8(tp
, Config3
, RTL_R8(tp
, Config3
) | Jumbo_En0
);
4333 RTL_W8(tp
, Config4
, RTL_R8(tp
, Config4
) | 0x01);
4334 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_512B
);
4337 static void r8168e_hw_jumbo_disable(struct rtl8169_private
*tp
)
4339 RTL_W8(tp
, MaxTxPacketSize
, 0x0c);
4340 RTL_W8(tp
, Config3
, RTL_R8(tp
, Config3
) & ~Jumbo_En0
);
4341 RTL_W8(tp
, Config4
, RTL_R8(tp
, Config4
) & ~0x01);
4342 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
4345 static void r8168b_0_hw_jumbo_enable(struct rtl8169_private
*tp
)
4347 rtl_tx_performance_tweak(tp
,
4348 PCI_EXP_DEVCTL_READRQ_512B
| PCI_EXP_DEVCTL_NOSNOOP_EN
);
4351 static void r8168b_0_hw_jumbo_disable(struct rtl8169_private
*tp
)
4353 rtl_tx_performance_tweak(tp
,
4354 PCI_EXP_DEVCTL_READRQ_4096B
| PCI_EXP_DEVCTL_NOSNOOP_EN
);
4357 static void r8168b_1_hw_jumbo_enable(struct rtl8169_private
*tp
)
4359 r8168b_0_hw_jumbo_enable(tp
);
4361 RTL_W8(tp
, Config4
, RTL_R8(tp
, Config4
) | (1 << 0));
4364 static void r8168b_1_hw_jumbo_disable(struct rtl8169_private
*tp
)
4366 r8168b_0_hw_jumbo_disable(tp
);
4368 RTL_W8(tp
, Config4
, RTL_R8(tp
, Config4
) & ~(1 << 0));
4371 static void rtl_init_jumbo_ops(struct rtl8169_private
*tp
)
4373 struct jumbo_ops
*ops
= &tp
->jumbo_ops
;
4375 switch (tp
->mac_version
) {
4376 case RTL_GIGA_MAC_VER_11
:
4377 ops
->disable
= r8168b_0_hw_jumbo_disable
;
4378 ops
->enable
= r8168b_0_hw_jumbo_enable
;
4380 case RTL_GIGA_MAC_VER_12
:
4381 case RTL_GIGA_MAC_VER_17
:
4382 ops
->disable
= r8168b_1_hw_jumbo_disable
;
4383 ops
->enable
= r8168b_1_hw_jumbo_enable
;
4385 case RTL_GIGA_MAC_VER_18
: /* Wild guess. Needs info from Realtek. */
4386 case RTL_GIGA_MAC_VER_19
:
4387 case RTL_GIGA_MAC_VER_20
:
4388 case RTL_GIGA_MAC_VER_21
: /* Wild guess. Needs info from Realtek. */
4389 case RTL_GIGA_MAC_VER_22
:
4390 case RTL_GIGA_MAC_VER_23
:
4391 case RTL_GIGA_MAC_VER_24
:
4392 case RTL_GIGA_MAC_VER_25
:
4393 case RTL_GIGA_MAC_VER_26
:
4394 ops
->disable
= r8168c_hw_jumbo_disable
;
4395 ops
->enable
= r8168c_hw_jumbo_enable
;
4397 case RTL_GIGA_MAC_VER_27
:
4398 case RTL_GIGA_MAC_VER_28
:
4399 ops
->disable
= r8168dp_hw_jumbo_disable
;
4400 ops
->enable
= r8168dp_hw_jumbo_enable
;
4402 case RTL_GIGA_MAC_VER_31
: /* Wild guess. Needs info from Realtek. */
4403 case RTL_GIGA_MAC_VER_32
:
4404 case RTL_GIGA_MAC_VER_33
:
4405 case RTL_GIGA_MAC_VER_34
:
4406 ops
->disable
= r8168e_hw_jumbo_disable
;
4407 ops
->enable
= r8168e_hw_jumbo_enable
;
4411 * No action needed for jumbo frames with 8169.
4412 * No jumbo for 810x at all.
4414 case RTL_GIGA_MAC_VER_40
... RTL_GIGA_MAC_VER_51
:
4416 ops
->disable
= NULL
;
4422 DECLARE_RTL_COND(rtl_chipcmd_cond
)
4424 return RTL_R8(tp
, ChipCmd
) & CmdReset
;
4427 static void rtl_hw_reset(struct rtl8169_private
*tp
)
4429 RTL_W8(tp
, ChipCmd
, CmdReset
);
4431 rtl_udelay_loop_wait_low(tp
, &rtl_chipcmd_cond
, 100, 100);
4434 static void rtl_request_firmware(struct rtl8169_private
*tp
)
4436 struct rtl_fw
*rtl_fw
;
4439 /* firmware loaded already or no firmware available */
4440 if (tp
->rtl_fw
|| !tp
->fw_name
)
4443 rtl_fw
= kzalloc(sizeof(*rtl_fw
), GFP_KERNEL
);
4447 rc
= request_firmware(&rtl_fw
->fw
, tp
->fw_name
, tp_to_dev(tp
));
4451 rc
= rtl_check_firmware(tp
, rtl_fw
);
4453 goto err_release_firmware
;
4455 tp
->rtl_fw
= rtl_fw
;
4459 err_release_firmware
:
4460 release_firmware(rtl_fw
->fw
);
4464 netif_warn(tp
, ifup
, tp
->dev
, "unable to load firmware patch %s (%d)\n",
4468 static void rtl_rx_close(struct rtl8169_private
*tp
)
4470 RTL_W32(tp
, RxConfig
, RTL_R32(tp
, RxConfig
) & ~RX_CONFIG_ACCEPT_MASK
);
4473 DECLARE_RTL_COND(rtl_npq_cond
)
4475 return RTL_R8(tp
, TxPoll
) & NPQ
;
4478 DECLARE_RTL_COND(rtl_txcfg_empty_cond
)
4480 return RTL_R32(tp
, TxConfig
) & TXCFG_EMPTY
;
4483 static void rtl8169_hw_reset(struct rtl8169_private
*tp
)
4485 /* Disable interrupts */
4486 rtl8169_irq_mask_and_ack(tp
);
4490 switch (tp
->mac_version
) {
4491 case RTL_GIGA_MAC_VER_27
:
4492 case RTL_GIGA_MAC_VER_28
:
4493 case RTL_GIGA_MAC_VER_31
:
4494 rtl_udelay_loop_wait_low(tp
, &rtl_npq_cond
, 20, 42*42);
4496 case RTL_GIGA_MAC_VER_34
... RTL_GIGA_MAC_VER_38
:
4497 case RTL_GIGA_MAC_VER_40
... RTL_GIGA_MAC_VER_51
:
4498 RTL_W8(tp
, ChipCmd
, RTL_R8(tp
, ChipCmd
) | StopReq
);
4499 rtl_udelay_loop_wait_high(tp
, &rtl_txcfg_empty_cond
, 100, 666);
4502 RTL_W8(tp
, ChipCmd
, RTL_R8(tp
, ChipCmd
) | StopReq
);
4510 static void rtl_set_tx_config_registers(struct rtl8169_private
*tp
)
4512 u32 val
= TX_DMA_BURST
<< TxDMAShift
|
4513 InterFrameGap
<< TxInterFrameGapShift
;
4515 if (tp
->mac_version
>= RTL_GIGA_MAC_VER_34
&&
4516 tp
->mac_version
!= RTL_GIGA_MAC_VER_39
)
4517 val
|= TXCFG_AUTO_FIFO
;
4519 RTL_W32(tp
, TxConfig
, val
);
4522 static void rtl_set_rx_max_size(struct rtl8169_private
*tp
)
4524 /* Low hurts. Let's disable the filtering. */
4525 RTL_W16(tp
, RxMaxSize
, R8169_RX_BUF_SIZE
+ 1);
4528 static void rtl_set_rx_tx_desc_registers(struct rtl8169_private
*tp
)
4531 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
4532 * register to be written before TxDescAddrLow to work.
4533 * Switching from MMIO to I/O access fixes the issue as well.
4535 RTL_W32(tp
, TxDescStartAddrHigh
, ((u64
) tp
->TxPhyAddr
) >> 32);
4536 RTL_W32(tp
, TxDescStartAddrLow
, ((u64
) tp
->TxPhyAddr
) & DMA_BIT_MASK(32));
4537 RTL_W32(tp
, RxDescAddrHigh
, ((u64
) tp
->RxPhyAddr
) >> 32);
4538 RTL_W32(tp
, RxDescAddrLow
, ((u64
) tp
->RxPhyAddr
) & DMA_BIT_MASK(32));
4541 static void rtl8169_set_magic_reg(struct rtl8169_private
*tp
, unsigned mac_version
)
4545 if (tp
->mac_version
== RTL_GIGA_MAC_VER_05
)
4547 else if (tp
->mac_version
== RTL_GIGA_MAC_VER_06
)
4552 if (RTL_R8(tp
, Config2
) & PCI_Clock_66MHz
)
4555 RTL_W32(tp
, 0x7c, val
);
4558 static void rtl_set_rx_mode(struct net_device
*dev
)
4560 struct rtl8169_private
*tp
= netdev_priv(dev
);
4561 u32 mc_filter
[2]; /* Multicast hash filter */
4565 if (dev
->flags
& IFF_PROMISC
) {
4566 /* Unconditionally log net taps. */
4567 netif_notice(tp
, link
, dev
, "Promiscuous mode enabled\n");
4569 AcceptBroadcast
| AcceptMulticast
| AcceptMyPhys
|
4571 mc_filter
[1] = mc_filter
[0] = 0xffffffff;
4572 } else if ((netdev_mc_count(dev
) > multicast_filter_limit
) ||
4573 (dev
->flags
& IFF_ALLMULTI
)) {
4574 /* Too many to filter perfectly -- accept all multicasts. */
4575 rx_mode
= AcceptBroadcast
| AcceptMulticast
| AcceptMyPhys
;
4576 mc_filter
[1] = mc_filter
[0] = 0xffffffff;
4578 struct netdev_hw_addr
*ha
;
4580 rx_mode
= AcceptBroadcast
| AcceptMyPhys
;
4581 mc_filter
[1] = mc_filter
[0] = 0;
4582 netdev_for_each_mc_addr(ha
, dev
) {
4583 int bit_nr
= ether_crc(ETH_ALEN
, ha
->addr
) >> 26;
4584 mc_filter
[bit_nr
>> 5] |= 1 << (bit_nr
& 31);
4585 rx_mode
|= AcceptMulticast
;
4589 if (dev
->features
& NETIF_F_RXALL
)
4590 rx_mode
|= (AcceptErr
| AcceptRunt
);
4592 tmp
= (RTL_R32(tp
, RxConfig
) & ~RX_CONFIG_ACCEPT_MASK
) | rx_mode
;
4594 if (tp
->mac_version
> RTL_GIGA_MAC_VER_06
) {
4595 u32 data
= mc_filter
[0];
4597 mc_filter
[0] = swab32(mc_filter
[1]);
4598 mc_filter
[1] = swab32(data
);
4601 if (tp
->mac_version
== RTL_GIGA_MAC_VER_35
)
4602 mc_filter
[1] = mc_filter
[0] = 0xffffffff;
4604 RTL_W32(tp
, MAR0
+ 4, mc_filter
[1]);
4605 RTL_W32(tp
, MAR0
+ 0, mc_filter
[0]);
4607 RTL_W32(tp
, RxConfig
, tmp
);
4610 static void rtl_hw_start(struct rtl8169_private
*tp
)
4612 rtl_unlock_config_regs(tp
);
4616 rtl_set_rx_max_size(tp
);
4617 rtl_set_rx_tx_desc_registers(tp
);
4618 rtl_lock_config_regs(tp
);
4620 /* disable interrupt coalescing */
4621 RTL_W16(tp
, IntrMitigate
, 0x0000);
4622 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
4623 RTL_R8(tp
, IntrMask
);
4624 RTL_W8(tp
, ChipCmd
, CmdTxEnb
| CmdRxEnb
);
4626 rtl_set_tx_config_registers(tp
);
4628 rtl_set_rx_mode(tp
->dev
);
4629 /* no early-rx interrupts */
4630 RTL_W16(tp
, MultiIntr
, RTL_R16(tp
, MultiIntr
) & 0xf000);
4634 static void rtl_hw_start_8169(struct rtl8169_private
*tp
)
4636 if (tp
->mac_version
== RTL_GIGA_MAC_VER_05
)
4637 pci_write_config_byte(tp
->pci_dev
, PCI_CACHE_LINE_SIZE
, 0x08);
4639 RTL_W8(tp
, EarlyTxThres
, NoEarlyTx
);
4641 tp
->cp_cmd
|= PCIMulRW
;
4643 if (tp
->mac_version
== RTL_GIGA_MAC_VER_02
||
4644 tp
->mac_version
== RTL_GIGA_MAC_VER_03
) {
4645 netif_dbg(tp
, drv
, tp
->dev
,
4646 "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n");
4647 tp
->cp_cmd
|= (1 << 14);
4650 RTL_W16(tp
, CPlusCmd
, tp
->cp_cmd
);
4652 rtl8169_set_magic_reg(tp
, tp
->mac_version
);
4654 RTL_W32(tp
, RxMissed
, 0);
4657 DECLARE_RTL_COND(rtl_csiar_cond
)
4659 return RTL_R32(tp
, CSIAR
) & CSIAR_FLAG
;
4662 static void rtl_csi_write(struct rtl8169_private
*tp
, int addr
, int value
)
4664 u32 func
= PCI_FUNC(tp
->pci_dev
->devfn
);
4666 RTL_W32(tp
, CSIDR
, value
);
4667 RTL_W32(tp
, CSIAR
, CSIAR_WRITE_CMD
| (addr
& CSIAR_ADDR_MASK
) |
4668 CSIAR_BYTE_ENABLE
| func
<< 16);
4670 rtl_udelay_loop_wait_low(tp
, &rtl_csiar_cond
, 10, 100);
4673 static u32
rtl_csi_read(struct rtl8169_private
*tp
, int addr
)
4675 u32 func
= PCI_FUNC(tp
->pci_dev
->devfn
);
4677 RTL_W32(tp
, CSIAR
, (addr
& CSIAR_ADDR_MASK
) | func
<< 16 |
4680 return rtl_udelay_loop_wait_high(tp
, &rtl_csiar_cond
, 10, 100) ?
4681 RTL_R32(tp
, CSIDR
) : ~0;
4684 static void rtl_csi_access_enable(struct rtl8169_private
*tp
, u8 val
)
4686 struct pci_dev
*pdev
= tp
->pci_dev
;
4689 /* According to Realtek the value at config space address 0x070f
4690 * controls the L0s/L1 entrance latency. We try standard ECAM access
4691 * first and if it fails fall back to CSI.
4693 if (pdev
->cfg_size
> 0x070f &&
4694 pci_write_config_byte(pdev
, 0x070f, val
) == PCIBIOS_SUCCESSFUL
)
4697 netdev_notice_once(tp
->dev
,
4698 "No native access to PCI extended config space, falling back to CSI\n");
4699 csi
= rtl_csi_read(tp
, 0x070c) & 0x00ffffff;
4700 rtl_csi_write(tp
, 0x070c, csi
| val
<< 24);
/* Apply the driver's default ASPM entrance latency value. */
static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x27);
}
4709 unsigned int offset
;
4714 static void __rtl_ephy_init(struct rtl8169_private
*tp
,
4715 const struct ephy_info
*e
, int len
)
4720 w
= (rtl_ephy_read(tp
, e
->offset
) & ~e
->mask
) | e
->bits
;
4721 rtl_ephy_write(tp
, e
->offset
, w
);
4726 #define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a))
4728 static void rtl_disable_clock_request(struct rtl8169_private
*tp
)
4730 pcie_capability_clear_word(tp
->pci_dev
, PCI_EXP_LNKCTL
,
4731 PCI_EXP_LNKCTL_CLKREQ_EN
);
4734 static void rtl_enable_clock_request(struct rtl8169_private
*tp
)
4736 pcie_capability_set_word(tp
->pci_dev
, PCI_EXP_LNKCTL
,
4737 PCI_EXP_LNKCTL_CLKREQ_EN
);
4740 static void rtl_pcie_state_l2l3_disable(struct rtl8169_private
*tp
)
4742 /* work around an issue when PCI reset occurs during L2/L3 state */
4743 RTL_W8(tp
, Config3
, RTL_R8(tp
, Config3
) & ~Rdy_to_L23
);
4746 static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private
*tp
, bool enable
)
4749 RTL_W8(tp
, Config5
, RTL_R8(tp
, Config5
) | ASPM_en
);
4750 RTL_W8(tp
, Config2
, RTL_R8(tp
, Config2
) | ClkReqEn
);
4752 RTL_W8(tp
, Config2
, RTL_R8(tp
, Config2
) & ~ClkReqEn
);
4753 RTL_W8(tp
, Config5
, RTL_R8(tp
, Config5
) & ~ASPM_en
);
4759 static void rtl_set_fifo_size(struct rtl8169_private
*tp
, u16 rx_stat
,
4760 u16 tx_stat
, u16 rx_dyn
, u16 tx_dyn
)
4762 /* Usage of dynamic vs. static FIFO is controlled by bit
4763 * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known.
4765 rtl_eri_write(tp
, 0xc8, ERIAR_MASK_1111
, (rx_stat
<< 16) | rx_dyn
);
4766 rtl_eri_write(tp
, 0xe8, ERIAR_MASK_1111
, (tx_stat
<< 16) | tx_dyn
);
4769 static void rtl8168g_set_pause_thresholds(struct rtl8169_private
*tp
,
4772 /* FIFO thresholds for pause flow control */
4773 rtl_eri_write(tp
, 0xcc, ERIAR_MASK_0001
, low
);
4774 rtl_eri_write(tp
, 0xd0, ERIAR_MASK_0001
, high
);
4777 static void rtl_hw_start_8168bb(struct rtl8169_private
*tp
)
4779 RTL_W8(tp
, Config3
, RTL_R8(tp
, Config3
) & ~Beacon_en
);
4781 tp
->cp_cmd
&= CPCMD_QUIRK_MASK
;
4782 RTL_W16(tp
, CPlusCmd
, tp
->cp_cmd
);
4784 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
4785 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
|
4786 PCI_EXP_DEVCTL_NOSNOOP_EN
);
4790 static void rtl_hw_start_8168bef(struct rtl8169_private
*tp
)
4792 rtl_hw_start_8168bb(tp
);
4794 RTL_W8(tp
, MaxTxPacketSize
, TxPacketMax
);
4796 RTL_W8(tp
, Config4
, RTL_R8(tp
, Config4
) & ~(1 << 0));
4799 static void __rtl_hw_start_8168cp(struct rtl8169_private
*tp
)
4801 RTL_W8(tp
, Config1
, RTL_R8(tp
, Config1
) | Speed_down
);
4803 RTL_W8(tp
, Config3
, RTL_R8(tp
, Config3
) & ~Beacon_en
);
4805 if (tp
->dev
->mtu
<= ETH_DATA_LEN
)
4806 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
4808 rtl_disable_clock_request(tp
);
4810 tp
->cp_cmd
&= CPCMD_QUIRK_MASK
;
4811 RTL_W16(tp
, CPlusCmd
, tp
->cp_cmd
);
4814 static void rtl_hw_start_8168cp_1(struct rtl8169_private
*tp
)
4816 static const struct ephy_info e_info_8168cp
[] = {
4817 { 0x01, 0, 0x0001 },
4818 { 0x02, 0x0800, 0x1000 },
4819 { 0x03, 0, 0x0042 },
4820 { 0x06, 0x0080, 0x0000 },
4824 rtl_set_def_aspm_entry_latency(tp
);
4826 rtl_ephy_init(tp
, e_info_8168cp
);
4828 __rtl_hw_start_8168cp(tp
);
4831 static void rtl_hw_start_8168cp_2(struct rtl8169_private
*tp
)
4833 rtl_set_def_aspm_entry_latency(tp
);
4835 RTL_W8(tp
, Config3
, RTL_R8(tp
, Config3
) & ~Beacon_en
);
4837 if (tp
->dev
->mtu
<= ETH_DATA_LEN
)
4838 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
4840 tp
->cp_cmd
&= CPCMD_QUIRK_MASK
;
4841 RTL_W16(tp
, CPlusCmd
, tp
->cp_cmd
);
4844 static void rtl_hw_start_8168cp_3(struct rtl8169_private
*tp
)
4846 rtl_set_def_aspm_entry_latency(tp
);
4848 RTL_W8(tp
, Config3
, RTL_R8(tp
, Config3
) & ~Beacon_en
);
4851 RTL_W8(tp
, DBG_REG
, 0x20);
4853 RTL_W8(tp
, MaxTxPacketSize
, TxPacketMax
);
4855 if (tp
->dev
->mtu
<= ETH_DATA_LEN
)
4856 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
4858 tp
->cp_cmd
&= CPCMD_QUIRK_MASK
;
4859 RTL_W16(tp
, CPlusCmd
, tp
->cp_cmd
);
4862 static void rtl_hw_start_8168c_1(struct rtl8169_private
*tp
)
4864 static const struct ephy_info e_info_8168c_1
[] = {
4865 { 0x02, 0x0800, 0x1000 },
4866 { 0x03, 0, 0x0002 },
4867 { 0x06, 0x0080, 0x0000 }
4870 rtl_set_def_aspm_entry_latency(tp
);
4872 RTL_W8(tp
, DBG_REG
, 0x06 | FIX_NAK_1
| FIX_NAK_2
);
4874 rtl_ephy_init(tp
, e_info_8168c_1
);
4876 __rtl_hw_start_8168cp(tp
);
4879 static void rtl_hw_start_8168c_2(struct rtl8169_private
*tp
)
4881 static const struct ephy_info e_info_8168c_2
[] = {
4882 { 0x01, 0, 0x0001 },
4883 { 0x03, 0x0400, 0x0220 }
4886 rtl_set_def_aspm_entry_latency(tp
);
4888 rtl_ephy_init(tp
, e_info_8168c_2
);
4890 __rtl_hw_start_8168cp(tp
);
/* 8168c rev 3 uses the same sequence as rev 2. */
static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8168c_2(tp);
}
/* Chip start for 8168c rev 4: no EPHY fixups needed. */
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
	rtl_set_def_aspm_entry_latency(tp);

	__rtl_hw_start_8168cp(tp);
}
4905 static void rtl_hw_start_8168d(struct rtl8169_private
*tp
)
4907 rtl_set_def_aspm_entry_latency(tp
);
4909 rtl_disable_clock_request(tp
);
4911 RTL_W8(tp
, MaxTxPacketSize
, TxPacketMax
);
4913 if (tp
->dev
->mtu
<= ETH_DATA_LEN
)
4914 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
4916 tp
->cp_cmd
&= CPCMD_QUIRK_MASK
;
4917 RTL_W16(tp
, CPlusCmd
, tp
->cp_cmd
);
4920 static void rtl_hw_start_8168dp(struct rtl8169_private
*tp
)
4922 rtl_set_def_aspm_entry_latency(tp
);
4924 if (tp
->dev
->mtu
<= ETH_DATA_LEN
)
4925 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
4927 RTL_W8(tp
, MaxTxPacketSize
, TxPacketMax
);
4929 rtl_disable_clock_request(tp
);
4932 static void rtl_hw_start_8168d_4(struct rtl8169_private
*tp
)
4934 static const struct ephy_info e_info_8168d_4
[] = {
4935 { 0x0b, 0x0000, 0x0048 },
4936 { 0x19, 0x0020, 0x0050 },
4937 { 0x0c, 0x0100, 0x0020 }
4940 rtl_set_def_aspm_entry_latency(tp
);
4942 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
4944 RTL_W8(tp
, MaxTxPacketSize
, TxPacketMax
);
4946 rtl_ephy_init(tp
, e_info_8168d_4
);
4948 rtl_enable_clock_request(tp
);
4951 static void rtl_hw_start_8168e_1(struct rtl8169_private
*tp
)
4953 static const struct ephy_info e_info_8168e_1
[] = {
4954 { 0x00, 0x0200, 0x0100 },
4955 { 0x00, 0x0000, 0x0004 },
4956 { 0x06, 0x0002, 0x0001 },
4957 { 0x06, 0x0000, 0x0030 },
4958 { 0x07, 0x0000, 0x2000 },
4959 { 0x00, 0x0000, 0x0020 },
4960 { 0x03, 0x5800, 0x2000 },
4961 { 0x03, 0x0000, 0x0001 },
4962 { 0x01, 0x0800, 0x1000 },
4963 { 0x07, 0x0000, 0x4000 },
4964 { 0x1e, 0x0000, 0x2000 },
4965 { 0x19, 0xffff, 0xfe6c },
4966 { 0x0a, 0x0000, 0x0040 }
4969 rtl_set_def_aspm_entry_latency(tp
);
4971 rtl_ephy_init(tp
, e_info_8168e_1
);
4973 if (tp
->dev
->mtu
<= ETH_DATA_LEN
)
4974 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
4976 RTL_W8(tp
, MaxTxPacketSize
, TxPacketMax
);
4978 rtl_disable_clock_request(tp
);
4980 /* Reset tx FIFO pointer */
4981 RTL_W32(tp
, MISC
, RTL_R32(tp
, MISC
) | TXPLA_RST
);
4982 RTL_W32(tp
, MISC
, RTL_R32(tp
, MISC
) & ~TXPLA_RST
);
4984 RTL_W8(tp
, Config5
, RTL_R8(tp
, Config5
) & ~Spi_en
);
4987 static void rtl_hw_start_8168e_2(struct rtl8169_private
*tp
)
4989 static const struct ephy_info e_info_8168e_2
[] = {
4990 { 0x09, 0x0000, 0x0080 },
4991 { 0x19, 0x0000, 0x0224 }
4994 rtl_set_def_aspm_entry_latency(tp
);
4996 rtl_ephy_init(tp
, e_info_8168e_2
);
4998 if (tp
->dev
->mtu
<= ETH_DATA_LEN
)
4999 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
5001 rtl_eri_write(tp
, 0xc0, ERIAR_MASK_0011
, 0x0000);
5002 rtl_eri_write(tp
, 0xb8, ERIAR_MASK_0011
, 0x0000);
5003 rtl_set_fifo_size(tp
, 0x10, 0x10, 0x02, 0x06);
5004 rtl_eri_write(tp
, 0xcc, ERIAR_MASK_1111
, 0x00000050);
5005 rtl_eri_write(tp
, 0xd0, ERIAR_MASK_1111
, 0x07ff0060);
5006 rtl_eri_set_bits(tp
, 0x1b0, ERIAR_MASK_0001
, BIT(4));
5007 rtl_w0w1_eri(tp
, 0x0d4, ERIAR_MASK_0011
, 0x0c00, 0xff00);
5009 RTL_W8(tp
, MaxTxPacketSize
, EarlySize
);
5011 rtl_disable_clock_request(tp
);
5013 RTL_W8(tp
, MCU
, RTL_R8(tp
, MCU
) & ~NOW_IS_OOB
);
5015 rtl8168_config_eee_mac(tp
);
5017 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) | PFM_EN
);
5018 RTL_W32(tp
, MISC
, RTL_R32(tp
, MISC
) | PWM_EN
);
5019 RTL_W8(tp
, Config5
, RTL_R8(tp
, Config5
) & ~Spi_en
);
5021 rtl_hw_aspm_clkreq_enable(tp
, true);
5024 static void rtl_hw_start_8168f(struct rtl8169_private
*tp
)
5026 rtl_set_def_aspm_entry_latency(tp
);
5028 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
5030 rtl_eri_write(tp
, 0xc0, ERIAR_MASK_0011
, 0x0000);
5031 rtl_eri_write(tp
, 0xb8, ERIAR_MASK_0011
, 0x0000);
5032 rtl_set_fifo_size(tp
, 0x10, 0x10, 0x02, 0x06);
5033 rtl_reset_packet_filter(tp
);
5034 rtl_eri_set_bits(tp
, 0x1b0, ERIAR_MASK_0001
, BIT(4));
5035 rtl_eri_set_bits(tp
, 0x1d0, ERIAR_MASK_0001
, BIT(4));
5036 rtl_eri_write(tp
, 0xcc, ERIAR_MASK_1111
, 0x00000050);
5037 rtl_eri_write(tp
, 0xd0, ERIAR_MASK_1111
, 0x00000060);
5039 RTL_W8(tp
, MaxTxPacketSize
, EarlySize
);
5041 rtl_disable_clock_request(tp
);
5043 RTL_W8(tp
, MCU
, RTL_R8(tp
, MCU
) & ~NOW_IS_OOB
);
5044 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) | PFM_EN
);
5045 RTL_W32(tp
, MISC
, RTL_R32(tp
, MISC
) | PWM_EN
);
5046 RTL_W8(tp
, Config5
, RTL_R8(tp
, Config5
) & ~Spi_en
);
5048 rtl8168_config_eee_mac(tp
);
5051 static void rtl_hw_start_8168f_1(struct rtl8169_private
*tp
)
5053 static const struct ephy_info e_info_8168f_1
[] = {
5054 { 0x06, 0x00c0, 0x0020 },
5055 { 0x08, 0x0001, 0x0002 },
5056 { 0x09, 0x0000, 0x0080 },
5057 { 0x19, 0x0000, 0x0224 }
5060 rtl_hw_start_8168f(tp
);
5062 rtl_ephy_init(tp
, e_info_8168f_1
);
5064 rtl_w0w1_eri(tp
, 0x0d4, ERIAR_MASK_0011
, 0x0c00, 0xff00);
5067 static void rtl_hw_start_8411(struct rtl8169_private
*tp
)
5069 static const struct ephy_info e_info_8168f_1
[] = {
5070 { 0x06, 0x00c0, 0x0020 },
5071 { 0x0f, 0xffff, 0x5200 },
5072 { 0x1e, 0x0000, 0x4000 },
5073 { 0x19, 0x0000, 0x0224 }
5076 rtl_hw_start_8168f(tp
);
5077 rtl_pcie_state_l2l3_disable(tp
);
5079 rtl_ephy_init(tp
, e_info_8168f_1
);
5081 rtl_eri_set_bits(tp
, 0x0d4, ERIAR_MASK_0011
, 0x0c00);
5084 static void rtl_hw_start_8168g(struct rtl8169_private
*tp
)
5086 rtl_set_fifo_size(tp
, 0x08, 0x10, 0x02, 0x06);
5087 rtl8168g_set_pause_thresholds(tp
, 0x38, 0x48);
5089 rtl_set_def_aspm_entry_latency(tp
);
5091 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
5093 rtl_reset_packet_filter(tp
);
5094 rtl_eri_write(tp
, 0x2f8, ERIAR_MASK_0011
, 0x1d8f);
5096 RTL_W32(tp
, MISC
, RTL_R32(tp
, MISC
) & ~RXDV_GATED_EN
);
5097 RTL_W8(tp
, MaxTxPacketSize
, EarlySize
);
5099 rtl_eri_write(tp
, 0xc0, ERIAR_MASK_0011
, 0x0000);
5100 rtl_eri_write(tp
, 0xb8, ERIAR_MASK_0011
, 0x0000);
5102 rtl8168_config_eee_mac(tp
);
5104 rtl_w0w1_eri(tp
, 0x2fc, ERIAR_MASK_0001
, 0x01, 0x06);
5105 rtl_eri_clear_bits(tp
, 0x1b0, ERIAR_MASK_0011
, BIT(12));
5107 rtl_pcie_state_l2l3_disable(tp
);
5110 static void rtl_hw_start_8168g_1(struct rtl8169_private
*tp
)
5112 static const struct ephy_info e_info_8168g_1
[] = {
5113 { 0x00, 0x0000, 0x0008 },
5114 { 0x0c, 0x37d0, 0x0820 },
5115 { 0x1e, 0x0000, 0x0001 },
5116 { 0x19, 0x8000, 0x0000 }
5119 rtl_hw_start_8168g(tp
);
5121 /* disable aspm and clock request before access ephy */
5122 rtl_hw_aspm_clkreq_enable(tp
, false);
5123 rtl_ephy_init(tp
, e_info_8168g_1
);
5124 rtl_hw_aspm_clkreq_enable(tp
, true);
5127 static void rtl_hw_start_8168g_2(struct rtl8169_private
*tp
)
5129 static const struct ephy_info e_info_8168g_2
[] = {
5130 { 0x00, 0x0000, 0x0008 },
5131 { 0x0c, 0x3df0, 0x0200 },
5132 { 0x19, 0xffff, 0xfc00 },
5133 { 0x1e, 0xffff, 0x20eb }
5136 rtl_hw_start_8168g(tp
);
5138 /* disable aspm and clock request before access ephy */
5139 RTL_W8(tp
, Config2
, RTL_R8(tp
, Config2
) & ~ClkReqEn
);
5140 RTL_W8(tp
, Config5
, RTL_R8(tp
, Config5
) & ~ASPM_en
);
5141 rtl_ephy_init(tp
, e_info_8168g_2
);
5144 static void rtl_hw_start_8411_2(struct rtl8169_private
*tp
)
5146 static const struct ephy_info e_info_8411_2
[] = {
5147 { 0x00, 0x0000, 0x0008 },
5148 { 0x0c, 0x3df0, 0x0200 },
5149 { 0x0f, 0xffff, 0x5200 },
5150 { 0x19, 0x0020, 0x0000 },
5151 { 0x1e, 0x0000, 0x2000 }
5154 rtl_hw_start_8168g(tp
);
5156 /* disable aspm and clock request before access ephy */
5157 rtl_hw_aspm_clkreq_enable(tp
, false);
5158 rtl_ephy_init(tp
, e_info_8411_2
);
5159 rtl_hw_aspm_clkreq_enable(tp
, true);
5162 static void rtl_hw_start_8168h_1(struct rtl8169_private
*tp
)
5166 static const struct ephy_info e_info_8168h_1
[] = {
5167 { 0x1e, 0x0800, 0x0001 },
5168 { 0x1d, 0x0000, 0x0800 },
5169 { 0x05, 0xffff, 0x2089 },
5170 { 0x06, 0xffff, 0x5881 },
5171 { 0x04, 0xffff, 0x154a },
5172 { 0x01, 0xffff, 0x068b }
5175 /* disable aspm and clock request before access ephy */
5176 rtl_hw_aspm_clkreq_enable(tp
, false);
5177 rtl_ephy_init(tp
, e_info_8168h_1
);
5179 rtl_set_fifo_size(tp
, 0x08, 0x10, 0x02, 0x06);
5180 rtl8168g_set_pause_thresholds(tp
, 0x38, 0x48);
5182 rtl_set_def_aspm_entry_latency(tp
);
5184 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
5186 rtl_reset_packet_filter(tp
);
5188 rtl_eri_set_bits(tp
, 0xdc, ERIAR_MASK_1111
, BIT(4));
5190 rtl_eri_set_bits(tp
, 0xd4, ERIAR_MASK_1111
, 0x1f00);
5192 rtl_eri_write(tp
, 0x5f0, ERIAR_MASK_0011
, 0x4f87);
5194 RTL_W32(tp
, MISC
, RTL_R32(tp
, MISC
) & ~RXDV_GATED_EN
);
5195 RTL_W8(tp
, MaxTxPacketSize
, EarlySize
);
5197 rtl_eri_write(tp
, 0xc0, ERIAR_MASK_0011
, 0x0000);
5198 rtl_eri_write(tp
, 0xb8, ERIAR_MASK_0011
, 0x0000);
5200 rtl8168_config_eee_mac(tp
);
5202 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) & ~PFM_EN
);
5203 RTL_W8(tp
, MISC_1
, RTL_R8(tp
, MISC_1
) & ~PFM_D3COLD_EN
);
5205 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) & ~TX_10M_PS_EN
);
5207 rtl_eri_clear_bits(tp
, 0x1b0, ERIAR_MASK_0011
, BIT(12));
5209 rtl_pcie_state_l2l3_disable(tp
);
5211 rtl_writephy(tp
, 0x1f, 0x0c42);
5212 rg_saw_cnt
= (rtl_readphy(tp
, 0x13) & 0x3fff);
5213 rtl_writephy(tp
, 0x1f, 0x0000);
5214 if (rg_saw_cnt
> 0) {
5217 sw_cnt_1ms_ini
= 16000000/rg_saw_cnt
;
5218 sw_cnt_1ms_ini
&= 0x0fff;
5219 data
= r8168_mac_ocp_read(tp
, 0xd412);
5221 data
|= sw_cnt_1ms_ini
;
5222 r8168_mac_ocp_write(tp
, 0xd412, data
);
5225 data
= r8168_mac_ocp_read(tp
, 0xe056);
5228 r8168_mac_ocp_write(tp
, 0xe056, data
);
5230 data
= r8168_mac_ocp_read(tp
, 0xe052);
5233 r8168_mac_ocp_write(tp
, 0xe052, data
);
5235 data
= r8168_mac_ocp_read(tp
, 0xe0d6);
5238 r8168_mac_ocp_write(tp
, 0xe0d6, data
);
5240 data
= r8168_mac_ocp_read(tp
, 0xd420);
5243 r8168_mac_ocp_write(tp
, 0xd420, data
);
5245 r8168_mac_ocp_write(tp
, 0xe63e, 0x0001);
5246 r8168_mac_ocp_write(tp
, 0xe63e, 0x0000);
5247 r8168_mac_ocp_write(tp
, 0xc094, 0x0000);
5248 r8168_mac_ocp_write(tp
, 0xc09e, 0x0000);
5250 rtl_hw_aspm_clkreq_enable(tp
, true);
5253 static void rtl_hw_start_8168ep(struct rtl8169_private
*tp
)
5255 rtl8168ep_stop_cmac(tp
);
5257 rtl_set_fifo_size(tp
, 0x08, 0x10, 0x02, 0x06);
5258 rtl8168g_set_pause_thresholds(tp
, 0x2f, 0x5f);
5260 rtl_set_def_aspm_entry_latency(tp
);
5262 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
5264 rtl_reset_packet_filter(tp
);
5266 rtl_eri_set_bits(tp
, 0xd4, ERIAR_MASK_1111
, 0x1f80);
5268 rtl_eri_write(tp
, 0x5f0, ERIAR_MASK_0011
, 0x4f87);
5270 RTL_W32(tp
, MISC
, RTL_R32(tp
, MISC
) & ~RXDV_GATED_EN
);
5271 RTL_W8(tp
, MaxTxPacketSize
, EarlySize
);
5273 rtl_eri_write(tp
, 0xc0, ERIAR_MASK_0011
, 0x0000);
5274 rtl_eri_write(tp
, 0xb8, ERIAR_MASK_0011
, 0x0000);
5276 rtl8168_config_eee_mac(tp
);
5278 rtl_w0w1_eri(tp
, 0x2fc, ERIAR_MASK_0001
, 0x01, 0x06);
5280 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) & ~TX_10M_PS_EN
);
5282 rtl_pcie_state_l2l3_disable(tp
);
5285 static void rtl_hw_start_8168ep_1(struct rtl8169_private
*tp
)
5287 static const struct ephy_info e_info_8168ep_1
[] = {
5288 { 0x00, 0xffff, 0x10ab },
5289 { 0x06, 0xffff, 0xf030 },
5290 { 0x08, 0xffff, 0x2006 },
5291 { 0x0d, 0xffff, 0x1666 },
5292 { 0x0c, 0x3ff0, 0x0000 }
5295 /* disable aspm and clock request before access ephy */
5296 rtl_hw_aspm_clkreq_enable(tp
, false);
5297 rtl_ephy_init(tp
, e_info_8168ep_1
);
5299 rtl_hw_start_8168ep(tp
);
5301 rtl_hw_aspm_clkreq_enable(tp
, true);
5304 static void rtl_hw_start_8168ep_2(struct rtl8169_private
*tp
)
5306 static const struct ephy_info e_info_8168ep_2
[] = {
5307 { 0x00, 0xffff, 0x10a3 },
5308 { 0x19, 0xffff, 0xfc00 },
5309 { 0x1e, 0xffff, 0x20ea }
5312 /* disable aspm and clock request before access ephy */
5313 rtl_hw_aspm_clkreq_enable(tp
, false);
5314 rtl_ephy_init(tp
, e_info_8168ep_2
);
5316 rtl_hw_start_8168ep(tp
);
5318 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) & ~PFM_EN
);
5319 RTL_W8(tp
, MISC_1
, RTL_R8(tp
, MISC_1
) & ~PFM_D3COLD_EN
);
5321 rtl_hw_aspm_clkreq_enable(tp
, true);
5324 static void rtl_hw_start_8168ep_3(struct rtl8169_private
*tp
)
5327 static const struct ephy_info e_info_8168ep_3
[] = {
5328 { 0x00, 0xffff, 0x10a3 },
5329 { 0x19, 0xffff, 0x7c00 },
5330 { 0x1e, 0xffff, 0x20eb },
5331 { 0x0d, 0xffff, 0x1666 }
5334 /* disable aspm and clock request before access ephy */
5335 rtl_hw_aspm_clkreq_enable(tp
, false);
5336 rtl_ephy_init(tp
, e_info_8168ep_3
);
5338 rtl_hw_start_8168ep(tp
);
5340 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) & ~PFM_EN
);
5341 RTL_W8(tp
, MISC_1
, RTL_R8(tp
, MISC_1
) & ~PFM_D3COLD_EN
);
5343 data
= r8168_mac_ocp_read(tp
, 0xd3e2);
5346 r8168_mac_ocp_write(tp
, 0xd3e2, data
);
5348 data
= r8168_mac_ocp_read(tp
, 0xd3e4);
5350 r8168_mac_ocp_write(tp
, 0xd3e4, data
);
5352 data
= r8168_mac_ocp_read(tp
, 0xe860);
5354 r8168_mac_ocp_write(tp
, 0xe860, data
);
5356 rtl_hw_aspm_clkreq_enable(tp
, true);
5359 static void rtl_hw_start_8102e_1(struct rtl8169_private
*tp
)
5361 static const struct ephy_info e_info_8102e_1
[] = {
5362 { 0x01, 0, 0x6e65 },
5363 { 0x02, 0, 0x091f },
5364 { 0x03, 0, 0xc2f9 },
5365 { 0x06, 0, 0xafb5 },
5366 { 0x07, 0, 0x0e00 },
5367 { 0x19, 0, 0xec80 },
5368 { 0x01, 0, 0x2e65 },
5373 rtl_set_def_aspm_entry_latency(tp
);
5375 RTL_W8(tp
, DBG_REG
, FIX_NAK_1
);
5377 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
5380 LEDS1
| LEDS0
| Speed_down
| MEMMAP
| IOMAP
| VPD
| PMEnable
);
5381 RTL_W8(tp
, Config3
, RTL_R8(tp
, Config3
) & ~Beacon_en
);
5383 cfg1
= RTL_R8(tp
, Config1
);
5384 if ((cfg1
& LEDS0
) && (cfg1
& LEDS1
))
5385 RTL_W8(tp
, Config1
, cfg1
& ~LEDS0
);
5387 rtl_ephy_init(tp
, e_info_8102e_1
);
5390 static void rtl_hw_start_8102e_2(struct rtl8169_private
*tp
)
5392 rtl_set_def_aspm_entry_latency(tp
);
5394 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
5396 RTL_W8(tp
, Config1
, MEMMAP
| IOMAP
| VPD
| PMEnable
);
5397 RTL_W8(tp
, Config3
, RTL_R8(tp
, Config3
) & ~Beacon_en
);
/* Chip start for 8102e rev 3: rev-2 sequence plus one EPHY fixup. */
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8102e_2(tp);

	rtl_ephy_write(tp, 0x03, 0xc2f9);
}
5407 static void rtl_hw_start_8105e_1(struct rtl8169_private
*tp
)
5409 static const struct ephy_info e_info_8105e_1
[] = {
5410 { 0x07, 0, 0x4000 },
5411 { 0x19, 0, 0x0200 },
5412 { 0x19, 0, 0x0020 },
5413 { 0x1e, 0, 0x2000 },
5414 { 0x03, 0, 0x0001 },
5415 { 0x19, 0, 0x0100 },
5416 { 0x19, 0, 0x0004 },
5420 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5421 RTL_W32(tp
, FuncEvent
, RTL_R32(tp
, FuncEvent
) | 0x002800);
5423 /* Disable Early Tally Counter */
5424 RTL_W32(tp
, FuncEvent
, RTL_R32(tp
, FuncEvent
) & ~0x010000);
5426 RTL_W8(tp
, MCU
, RTL_R8(tp
, MCU
) | EN_NDP
| EN_OOB_RESET
);
5427 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) | PFM_EN
);
5429 rtl_ephy_init(tp
, e_info_8105e_1
);
5431 rtl_pcie_state_l2l3_disable(tp
);
/* RTL8105E rev 2: rev-1 init plus one EPHY read-modify-write. */
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
	rtl_hw_start_8105e_1(tp);
	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
5440 static void rtl_hw_start_8402(struct rtl8169_private
*tp
)
5442 static const struct ephy_info e_info_8402
[] = {
5443 { 0x19, 0xffff, 0xff64 },
5447 rtl_set_def_aspm_entry_latency(tp
);
5449 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5450 RTL_W32(tp
, FuncEvent
, RTL_R32(tp
, FuncEvent
) | 0x002800);
5452 RTL_W8(tp
, MCU
, RTL_R8(tp
, MCU
) & ~NOW_IS_OOB
);
5454 rtl_ephy_init(tp
, e_info_8402
);
5456 rtl_tx_performance_tweak(tp
, PCI_EXP_DEVCTL_READRQ_4096B
);
5458 rtl_set_fifo_size(tp
, 0x00, 0x00, 0x02, 0x06);
5459 rtl_reset_packet_filter(tp
);
5460 rtl_eri_write(tp
, 0xc0, ERIAR_MASK_0011
, 0x0000);
5461 rtl_eri_write(tp
, 0xb8, ERIAR_MASK_0011
, 0x0000);
5462 rtl_w0w1_eri(tp
, 0x0d4, ERIAR_MASK_0011
, 0x0e00, 0xff00);
5464 rtl_pcie_state_l2l3_disable(tp
);
5467 static void rtl_hw_start_8106(struct rtl8169_private
*tp
)
5469 rtl_hw_aspm_clkreq_enable(tp
, false);
5471 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5472 RTL_W32(tp
, FuncEvent
, RTL_R32(tp
, FuncEvent
) | 0x002800);
5474 RTL_W32(tp
, MISC
, (RTL_R32(tp
, MISC
) | DISABLE_LAN_EN
) & ~EARLY_TALLY_EN
);
5475 RTL_W8(tp
, MCU
, RTL_R8(tp
, MCU
) | EN_NDP
| EN_OOB_RESET
);
5476 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) & ~PFM_EN
);
5478 rtl_pcie_state_l2l3_disable(tp
);
5479 rtl_hw_aspm_clkreq_enable(tp
, true);
5482 static void rtl_hw_config(struct rtl8169_private
*tp
)
5484 static const rtl_generic_fct hw_configs
[] = {
5485 [RTL_GIGA_MAC_VER_07
] = rtl_hw_start_8102e_1
,
5486 [RTL_GIGA_MAC_VER_08
] = rtl_hw_start_8102e_3
,
5487 [RTL_GIGA_MAC_VER_09
] = rtl_hw_start_8102e_2
,
5488 [RTL_GIGA_MAC_VER_10
] = NULL
,
5489 [RTL_GIGA_MAC_VER_11
] = rtl_hw_start_8168bb
,
5490 [RTL_GIGA_MAC_VER_12
] = rtl_hw_start_8168bef
,
5491 [RTL_GIGA_MAC_VER_13
] = NULL
,
5492 [RTL_GIGA_MAC_VER_14
] = NULL
,
5493 [RTL_GIGA_MAC_VER_15
] = NULL
,
5494 [RTL_GIGA_MAC_VER_16
] = NULL
,
5495 [RTL_GIGA_MAC_VER_17
] = rtl_hw_start_8168bef
,
5496 [RTL_GIGA_MAC_VER_18
] = rtl_hw_start_8168cp_1
,
5497 [RTL_GIGA_MAC_VER_19
] = rtl_hw_start_8168c_1
,
5498 [RTL_GIGA_MAC_VER_20
] = rtl_hw_start_8168c_2
,
5499 [RTL_GIGA_MAC_VER_21
] = rtl_hw_start_8168c_3
,
5500 [RTL_GIGA_MAC_VER_22
] = rtl_hw_start_8168c_4
,
5501 [RTL_GIGA_MAC_VER_23
] = rtl_hw_start_8168cp_2
,
5502 [RTL_GIGA_MAC_VER_24
] = rtl_hw_start_8168cp_3
,
5503 [RTL_GIGA_MAC_VER_25
] = rtl_hw_start_8168d
,
5504 [RTL_GIGA_MAC_VER_26
] = rtl_hw_start_8168d
,
5505 [RTL_GIGA_MAC_VER_27
] = rtl_hw_start_8168d
,
5506 [RTL_GIGA_MAC_VER_28
] = rtl_hw_start_8168d_4
,
5507 [RTL_GIGA_MAC_VER_29
] = rtl_hw_start_8105e_1
,
5508 [RTL_GIGA_MAC_VER_30
] = rtl_hw_start_8105e_2
,
5509 [RTL_GIGA_MAC_VER_31
] = rtl_hw_start_8168dp
,
5510 [RTL_GIGA_MAC_VER_32
] = rtl_hw_start_8168e_1
,
5511 [RTL_GIGA_MAC_VER_33
] = rtl_hw_start_8168e_1
,
5512 [RTL_GIGA_MAC_VER_34
] = rtl_hw_start_8168e_2
,
5513 [RTL_GIGA_MAC_VER_35
] = rtl_hw_start_8168f_1
,
5514 [RTL_GIGA_MAC_VER_36
] = rtl_hw_start_8168f_1
,
5515 [RTL_GIGA_MAC_VER_37
] = rtl_hw_start_8402
,
5516 [RTL_GIGA_MAC_VER_38
] = rtl_hw_start_8411
,
5517 [RTL_GIGA_MAC_VER_39
] = rtl_hw_start_8106
,
5518 [RTL_GIGA_MAC_VER_40
] = rtl_hw_start_8168g_1
,
5519 [RTL_GIGA_MAC_VER_41
] = rtl_hw_start_8168g_1
,
5520 [RTL_GIGA_MAC_VER_42
] = rtl_hw_start_8168g_2
,
5521 [RTL_GIGA_MAC_VER_43
] = rtl_hw_start_8168g_2
,
5522 [RTL_GIGA_MAC_VER_44
] = rtl_hw_start_8411_2
,
5523 [RTL_GIGA_MAC_VER_45
] = rtl_hw_start_8168h_1
,
5524 [RTL_GIGA_MAC_VER_46
] = rtl_hw_start_8168h_1
,
5525 [RTL_GIGA_MAC_VER_47
] = rtl_hw_start_8168h_1
,
5526 [RTL_GIGA_MAC_VER_48
] = rtl_hw_start_8168h_1
,
5527 [RTL_GIGA_MAC_VER_49
] = rtl_hw_start_8168ep_1
,
5528 [RTL_GIGA_MAC_VER_50
] = rtl_hw_start_8168ep_2
,
5529 [RTL_GIGA_MAC_VER_51
] = rtl_hw_start_8168ep_3
,
5532 if (hw_configs
[tp
->mac_version
])
5533 hw_configs
[tp
->mac_version
](tp
);
5536 static void rtl_hw_start_8168(struct rtl8169_private
*tp
)
5538 RTL_W8(tp
, MaxTxPacketSize
, TxPacketMax
);
5540 /* Workaround for RxFIFO overflow. */
5541 if (tp
->mac_version
== RTL_GIGA_MAC_VER_11
) {
5542 tp
->irq_mask
|= RxFIFOOver
;
5543 tp
->irq_mask
&= ~RxOverflow
;
5549 static void rtl_hw_start_8101(struct rtl8169_private
*tp
)
5551 if (tp
->mac_version
>= RTL_GIGA_MAC_VER_30
)
5552 tp
->irq_mask
&= ~RxFIFOOver
;
5554 if (tp
->mac_version
== RTL_GIGA_MAC_VER_13
||
5555 tp
->mac_version
== RTL_GIGA_MAC_VER_16
)
5556 pcie_capability_set_word(tp
->pci_dev
, PCI_EXP_DEVCTL
,
5557 PCI_EXP_DEVCTL_NOSNOOP_EN
);
5559 RTL_W8(tp
, MaxTxPacketSize
, TxPacketMax
);
5561 tp
->cp_cmd
&= CPCMD_QUIRK_MASK
;
5562 RTL_W16(tp
, CPlusCmd
, tp
->cp_cmd
);
5567 static int rtl8169_change_mtu(struct net_device
*dev
, int new_mtu
)
5569 struct rtl8169_private
*tp
= netdev_priv(dev
);
5571 if (new_mtu
> ETH_DATA_LEN
)
5572 rtl_hw_jumbo_enable(tp
);
5574 rtl_hw_jumbo_disable(tp
);
5577 netdev_update_features(dev
);
5582 static inline void rtl8169_make_unusable_by_asic(struct RxDesc
*desc
)
5584 desc
->addr
= cpu_to_le64(0x0badbadbadbadbadull
);
5585 desc
->opts1
&= ~cpu_to_le32(DescOwn
| RsvdMask
);
5588 static void rtl8169_free_rx_databuff(struct rtl8169_private
*tp
,
5589 void **data_buff
, struct RxDesc
*desc
)
5591 dma_unmap_single(tp_to_dev(tp
), le64_to_cpu(desc
->addr
),
5592 R8169_RX_BUF_SIZE
, DMA_FROM_DEVICE
);
5596 rtl8169_make_unusable_by_asic(desc
);
5599 static inline void rtl8169_mark_to_asic(struct RxDesc
*desc
)
5601 u32 eor
= le32_to_cpu(desc
->opts1
) & RingEnd
;
5603 /* Force memory writes to complete before releasing descriptor */
5606 desc
->opts1
= cpu_to_le32(DescOwn
| eor
| R8169_RX_BUF_SIZE
);
5609 static struct sk_buff
*rtl8169_alloc_rx_data(struct rtl8169_private
*tp
,
5610 struct RxDesc
*desc
)
5614 struct device
*d
= tp_to_dev(tp
);
5615 int node
= dev_to_node(d
);
5617 data
= kmalloc_node(R8169_RX_BUF_SIZE
, GFP_KERNEL
, node
);
5621 /* Memory should be properly aligned, but better check. */
5622 if (!IS_ALIGNED((unsigned long)data
, 8)) {
5623 netdev_err_once(tp
->dev
, "RX buffer not 8-byte-aligned\n");
5627 mapping
= dma_map_single(d
, data
, R8169_RX_BUF_SIZE
, DMA_FROM_DEVICE
);
5628 if (unlikely(dma_mapping_error(d
, mapping
))) {
5629 if (net_ratelimit())
5630 netif_err(tp
, drv
, tp
->dev
, "Failed to map RX DMA!\n");
5634 desc
->addr
= cpu_to_le64(mapping
);
5635 rtl8169_mark_to_asic(desc
);
5643 static void rtl8169_rx_clear(struct rtl8169_private
*tp
)
5647 for (i
= 0; i
< NUM_RX_DESC
; i
++) {
5648 if (tp
->Rx_databuff
[i
]) {
5649 rtl8169_free_rx_databuff(tp
, tp
->Rx_databuff
+ i
,
5650 tp
->RxDescArray
+ i
);
5655 static inline void rtl8169_mark_as_last_descriptor(struct RxDesc
*desc
)
5657 desc
->opts1
|= cpu_to_le32(RingEnd
);
5660 static int rtl8169_rx_fill(struct rtl8169_private
*tp
)
5664 for (i
= 0; i
< NUM_RX_DESC
; i
++) {
5667 data
= rtl8169_alloc_rx_data(tp
, tp
->RxDescArray
+ i
);
5669 rtl8169_make_unusable_by_asic(tp
->RxDescArray
+ i
);
5672 tp
->Rx_databuff
[i
] = data
;
5675 rtl8169_mark_as_last_descriptor(tp
->RxDescArray
+ NUM_RX_DESC
- 1);
5679 rtl8169_rx_clear(tp
);
5683 static int rtl8169_init_ring(struct rtl8169_private
*tp
)
5685 rtl8169_init_ring_indexes(tp
);
5687 memset(tp
->tx_skb
, 0, sizeof(tp
->tx_skb
));
5688 memset(tp
->Rx_databuff
, 0, sizeof(tp
->Rx_databuff
));
5690 return rtl8169_rx_fill(tp
);
5693 static void rtl8169_unmap_tx_skb(struct device
*d
, struct ring_info
*tx_skb
,
5694 struct TxDesc
*desc
)
5696 unsigned int len
= tx_skb
->len
;
5698 dma_unmap_single(d
, le64_to_cpu(desc
->addr
), len
, DMA_TO_DEVICE
);
5706 static void rtl8169_tx_clear_range(struct rtl8169_private
*tp
, u32 start
,
5711 for (i
= 0; i
< n
; i
++) {
5712 unsigned int entry
= (start
+ i
) % NUM_TX_DESC
;
5713 struct ring_info
*tx_skb
= tp
->tx_skb
+ entry
;
5714 unsigned int len
= tx_skb
->len
;
5717 struct sk_buff
*skb
= tx_skb
->skb
;
5719 rtl8169_unmap_tx_skb(tp_to_dev(tp
), tx_skb
,
5720 tp
->TxDescArray
+ entry
);
5722 dev_consume_skb_any(skb
);
5729 static void rtl8169_tx_clear(struct rtl8169_private
*tp
)
5731 rtl8169_tx_clear_range(tp
, tp
->dirty_tx
, NUM_TX_DESC
);
5732 tp
->cur_tx
= tp
->dirty_tx
= 0;
5733 netdev_reset_queue(tp
->dev
);
5736 static void rtl_reset_work(struct rtl8169_private
*tp
)
5738 struct net_device
*dev
= tp
->dev
;
5741 napi_disable(&tp
->napi
);
5742 netif_stop_queue(dev
);
5745 rtl8169_hw_reset(tp
);
5747 for (i
= 0; i
< NUM_RX_DESC
; i
++)
5748 rtl8169_mark_to_asic(tp
->RxDescArray
+ i
);
5750 rtl8169_tx_clear(tp
);
5751 rtl8169_init_ring_indexes(tp
);
5753 napi_enable(&tp
->napi
);
5755 netif_wake_queue(dev
);
5758 static void rtl8169_tx_timeout(struct net_device
*dev
)
5760 struct rtl8169_private
*tp
= netdev_priv(dev
);
5762 rtl_schedule_task(tp
, RTL_FLAG_TASK_RESET_PENDING
);
5765 static __le32
rtl8169_get_txd_opts1(u32 opts0
, u32 len
, unsigned int entry
)
5767 u32 status
= opts0
| len
;
5769 if (entry
== NUM_TX_DESC
- 1)
5772 return cpu_to_le32(status
);
5775 static int rtl8169_xmit_frags(struct rtl8169_private
*tp
, struct sk_buff
*skb
,
5778 struct skb_shared_info
*info
= skb_shinfo(skb
);
5779 unsigned int cur_frag
, entry
;
5780 struct TxDesc
*uninitialized_var(txd
);
5781 struct device
*d
= tp_to_dev(tp
);
5784 for (cur_frag
= 0; cur_frag
< info
->nr_frags
; cur_frag
++) {
5785 const skb_frag_t
*frag
= info
->frags
+ cur_frag
;
5790 entry
= (entry
+ 1) % NUM_TX_DESC
;
5792 txd
= tp
->TxDescArray
+ entry
;
5793 len
= skb_frag_size(frag
);
5794 addr
= skb_frag_address(frag
);
5795 mapping
= dma_map_single(d
, addr
, len
, DMA_TO_DEVICE
);
5796 if (unlikely(dma_mapping_error(d
, mapping
))) {
5797 if (net_ratelimit())
5798 netif_err(tp
, drv
, tp
->dev
,
5799 "Failed to map TX fragments DMA!\n");
5803 txd
->opts1
= rtl8169_get_txd_opts1(opts
[0], len
, entry
);
5804 txd
->opts2
= cpu_to_le32(opts
[1]);
5805 txd
->addr
= cpu_to_le64(mapping
);
5807 tp
->tx_skb
[entry
].len
= len
;
5811 tp
->tx_skb
[entry
].skb
= skb
;
5812 txd
->opts1
|= cpu_to_le32(LastFrag
);
5818 rtl8169_tx_clear_range(tp
, tp
->cur_tx
+ 1, cur_frag
);
5822 static bool rtl_test_hw_pad_bug(struct rtl8169_private
*tp
, struct sk_buff
*skb
)
5824 return skb
->len
< ETH_ZLEN
&& tp
->mac_version
== RTL_GIGA_MAC_VER_34
;
5827 static netdev_tx_t
rtl8169_start_xmit(struct sk_buff
*skb
,
5828 struct net_device
*dev
);
5829 /* r8169_csum_workaround()
5830 * The hw limites the value the transport offset. When the offset is out of the
5831 * range, calculate the checksum by sw.
5833 static void r8169_csum_workaround(struct rtl8169_private
*tp
,
5834 struct sk_buff
*skb
)
5836 if (skb_shinfo(skb
)->gso_size
) {
5837 netdev_features_t features
= tp
->dev
->features
;
5838 struct sk_buff
*segs
, *nskb
;
5840 features
&= ~(NETIF_F_SG
| NETIF_F_IPV6_CSUM
| NETIF_F_TSO6
);
5841 segs
= skb_gso_segment(skb
, features
);
5842 if (IS_ERR(segs
) || !segs
)
5849 rtl8169_start_xmit(nskb
, tp
->dev
);
5852 dev_consume_skb_any(skb
);
5853 } else if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
5854 if (skb_checksum_help(skb
) < 0)
5857 rtl8169_start_xmit(skb
, tp
->dev
);
5859 struct net_device_stats
*stats
;
5862 stats
= &tp
->dev
->stats
;
5863 stats
->tx_dropped
++;
5864 dev_kfree_skb_any(skb
);
5868 /* msdn_giant_send_check()
5869 * According to the document of microsoft, the TCP Pseudo Header excludes the
5870 * packet length for IPv6 TCP large packets.
5872 static int msdn_giant_send_check(struct sk_buff
*skb
)
5874 const struct ipv6hdr
*ipv6h
;
5878 ret
= skb_cow_head(skb
, 0);
5882 ipv6h
= ipv6_hdr(skb
);
5886 th
->check
= ~tcp_v6_check(0, &ipv6h
->saddr
, &ipv6h
->daddr
, 0);
5891 static bool rtl8169_tso_csum_v1(struct rtl8169_private
*tp
,
5892 struct sk_buff
*skb
, u32
*opts
)
5894 u32 mss
= skb_shinfo(skb
)->gso_size
;
5898 opts
[0] |= min(mss
, TD_MSS_MAX
) << TD0_MSS_SHIFT
;
5899 } else if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
5900 const struct iphdr
*ip
= ip_hdr(skb
);
5902 if (ip
->protocol
== IPPROTO_TCP
)
5903 opts
[0] |= TD0_IP_CS
| TD0_TCP_CS
;
5904 else if (ip
->protocol
== IPPROTO_UDP
)
5905 opts
[0] |= TD0_IP_CS
| TD0_UDP_CS
;
5913 static bool rtl8169_tso_csum_v2(struct rtl8169_private
*tp
,
5914 struct sk_buff
*skb
, u32
*opts
)
5916 u32 transport_offset
= (u32
)skb_transport_offset(skb
);
5917 u32 mss
= skb_shinfo(skb
)->gso_size
;
5920 if (transport_offset
> GTTCPHO_MAX
) {
5921 netif_warn(tp
, tx_err
, tp
->dev
,
5922 "Invalid transport offset 0x%x for TSO\n",
5927 switch (vlan_get_protocol(skb
)) {
5928 case htons(ETH_P_IP
):
5929 opts
[0] |= TD1_GTSENV4
;
5932 case htons(ETH_P_IPV6
):
5933 if (msdn_giant_send_check(skb
))
5936 opts
[0] |= TD1_GTSENV6
;
5944 opts
[0] |= transport_offset
<< GTTCPHO_SHIFT
;
5945 opts
[1] |= min(mss
, TD_MSS_MAX
) << TD1_MSS_SHIFT
;
5946 } else if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
5949 if (unlikely(rtl_test_hw_pad_bug(tp
, skb
)))
5950 return !(skb_checksum_help(skb
) || eth_skb_pad(skb
));
5952 if (transport_offset
> TCPHO_MAX
) {
5953 netif_warn(tp
, tx_err
, tp
->dev
,
5954 "Invalid transport offset 0x%x\n",
5959 switch (vlan_get_protocol(skb
)) {
5960 case htons(ETH_P_IP
):
5961 opts
[1] |= TD1_IPv4_CS
;
5962 ip_protocol
= ip_hdr(skb
)->protocol
;
5965 case htons(ETH_P_IPV6
):
5966 opts
[1] |= TD1_IPv6_CS
;
5967 ip_protocol
= ipv6_hdr(skb
)->nexthdr
;
5971 ip_protocol
= IPPROTO_RAW
;
5975 if (ip_protocol
== IPPROTO_TCP
)
5976 opts
[1] |= TD1_TCP_CS
;
5977 else if (ip_protocol
== IPPROTO_UDP
)
5978 opts
[1] |= TD1_UDP_CS
;
5982 opts
[1] |= transport_offset
<< TCPHO_SHIFT
;
5984 if (unlikely(rtl_test_hw_pad_bug(tp
, skb
)))
5985 return !eth_skb_pad(skb
);
5991 static bool rtl_tx_slots_avail(struct rtl8169_private
*tp
,
5992 unsigned int nr_frags
)
5994 unsigned int slots_avail
= tp
->dirty_tx
+ NUM_TX_DESC
- tp
->cur_tx
;
5996 /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
5997 return slots_avail
> nr_frags
;
6000 static netdev_tx_t
rtl8169_start_xmit(struct sk_buff
*skb
,
6001 struct net_device
*dev
)
6003 struct rtl8169_private
*tp
= netdev_priv(dev
);
6004 unsigned int entry
= tp
->cur_tx
% NUM_TX_DESC
;
6005 struct TxDesc
*txd
= tp
->TxDescArray
+ entry
;
6006 struct device
*d
= tp_to_dev(tp
);
6011 if (unlikely(!rtl_tx_slots_avail(tp
, skb_shinfo(skb
)->nr_frags
))) {
6012 netif_err(tp
, drv
, dev
, "BUG! Tx Ring full when queue awake!\n");
6016 if (unlikely(le32_to_cpu(txd
->opts1
) & DescOwn
))
6019 opts
[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb
));
6022 if (!tp
->tso_csum(tp
, skb
, opts
)) {
6023 r8169_csum_workaround(tp
, skb
);
6024 return NETDEV_TX_OK
;
6027 len
= skb_headlen(skb
);
6028 mapping
= dma_map_single(d
, skb
->data
, len
, DMA_TO_DEVICE
);
6029 if (unlikely(dma_mapping_error(d
, mapping
))) {
6030 if (net_ratelimit())
6031 netif_err(tp
, drv
, dev
, "Failed to map TX DMA!\n");
6035 tp
->tx_skb
[entry
].len
= len
;
6036 txd
->addr
= cpu_to_le64(mapping
);
6038 frags
= rtl8169_xmit_frags(tp
, skb
, opts
);
6042 opts
[0] |= FirstFrag
;
6044 opts
[0] |= FirstFrag
| LastFrag
;
6045 tp
->tx_skb
[entry
].skb
= skb
;
6048 txd
->opts2
= cpu_to_le32(opts
[1]);
6050 netdev_sent_queue(dev
, skb
->len
);
6052 skb_tx_timestamp(skb
);
6054 /* Force memory writes to complete before releasing descriptor */
6057 txd
->opts1
= rtl8169_get_txd_opts1(opts
[0], len
, entry
);
6059 /* Force all memory writes to complete before notifying device */
6062 tp
->cur_tx
+= frags
+ 1;
6064 RTL_W8(tp
, TxPoll
, NPQ
);
6066 if (!rtl_tx_slots_avail(tp
, MAX_SKB_FRAGS
)) {
6067 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
6068 * not miss a ring update when it notices a stopped queue.
6071 netif_stop_queue(dev
);
6072 /* Sync with rtl_tx:
6073 * - publish queue status and cur_tx ring index (write barrier)
6074 * - refresh dirty_tx ring index (read barrier).
6075 * May the current thread have a pessimistic view of the ring
6076 * status and forget to wake up queue, a racing rtl_tx thread
6080 if (rtl_tx_slots_avail(tp
, MAX_SKB_FRAGS
))
6081 netif_start_queue(dev
);
6084 return NETDEV_TX_OK
;
6087 rtl8169_unmap_tx_skb(d
, tp
->tx_skb
+ entry
, txd
);
6089 dev_kfree_skb_any(skb
);
6090 dev
->stats
.tx_dropped
++;
6091 return NETDEV_TX_OK
;
6094 netif_stop_queue(dev
);
6095 dev
->stats
.tx_dropped
++;
6096 return NETDEV_TX_BUSY
;
6099 static void rtl8169_pcierr_interrupt(struct net_device
*dev
)
6101 struct rtl8169_private
*tp
= netdev_priv(dev
);
6102 struct pci_dev
*pdev
= tp
->pci_dev
;
6103 u16 pci_status
, pci_cmd
;
6105 pci_read_config_word(pdev
, PCI_COMMAND
, &pci_cmd
);
6106 pci_read_config_word(pdev
, PCI_STATUS
, &pci_status
);
6108 netif_err(tp
, intr
, dev
, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
6109 pci_cmd
, pci_status
);
6112 * The recovery sequence below admits a very elaborated explanation:
6113 * - it seems to work;
6114 * - I did not see what else could be done;
6115 * - it makes iop3xx happy.
6117 * Feel free to adjust to your needs.
6119 if (pdev
->broken_parity_status
)
6120 pci_cmd
&= ~PCI_COMMAND_PARITY
;
6122 pci_cmd
|= PCI_COMMAND_SERR
| PCI_COMMAND_PARITY
;
6124 pci_write_config_word(pdev
, PCI_COMMAND
, pci_cmd
);
6126 pci_write_config_word(pdev
, PCI_STATUS
,
6127 pci_status
& (PCI_STATUS_DETECTED_PARITY
|
6128 PCI_STATUS_SIG_SYSTEM_ERROR
| PCI_STATUS_REC_MASTER_ABORT
|
6129 PCI_STATUS_REC_TARGET_ABORT
| PCI_STATUS_SIG_TARGET_ABORT
));
6131 rtl_schedule_task(tp
, RTL_FLAG_TASK_RESET_PENDING
);
6134 static void rtl_tx(struct net_device
*dev
, struct rtl8169_private
*tp
,
6137 unsigned int dirty_tx
, tx_left
, bytes_compl
= 0, pkts_compl
= 0;
6139 dirty_tx
= tp
->dirty_tx
;
6141 tx_left
= tp
->cur_tx
- dirty_tx
;
6143 while (tx_left
> 0) {
6144 unsigned int entry
= dirty_tx
% NUM_TX_DESC
;
6145 struct ring_info
*tx_skb
= tp
->tx_skb
+ entry
;
6148 status
= le32_to_cpu(tp
->TxDescArray
[entry
].opts1
);
6149 if (status
& DescOwn
)
6152 /* This barrier is needed to keep us from reading
6153 * any other fields out of the Tx descriptor until
6154 * we know the status of DescOwn
6158 rtl8169_unmap_tx_skb(tp_to_dev(tp
), tx_skb
,
6159 tp
->TxDescArray
+ entry
);
6160 if (status
& LastFrag
) {
6162 bytes_compl
+= tx_skb
->skb
->len
;
6163 napi_consume_skb(tx_skb
->skb
, budget
);
6170 if (tp
->dirty_tx
!= dirty_tx
) {
6171 netdev_completed_queue(dev
, pkts_compl
, bytes_compl
);
6173 u64_stats_update_begin(&tp
->tx_stats
.syncp
);
6174 tp
->tx_stats
.packets
+= pkts_compl
;
6175 tp
->tx_stats
.bytes
+= bytes_compl
;
6176 u64_stats_update_end(&tp
->tx_stats
.syncp
);
6178 tp
->dirty_tx
= dirty_tx
;
6179 /* Sync with rtl8169_start_xmit:
6180 * - publish dirty_tx ring index (write barrier)
6181 * - refresh cur_tx ring index and queue status (read barrier)
6182 * May the current thread miss the stopped queue condition,
6183 * a racing xmit thread can only have a right view of the
6187 if (netif_queue_stopped(dev
) &&
6188 rtl_tx_slots_avail(tp
, MAX_SKB_FRAGS
)) {
6189 netif_wake_queue(dev
);
6192 * 8168 hack: TxPoll requests are lost when the Tx packets are
6193 * too close. Let's kick an extra TxPoll request when a burst
6194 * of start_xmit activity is detected (if it is not detected,
6195 * it is slow enough). -- FR
6197 if (tp
->cur_tx
!= dirty_tx
)
6198 RTL_W8(tp
, TxPoll
, NPQ
);
6202 static inline int rtl8169_fragmented_frame(u32 status
)
6204 return (status
& (FirstFrag
| LastFrag
)) != (FirstFrag
| LastFrag
);
6207 static inline void rtl8169_rx_csum(struct sk_buff
*skb
, u32 opts1
)
6209 u32 status
= opts1
& RxProtoMask
;
6211 if (((status
== RxProtoTCP
) && !(opts1
& TCPFail
)) ||
6212 ((status
== RxProtoUDP
) && !(opts1
& UDPFail
)))
6213 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
6215 skb_checksum_none_assert(skb
);
6218 static struct sk_buff
*rtl8169_try_rx_copy(void *data
,
6219 struct rtl8169_private
*tp
,
6223 struct sk_buff
*skb
;
6224 struct device
*d
= tp_to_dev(tp
);
6226 dma_sync_single_for_cpu(d
, addr
, pkt_size
, DMA_FROM_DEVICE
);
6228 skb
= napi_alloc_skb(&tp
->napi
, pkt_size
);
6230 skb_copy_to_linear_data(skb
, data
, pkt_size
);
6231 dma_sync_single_for_device(d
, addr
, pkt_size
, DMA_FROM_DEVICE
);
6236 static int rtl_rx(struct net_device
*dev
, struct rtl8169_private
*tp
, u32 budget
)
6238 unsigned int cur_rx
, rx_left
;
6241 cur_rx
= tp
->cur_rx
;
6243 for (rx_left
= min(budget
, NUM_RX_DESC
); rx_left
> 0; rx_left
--, cur_rx
++) {
6244 unsigned int entry
= cur_rx
% NUM_RX_DESC
;
6245 struct RxDesc
*desc
= tp
->RxDescArray
+ entry
;
6248 status
= le32_to_cpu(desc
->opts1
);
6249 if (status
& DescOwn
)
6252 /* This barrier is needed to keep us from reading
6253 * any other fields out of the Rx descriptor until
6254 * we know the status of DescOwn
6258 if (unlikely(status
& RxRES
)) {
6259 netif_info(tp
, rx_err
, dev
, "Rx ERROR. status = %08x\n",
6261 dev
->stats
.rx_errors
++;
6262 if (status
& (RxRWT
| RxRUNT
))
6263 dev
->stats
.rx_length_errors
++;
6265 dev
->stats
.rx_crc_errors
++;
6266 /* RxFOVF is a reserved bit on later chip versions */
6267 if (tp
->mac_version
== RTL_GIGA_MAC_VER_01
&&
6269 rtl_schedule_task(tp
, RTL_FLAG_TASK_RESET_PENDING
);
6270 dev
->stats
.rx_fifo_errors
++;
6271 } else if (status
& (RxRUNT
| RxCRC
) &&
6272 !(status
& RxRWT
) &&
6273 dev
->features
& NETIF_F_RXALL
) {
6277 struct sk_buff
*skb
;
6282 addr
= le64_to_cpu(desc
->addr
);
6283 if (likely(!(dev
->features
& NETIF_F_RXFCS
)))
6284 pkt_size
= (status
& 0x00003fff) - 4;
6286 pkt_size
= status
& 0x00003fff;
6289 * The driver does not support incoming fragmented
6290 * frames. They are seen as a symptom of over-mtu
6293 if (unlikely(rtl8169_fragmented_frame(status
))) {
6294 dev
->stats
.rx_dropped
++;
6295 dev
->stats
.rx_length_errors
++;
6296 goto release_descriptor
;
6299 skb
= rtl8169_try_rx_copy(tp
->Rx_databuff
[entry
],
6300 tp
, pkt_size
, addr
);
6302 dev
->stats
.rx_dropped
++;
6303 goto release_descriptor
;
6306 rtl8169_rx_csum(skb
, status
);
6307 skb_put(skb
, pkt_size
);
6308 skb
->protocol
= eth_type_trans(skb
, dev
);
6310 rtl8169_rx_vlan_tag(desc
, skb
);
6312 if (skb
->pkt_type
== PACKET_MULTICAST
)
6313 dev
->stats
.multicast
++;
6315 napi_gro_receive(&tp
->napi
, skb
);
6317 u64_stats_update_begin(&tp
->rx_stats
.syncp
);
6318 tp
->rx_stats
.packets
++;
6319 tp
->rx_stats
.bytes
+= pkt_size
;
6320 u64_stats_update_end(&tp
->rx_stats
.syncp
);
6324 rtl8169_mark_to_asic(desc
);
6327 count
= cur_rx
- tp
->cur_rx
;
6328 tp
->cur_rx
= cur_rx
;
6333 static irqreturn_t
rtl8169_interrupt(int irq
, void *dev_instance
)
6335 struct rtl8169_private
*tp
= dev_instance
;
6336 u16 status
= RTL_R16(tp
, IntrStatus
);
6338 if (!tp
->irq_enabled
|| status
== 0xffff || !(status
& tp
->irq_mask
))
6341 if (unlikely(status
& SYSErr
)) {
6342 rtl8169_pcierr_interrupt(tp
->dev
);
6346 if (status
& LinkChg
)
6347 phy_mac_interrupt(tp
->phydev
);
6349 if (unlikely(status
& RxFIFOOver
&&
6350 tp
->mac_version
== RTL_GIGA_MAC_VER_11
)) {
6351 netif_stop_queue(tp
->dev
);
6352 /* XXX - Hack alert. See rtl_task(). */
6353 set_bit(RTL_FLAG_TASK_RESET_PENDING
, tp
->wk
.flags
);
6356 rtl_irq_disable(tp
);
6357 napi_schedule_irqoff(&tp
->napi
);
6359 rtl_ack_events(tp
, status
);
6364 static void rtl_task(struct work_struct
*work
)
6366 static const struct {
6368 void (*action
)(struct rtl8169_private
*);
6370 { RTL_FLAG_TASK_RESET_PENDING
, rtl_reset_work
},
6372 struct rtl8169_private
*tp
=
6373 container_of(work
, struct rtl8169_private
, wk
.work
);
6374 struct net_device
*dev
= tp
->dev
;
6379 if (!netif_running(dev
) ||
6380 !test_bit(RTL_FLAG_TASK_ENABLED
, tp
->wk
.flags
))
6383 for (i
= 0; i
< ARRAY_SIZE(rtl_work
); i
++) {
6386 pending
= test_and_clear_bit(rtl_work
[i
].bitnr
, tp
->wk
.flags
);
6388 rtl_work
[i
].action(tp
);
6392 rtl_unlock_work(tp
);
6395 static int rtl8169_poll(struct napi_struct
*napi
, int budget
)
6397 struct rtl8169_private
*tp
= container_of(napi
, struct rtl8169_private
, napi
);
6398 struct net_device
*dev
= tp
->dev
;
6401 work_done
= rtl_rx(dev
, tp
, (u32
) budget
);
6403 rtl_tx(dev
, tp
, budget
);
6405 if (work_done
< budget
) {
6406 napi_complete_done(napi
, work_done
);
6413 static void rtl8169_rx_missed(struct net_device
*dev
)
6415 struct rtl8169_private
*tp
= netdev_priv(dev
);
6417 if (tp
->mac_version
> RTL_GIGA_MAC_VER_06
)
6420 dev
->stats
.rx_missed_errors
+= RTL_R32(tp
, RxMissed
) & 0xffffff;
6421 RTL_W32(tp
, RxMissed
, 0);
6424 static void r8169_phylink_handler(struct net_device
*ndev
)
6426 struct rtl8169_private
*tp
= netdev_priv(ndev
);
6428 if (netif_carrier_ok(ndev
)) {
6429 rtl_link_chg_patch(tp
);
6430 pm_request_resume(&tp
->pci_dev
->dev
);
6432 pm_runtime_idle(&tp
->pci_dev
->dev
);
6435 if (net_ratelimit())
6436 phy_print_status(tp
->phydev
);
6439 static int r8169_phy_connect(struct rtl8169_private
*tp
)
6441 struct phy_device
*phydev
= tp
->phydev
;
6442 phy_interface_t phy_mode
;
6445 phy_mode
= tp
->supports_gmii
? PHY_INTERFACE_MODE_GMII
:
6446 PHY_INTERFACE_MODE_MII
;
6448 ret
= phy_connect_direct(tp
->dev
, phydev
, r8169_phylink_handler
,
6453 if (!tp
->supports_gmii
)
6454 phy_set_max_speed(phydev
, SPEED_100
);
6456 phy_support_asym_pause(phydev
);
6458 phy_attached_info(phydev
);
6463 static void rtl8169_down(struct net_device
*dev
)
6465 struct rtl8169_private
*tp
= netdev_priv(dev
);
6467 phy_stop(tp
->phydev
);
6469 napi_disable(&tp
->napi
);
6470 netif_stop_queue(dev
);
6472 rtl8169_hw_reset(tp
);
6474 * At this point device interrupts can not be enabled in any function,
6475 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
6476 * and napi is disabled (rtl8169_poll).
6478 rtl8169_rx_missed(dev
);
6480 /* Give a racing hard_start_xmit a few cycles to complete. */
6483 rtl8169_tx_clear(tp
);
6485 rtl8169_rx_clear(tp
);
6487 rtl_pll_power_down(tp
);
6490 static int rtl8169_close(struct net_device
*dev
)
6492 struct rtl8169_private
*tp
= netdev_priv(dev
);
6493 struct pci_dev
*pdev
= tp
->pci_dev
;
6495 pm_runtime_get_sync(&pdev
->dev
);
6497 /* Update counters before going down */
6498 rtl8169_update_counters(tp
);
6501 /* Clear all task flags */
6502 bitmap_zero(tp
->wk
.flags
, RTL_FLAG_MAX
);
6505 rtl_unlock_work(tp
);
6507 cancel_work_sync(&tp
->wk
.work
);
6509 phy_disconnect(tp
->phydev
);
6511 pci_free_irq(pdev
, 0, tp
);
6513 dma_free_coherent(&pdev
->dev
, R8169_RX_RING_BYTES
, tp
->RxDescArray
,
6515 dma_free_coherent(&pdev
->dev
, R8169_TX_RING_BYTES
, tp
->TxDescArray
,
6517 tp
->TxDescArray
= NULL
;
6518 tp
->RxDescArray
= NULL
;
6520 pm_runtime_put_sync(&pdev
->dev
);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler directly. */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp);
}
#endif
6534 static int rtl_open(struct net_device
*dev
)
6536 struct rtl8169_private
*tp
= netdev_priv(dev
);
6537 struct pci_dev
*pdev
= tp
->pci_dev
;
6538 int retval
= -ENOMEM
;
6540 pm_runtime_get_sync(&pdev
->dev
);
6543 * Rx and Tx descriptors needs 256 bytes alignment.
6544 * dma_alloc_coherent provides more.
6546 tp
->TxDescArray
= dma_alloc_coherent(&pdev
->dev
, R8169_TX_RING_BYTES
,
6547 &tp
->TxPhyAddr
, GFP_KERNEL
);
6548 if (!tp
->TxDescArray
)
6549 goto err_pm_runtime_put
;
6551 tp
->RxDescArray
= dma_alloc_coherent(&pdev
->dev
, R8169_RX_RING_BYTES
,
6552 &tp
->RxPhyAddr
, GFP_KERNEL
);
6553 if (!tp
->RxDescArray
)
6556 retval
= rtl8169_init_ring(tp
);
6560 rtl_request_firmware(tp
);
6562 retval
= pci_request_irq(pdev
, 0, rtl8169_interrupt
, NULL
, tp
,
6565 goto err_release_fw_2
;
6567 retval
= r8169_phy_connect(tp
);
6573 set_bit(RTL_FLAG_TASK_ENABLED
, tp
->wk
.flags
);
6575 napi_enable(&tp
->napi
);
6577 rtl8169_init_phy(dev
, tp
);
6579 rtl_pll_power_up(tp
);
6583 if (!rtl8169_init_counter_offsets(tp
))
6584 netif_warn(tp
, hw
, dev
, "counter reset/update failed\n");
6586 phy_start(tp
->phydev
);
6587 netif_start_queue(dev
);
6589 rtl_unlock_work(tp
);
6591 pm_runtime_put_sync(&pdev
->dev
);
6596 pci_free_irq(pdev
, 0, tp
);
6598 rtl_release_firmware(tp
);
6599 rtl8169_rx_clear(tp
);
6601 dma_free_coherent(&pdev
->dev
, R8169_RX_RING_BYTES
, tp
->RxDescArray
,
6603 tp
->RxDescArray
= NULL
;
6605 dma_free_coherent(&pdev
->dev
, R8169_TX_RING_BYTES
, tp
->TxDescArray
,
6607 tp
->TxDescArray
= NULL
;
6609 pm_runtime_put_noidle(&pdev
->dev
);
6614 rtl8169_get_stats64(struct net_device
*dev
, struct rtnl_link_stats64
*stats
)
6616 struct rtl8169_private
*tp
= netdev_priv(dev
);
6617 struct pci_dev
*pdev
= tp
->pci_dev
;
6618 struct rtl8169_counters
*counters
= tp
->counters
;
6621 pm_runtime_get_noresume(&pdev
->dev
);
6623 if (netif_running(dev
) && pm_runtime_active(&pdev
->dev
))
6624 rtl8169_rx_missed(dev
);
6627 start
= u64_stats_fetch_begin_irq(&tp
->rx_stats
.syncp
);
6628 stats
->rx_packets
= tp
->rx_stats
.packets
;
6629 stats
->rx_bytes
= tp
->rx_stats
.bytes
;
6630 } while (u64_stats_fetch_retry_irq(&tp
->rx_stats
.syncp
, start
));
6633 start
= u64_stats_fetch_begin_irq(&tp
->tx_stats
.syncp
);
6634 stats
->tx_packets
= tp
->tx_stats
.packets
;
6635 stats
->tx_bytes
= tp
->tx_stats
.bytes
;
6636 } while (u64_stats_fetch_retry_irq(&tp
->tx_stats
.syncp
, start
));
6638 stats
->rx_dropped
= dev
->stats
.rx_dropped
;
6639 stats
->tx_dropped
= dev
->stats
.tx_dropped
;
6640 stats
->rx_length_errors
= dev
->stats
.rx_length_errors
;
6641 stats
->rx_errors
= dev
->stats
.rx_errors
;
6642 stats
->rx_crc_errors
= dev
->stats
.rx_crc_errors
;
6643 stats
->rx_fifo_errors
= dev
->stats
.rx_fifo_errors
;
6644 stats
->rx_missed_errors
= dev
->stats
.rx_missed_errors
;
6645 stats
->multicast
= dev
->stats
.multicast
;
6648 * Fetch additonal counter values missing in stats collected by driver
6649 * from tally counters.
6651 if (pm_runtime_active(&pdev
->dev
))
6652 rtl8169_update_counters(tp
);
6655 * Subtract values fetched during initalization.
6656 * See rtl8169_init_counter_offsets for a description why we do that.
6658 stats
->tx_errors
= le64_to_cpu(counters
->tx_errors
) -
6659 le64_to_cpu(tp
->tc_offset
.tx_errors
);
6660 stats
->collisions
= le32_to_cpu(counters
->tx_multi_collision
) -
6661 le32_to_cpu(tp
->tc_offset
.tx_multi_collision
);
6662 stats
->tx_aborted_errors
= le16_to_cpu(counters
->tx_aborted
) -
6663 le16_to_cpu(tp
->tc_offset
.tx_aborted
);
6665 pm_runtime_put_noidle(&pdev
->dev
);
6668 static void rtl8169_net_suspend(struct net_device
*dev
)
6670 struct rtl8169_private
*tp
= netdev_priv(dev
);
6672 if (!netif_running(dev
))
6675 phy_stop(tp
->phydev
);
6676 netif_device_detach(dev
);
6679 napi_disable(&tp
->napi
);
6680 /* Clear all task flags */
6681 bitmap_zero(tp
->wk
.flags
, RTL_FLAG_MAX
);
6683 rtl_unlock_work(tp
);
6685 rtl_pll_power_down(tp
);
6690 static int rtl8169_suspend(struct device
*device
)
6692 struct net_device
*dev
= dev_get_drvdata(device
);
6693 struct rtl8169_private
*tp
= netdev_priv(dev
);
6695 rtl8169_net_suspend(dev
);
6696 clk_disable_unprepare(tp
->clk
);
6701 static void __rtl8169_resume(struct net_device
*dev
)
6703 struct rtl8169_private
*tp
= netdev_priv(dev
);
6705 netif_device_attach(dev
);
6707 rtl_pll_power_up(tp
);
6708 rtl8169_init_phy(dev
, tp
);
6710 phy_start(tp
->phydev
);
6713 napi_enable(&tp
->napi
);
6714 set_bit(RTL_FLAG_TASK_ENABLED
, tp
->wk
.flags
);
6716 rtl_unlock_work(tp
);
6719 static int rtl8169_resume(struct device
*device
)
6721 struct net_device
*dev
= dev_get_drvdata(device
);
6722 struct rtl8169_private
*tp
= netdev_priv(dev
);
6724 clk_prepare_enable(tp
->clk
);
6726 if (netif_running(dev
))
6727 __rtl8169_resume(dev
);
/*
 * Runtime-PM suspend: arm Wake-on-LAN (WAKE_ANY) so link changes can wake
 * the device, quiesce the NIC, and snapshot the HW counters while the chip
 * is still reachable.
 *
 * NOTE(review): extraction dropped interior lines (the early "return 0;"
 * when no TX ring is allocated, the rtl_lock_work() pairing with
 * rtl_unlock_work(), and the trailing "return 0;") — restore from
 * upstream r8169.c.
 */
6732 static int rtl8169_runtime_suspend(struct device
*device
)
6734 struct net_device
*dev
= dev_get_drvdata(device
);
6735 struct rtl8169_private
*tp
= netdev_priv(dev
);
/* No TX descriptor ring means the device was never opened. */
6737 if (!tp
->TxDescArray
)
6741 __rtl8169_set_wol(tp
, WAKE_ANY
);
6742 rtl_unlock_work(tp
);
6744 rtl8169_net_suspend(dev
);
6746 /* Update counters before going runtime suspend */
6747 rtl8169_rx_missed(dev
);
6748 rtl8169_update_counters(tp
);
/*
 * Runtime-PM resume: re-program the MAC address register, restore the
 * user's saved WoL options (undoing the WAKE_ANY forced at runtime
 * suspend) and re-run the common resume path.
 *
 * NOTE(review): extraction dropped interior lines (the early "return 0;"
 * when no TX ring exists, the rtl_lock_work() pairing with
 * rtl_unlock_work(), and the trailing "return 0;").
 */
6753 static int rtl8169_runtime_resume(struct device
*device
)
6755 struct net_device
*dev
= dev_get_drvdata(device
);
6756 struct rtl8169_private
*tp
= netdev_priv(dev
);
6757 rtl_rar_set(tp
, dev
->dev_addr
);
6759 if (!tp
->TxDescArray
)
6763 __rtl8169_set_wol(tp
, tp
->saved_wolopts
);
6764 rtl_unlock_work(tp
);
6766 __rtl8169_resume(dev
);
/*
 * Runtime-PM idle callback: when the interface is down or the link is
 * lost, schedule a runtime suspend 10 s out.
 *
 * NOTE(review): the function's return statement (upstream returns -EBUSY
 * so the PM core does not suspend immediately — TODO confirm) and braces
 * were dropped by the extraction.
 */
6771 static int rtl8169_runtime_idle(struct device
*device
)
6773 struct net_device
*dev
= dev_get_drvdata(device
);
6775 if (!netif_running(dev
) || !netif_carrier_ok(dev
))
6776 pm_schedule_suspend(device
, 10000);
/*
 * Power-management ops: one suspend/resume pair serves all system-sleep
 * transitions (freeze/thaw and poweroff/restore reuse them), plus the
 * runtime-PM trio above. Referenced via RTL8169_PM_OPS under CONFIG_PM.
 */
6781 static const struct dev_pm_ops rtl8169_pm_ops
= {
6782 .suspend
= rtl8169_suspend
,
6783 .resume
= rtl8169_resume
,
6784 .freeze
= rtl8169_suspend
,
6785 .thaw
= rtl8169_resume
,
6786 .poweroff
= rtl8169_suspend
,
6787 .restore
= rtl8169_resume
,
6788 .runtime_suspend
= rtl8169_runtime_suspend
,
6789 .runtime_resume
= rtl8169_runtime_resume
,
6790 .runtime_idle
= rtl8169_runtime_idle
,
6793 #define RTL8169_PM_OPS (&rtl8169_pm_ops)
6795 #else /* !CONFIG_PM */
6797 #define RTL8169_PM_OPS NULL
6799 #endif /* !CONFIG_PM */
/*
 * Shutdown-time WoL quirk for 8168b-class chips (MAC versions 11/12/17):
 * these parts fail to wake if the receiver is disabled, so stop bus
 * mastering but leave RX enabled, then read ChipCmd back as a posting
 * flush of the write.
 *
 * NOTE(review): the switch's "break;" and "default:" arm plus braces were
 * dropped by the extraction — restore from upstream r8169.c.
 */
6801 static void rtl_wol_shutdown_quirk(struct rtl8169_private
*tp
)
6803 /* WoL fails with 8168b when the receiver is disabled. */
6804 switch (tp
->mac_version
) {
6805 case RTL_GIGA_MAC_VER_11
:
6806 case RTL_GIGA_MAC_VER_12
:
6807 case RTL_GIGA_MAC_VER_17
:
6808 pci_clear_master(tp
->pci_dev
);
/* Keep only the receiver running so WoL packets are seen. */
6810 RTL_W8(tp
, ChipCmd
, CmdRxEnb
);
/* Read back to flush the posted write. */
6812 RTL_R8(tp
, ChipCmd
);
/*
 * PCI .shutdown hook: quiesce the NIC, restore the factory MAC address,
 * reset the chip, and — only on an actual power-off — apply the WoL
 * quirks when WoL is armed and drop the device into D3hot with wakeup
 * enabled.
 *
 * NOTE(review): braces/closing lines of the nested ifs were dropped by
 * the extraction.
 */
6819 static void rtl_shutdown(struct pci_dev
*pdev
)
6821 struct net_device
*dev
= pci_get_drvdata(pdev
);
6822 struct rtl8169_private
*tp
= netdev_priv(dev
);
6824 rtl8169_net_suspend(dev
);
6826 /* Restore original MAC address */
6827 rtl_rar_set(tp
, dev
->perm_addr
);
6829 rtl8169_hw_reset(tp
);
/* Kexec/reboot paths skip the D3 transition below. */
6831 if (system_state
== SYSTEM_POWER_OFF
) {
6832 if (tp
->saved_wolopts
) {
6833 rtl_wol_suspend_quirk(tp
);
6834 rtl_wol_shutdown_quirk(tp
);
6837 pci_wake_from_d3(pdev
, true);
6838 pci_set_power_state(pdev
, PCI_D3hot
);
/*
 * PCI .remove hook — tear down in reverse probe order: stop the DASH
 * management firmware if present, delete NAPI, unregister the netdev and
 * MDIO bus, drop any loaded PHY firmware, rebalance the runtime-PM
 * reference taken at probe, and restore the factory MAC address.
 *
 * NOTE(review): function braces were dropped by the extraction.
 */
6842 static void rtl_remove_one(struct pci_dev
*pdev
)
6844 struct net_device
*dev
= pci_get_drvdata(pdev
);
6845 struct rtl8169_private
*tp
= netdev_priv(dev
);
6847 if (r8168_check_dash(tp
))
6848 rtl8168_driver_stop(tp
);
6850 netif_napi_del(&tp
->napi
);
6852 unregister_netdev(dev
);
6853 mdiobus_unregister(tp
->phydev
->mdio
.bus
);
6855 rtl_release_firmware(tp
);
/* Pairs with pm_runtime_put_sync() at the end of rtl_init_one(). */
6857 if (pci_dev_run_wake(pdev
))
6858 pm_runtime_get_noresume(&pdev
->dev
);
6860 /* restore original MAC address */
6861 rtl_rar_set(tp
, dev
->perm_addr
);
/*
 * Netdev callbacks installed on every r8169 interface in rtl_init_one().
 *
 * NOTE(review): the closing "#endif" for CONFIG_NET_POLL_CONTROLLER and
 * the terminating "};" were dropped by the extraction.
 */
6864 static const struct net_device_ops rtl_netdev_ops
= {
6865 .ndo_open
= rtl_open
,
6866 .ndo_stop
= rtl8169_close
,
6867 .ndo_get_stats64
= rtl8169_get_stats64
,
6868 .ndo_start_xmit
= rtl8169_start_xmit
,
6869 .ndo_tx_timeout
= rtl8169_tx_timeout
,
6870 .ndo_validate_addr
= eth_validate_addr
,
6871 .ndo_change_mtu
= rtl8169_change_mtu
,
6872 .ndo_fix_features
= rtl8169_fix_features
,
6873 .ndo_set_features
= rtl8169_set_features
,
6874 .ndo_set_mac_address
= rtl_set_mac_address
,
6875 .ndo_do_ioctl
= rtl8169_ioctl
,
6876 .ndo_set_rx_mode
= rtl_set_rx_mode
,
6877 #ifdef CONFIG_NET_POLL_CONTROLLER
6878 .ndo_poll_controller
= rtl8169_netpoll
,
/*
 * Per-family configuration table, indexed by the PCI id table's
 * driver_data (see rtl_init_one()): hw_start routine, default interrupt
 * mask and coalescing limits for the 8169, 8168 and 8101 families.
 *
 * NOTE(review): the per-entry "[RTL_CFG_x] = {" / "}," braces, the
 * irq_mask field declaration, and the terminating "};" were dropped by
 * the extraction — restore from upstream r8169.c.
 */
6883 static const struct rtl_cfg_info
{
6884 void (*hw_start
)(struct rtl8169_private
*tp
);
6886 unsigned int has_gmii
:1;
6887 const struct rtl_coalesce_info
*coalesce_info
;
6888 } rtl_cfg_infos
[] = {
/* 8169 family. */
6890 .hw_start
= rtl_hw_start_8169
,
6891 .irq_mask
= SYSErr
| LinkChg
| RxOverflow
| RxFIFOOver
,
6893 .coalesce_info
= rtl_coalesce_info_8169
,
/* 8168 family. */
6896 .hw_start
= rtl_hw_start_8168
,
6897 .irq_mask
= LinkChg
| RxOverflow
,
6899 .coalesce_info
= rtl_coalesce_info_8168_8136
,
/* 8101 family. */
6902 .hw_start
= rtl_hw_start_8101
,
6903 .irq_mask
= LinkChg
| RxOverflow
| RxFIFOOver
,
6904 .coalesce_info
= rtl_coalesce_info_8168_8136
,
/*
 * Allocate exactly one interrupt vector. Old 8169 chips (MAC version
 * <= 06) cannot use MSI, so MSIEnable is cleared in Config2 (behind the
 * config-register unlock/lock pair) and only a legacy IRQ is requested;
 * newer chips may use any IRQ type.
 *
 * Returns the pci_alloc_irq_vectors() result (number of vectors or a
 * negative errno).
 *
 * NOTE(review): the "unsigned int flags;" declaration, the "} else {"
 * and closing braces were dropped by the extraction.
 */
6908 static int rtl_alloc_irq(struct rtl8169_private
*tp
)
6912 if (tp
->mac_version
<= RTL_GIGA_MAC_VER_06
) {
6913 rtl_unlock_config_regs(tp
);
6914 RTL_W8(tp
, Config2
, RTL_R8(tp
, Config2
) & ~MSIEnable
);
6915 rtl_lock_config_regs(tp
);
6916 flags
= PCI_IRQ_LEGACY
;
6918 flags
= PCI_IRQ_ALL_TYPES
;
6921 return pci_alloc_irq_vectors(tp
->pci_dev
, 1, 1, flags
);
/*
 * Read the hardware MAC address into @mac_addr for chips that store it in
 * ERI registers 0xe0/0xe4 (MAC versions 35-38 and 40-51): bytes 0-3 come
 * from the low word, bytes 4-5 from the high word, little-endian order.
 * Other chip versions leave @mac_addr untouched here.
 *
 * NOTE(review): the "u32 value;" declaration, "break;" and "default:"
 * arm, and closing braces were dropped by the extraction.
 */
6924 static void rtl_read_mac_address(struct rtl8169_private
*tp
,
6925 u8 mac_addr
[ETH_ALEN
])
6929 /* Get MAC address */
6930 switch (tp
->mac_version
) {
6931 case RTL_GIGA_MAC_VER_35
... RTL_GIGA_MAC_VER_38
:
6932 case RTL_GIGA_MAC_VER_40
... RTL_GIGA_MAC_VER_51
:
6933 value
= rtl_eri_read(tp
, 0xe0);
6934 mac_addr
[0] = (value
>> 0) & 0xff;
6935 mac_addr
[1] = (value
>> 8) & 0xff;
6936 mac_addr
[2] = (value
>> 16) & 0xff;
6937 mac_addr
[3] = (value
>> 24) & 0xff;
6939 value
= rtl_eri_read(tp
, 0xe4);
6940 mac_addr
[4] = (value
>> 0) & 0xff;
6941 mac_addr
[5] = (value
>> 8) & 0xff;
6948 DECLARE_RTL_COND(rtl_link_list_ready_cond
)
6950 return RTL_R8(tp
, MCU
) & LINK_LIST_RDY
;
6953 DECLARE_RTL_COND(rtl_rxtx_empty_cond
)
6955 return (RTL_R8(tp
, MCU
) & RXTX_EMPTY
) == RXTX_EMPTY
;
/*
 * MDIO bus .read callback: forward the register read to the chip's
 * internal PHY. The bus priv pointer is the driver's private struct.
 *
 * NOTE(review): extraction dropped interior lines here — upstream guards
 * with "if (phyaddr > 0) return -ENODEV;" since only PHY address 0 exists
 * on this bus (TODO confirm against upstream r8169.c).
 */
6958 static int r8169_mdio_read_reg(struct mii_bus
*mii_bus
, int phyaddr
, int phyreg
)
6960 struct rtl8169_private
*tp
= mii_bus
->priv
;
6965 return rtl_readphy(tp
, phyreg
);
/*
 * MDIO bus .write callback: forward the register write to the chip's
 * internal PHY.
 *
 * NOTE(review): extraction dropped interior lines — upstream guards
 * against phyaddr > 0 and ends with "return 0;" (TODO confirm against
 * upstream r8169.c).
 */
6968 static int r8169_mdio_write_reg(struct mii_bus
*mii_bus
, int phyaddr
,
6969 int phyreg
, u16 val
)
6971 struct rtl8169_private
*tp
= mii_bus
->priv
;
6976 rtl_writephy(tp
, phyreg
, val
);
/*
 * Allocate and register an MDIO bus for the chip's single internal PHY
 * (address 0), wire up the read/write callbacks, look the PHY up, and
 * suspend it until rtl_open() wakes it. PHY_IGNORE_INTERRUPT is set
 * because link events come through the MAC's own interrupt.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): error-path lines (allocation-failure return, the
 * mdiobus_register() failure check, the "no PHY found" branch that leads
 * to the mdiobus_unregister() below, and "return 0;") were dropped by the
 * extraction — restore from upstream r8169.c.
 */
6981 static int r8169_mdio_register(struct rtl8169_private
*tp
)
6983 struct pci_dev
*pdev
= tp
->pci_dev
;
6984 struct mii_bus
*new_bus
;
6987 new_bus
= devm_mdiobus_alloc(&pdev
->dev
);
6991 new_bus
->name
= "r8169";
6993 new_bus
->parent
= &pdev
->dev
;
6994 new_bus
->irq
[0] = PHY_IGNORE_INTERRUPT
;
6995 snprintf(new_bus
->id
, MII_BUS_ID_SIZE
, "r8169-%x", pci_dev_id(pdev
));
6997 new_bus
->read
= r8169_mdio_read_reg
;
6998 new_bus
->write
= r8169_mdio_write_reg
;
7000 ret
= mdiobus_register(new_bus
);
7004 tp
->phydev
= mdiobus_get_phy(new_bus
, 0);
/* Unwind path when no PHY was found at address 0. */
7006 mdiobus_unregister(new_bus
);
7010 /* PHY will be woken up in rtl_open() */
7011 phy_suspend(tp
->phydev
);
/*
 * One-time hardware init for 8168g-class chips: gate RXDV, wait for the
 * TX config and RX/TX paths to drain, stop both DMA engines, clear the
 * out-of-band flag, then toggle OCP register 0xe8de twice (read-modify-
 * write), each time waiting for the link list to become ready.
 *
 * NOTE(review): extraction dropped interior lines — the "u16 data;"
 * declaration, the early "return;" after each failed wait, and the bit
 * manipulation between each 0xe8de read and write — restore from
 * upstream r8169.c before relying on this sequence.
 */
7016 static void rtl_hw_init_8168g(struct rtl8169_private
*tp
)
7020 tp
->ocp_base
= OCP_STD_PHY_BASE
;
7022 RTL_W32(tp
, MISC
, RTL_R32(tp
, MISC
) | RXDV_GATED_EN
);
7024 if (!rtl_udelay_loop_wait_high(tp
, &rtl_txcfg_empty_cond
, 100, 42))
7027 if (!rtl_udelay_loop_wait_high(tp
, &rtl_rxtx_empty_cond
, 100, 42))
/* Both paths drained: stop the TX/RX DMA engines. */
7030 RTL_W8(tp
, ChipCmd
, RTL_R8(tp
, ChipCmd
) & ~(CmdTxEnb
| CmdRxEnb
));
7032 RTL_W8(tp
, MCU
, RTL_R8(tp
, MCU
) & ~NOW_IS_OOB
);
7034 data
= r8168_mac_ocp_read(tp
, 0xe8de);
7036 r8168_mac_ocp_write(tp
, 0xe8de, data
);
7038 if (!rtl_udelay_loop_wait_high(tp
, &rtl_link_list_ready_cond
, 100, 42))
7041 data
= r8168_mac_ocp_read(tp
, 0xe8de);
7043 r8168_mac_ocp_write(tp
, 0xe8de, data
);
7045 if (!rtl_udelay_loop_wait_high(tp
, &rtl_link_list_ready_cond
, 100, 42))
/*
 * One-time hardware init for 8168ep-class chips: the CMAC (management
 * block) must be stopped before running the common 8168g init sequence —
 * the order of these two calls matters.
 */
7049 static void rtl_hw_init_8168ep(struct rtl8169_private
*tp
)
7051 rtl8168ep_stop_cmac(tp
);
7052 rtl_hw_init_8168g(tp
);
/*
 * Dispatch the one-time hardware init by MAC version: 40-48 use the
 * 8168g sequence, 49-51 the 8168ep variant; other versions need no init.
 *
 * NOTE(review): the "break;" statements, "default:" arm and closing
 * braces were dropped by the extraction.
 */
7055 static void rtl_hw_initialize(struct rtl8169_private
*tp
)
7057 switch (tp
->mac_version
) {
7058 case RTL_GIGA_MAC_VER_40
... RTL_GIGA_MAC_VER_48
:
7059 rtl_hw_init_8168g(tp
);
7061 case RTL_GIGA_MAC_VER_49
... RTL_GIGA_MAC_VER_51
:
7062 rtl_hw_init_8168ep(tp
);
7069 /* Versions RTL8102e and from RTL8168c onwards support csum_v2 */
/*
 * True when the chip uses the v2 TX checksum/TSO descriptor layout; the
 * listed old versions (01-06, 10-17) are the v1 exceptions.
 *
 * NOTE(review): the "return false;" for the listed cases and the
 * "default: return true;" were dropped by the extraction.
 */
7070 static bool rtl_chip_supports_csum_v2(struct rtl8169_private
*tp
)
7072 switch (tp
->mac_version
) {
7073 case RTL_GIGA_MAC_VER_01
... RTL_GIGA_MAC_VER_06
:
7074 case RTL_GIGA_MAC_VER_10
... RTL_GIGA_MAC_VER_17
:
/*
 * Return the chip's maximum jumbo-frame size, grouped by MAC version;
 * fast-ethernet (non-GMII) parts get no jumbo support at all.
 *
 * NOTE(review): the per-case "return JUMBO_xK;" lines and the default
 * return were dropped by the extraction — restore from upstream r8169.c.
 */
7081 static int rtl_jumbo_max(struct rtl8169_private
*tp
)
7083 /* Non-GBit versions don't support jumbo frames */
7084 if (!tp
->supports_gmii
)
7087 switch (tp
->mac_version
) {
/* RTL8169 family. */
7089 case RTL_GIGA_MAC_VER_01
... RTL_GIGA_MAC_VER_06
:
/* RTL8168b family. */
7092 case RTL_GIGA_MAC_VER_11
:
7093 case RTL_GIGA_MAC_VER_12
:
7094 case RTL_GIGA_MAC_VER_17
:
/* RTL8168c family. */
7097 case RTL_GIGA_MAC_VER_18
... RTL_GIGA_MAC_VER_24
:
/*
 * devm_add_action_or_reset() callback: balance the clk_prepare_enable()
 * done in rtl_get_ether_clk() when the device is torn down.
 */
static void rtl_disable_clk(void *data)
{
	struct clk *ether_clk = data;

	clk_disable_unprepare(ether_clk);
}
/*
 * Acquire the board's optional external "ether_clk", enable it, and
 * register rtl_disable_clk() as a devm teardown action. A missing clock
 * is not an error (clk-core accepts a NULL clk); -EPROBE_DEFER is passed
 * through silently, other failures are logged.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): extraction dropped interior lines — "struct clk *clk;",
 * "int rc;", the IS_ERR(clk) branch structure around the comments below,
 * the assignment of clk to tp->clk, and the trailing "return rc;" —
 * restore from upstream r8169.c.
 */
7109 static int rtl_get_ether_clk(struct rtl8169_private
*tp
)
7111 struct device
*d
= tp_to_dev(tp
);
7115 clk
= devm_clk_get(d
, "ether_clk");
7119 /* clk-core allows NULL (for suspend / resume) */
7121 else if (rc
!= -EPROBE_DEFER
)
7122 dev_err(d
, "failed to get clk: %d\n", rc
);
7125 rc
= clk_prepare_enable(clk
);
7127 dev_err(d
, "failed to enable clk: %d\n", rc
);
7129 rc
= devm_add_action_or_reset(d
, rtl_disable_clk
, clk
);
/*
 * PCI probe: allocate the netdev, map the MMIO BAR, identify the chip,
 * configure DMA masks, IRQs, features and MTU limits, read the MAC
 * address, register the MDIO bus and netdev, and enable runtime PM.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): extraction dropped many interior lines throughout this
 * function — "int rc, jumbo_max;" style declarations, nearly every
 * "if (rc < 0)" / "return rc;" error check surrounding the dev_err()
 * calls below, several closing braces, and the success "return 0;" —
 * restore from upstream r8169.c before building.
 */
7135 static int rtl_init_one(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
7137 const struct rtl_cfg_info
*cfg
= rtl_cfg_infos
+ ent
->driver_data
;
7138 /* align to u16 for is_valid_ether_addr() */
7139 u8 mac_addr
[ETH_ALEN
] __aligned(2) = {};
7140 struct rtl8169_private
*tp
;
7141 struct net_device
*dev
;
7142 int chipset
, region
, i
;
/* devm-managed netdev: freed automatically on probe failure/removal. */
7145 dev
= devm_alloc_etherdev(&pdev
->dev
, sizeof (*tp
));
7149 SET_NETDEV_DEV(dev
, &pdev
->dev
);
7150 dev
->netdev_ops
= &rtl_netdev_ops
;
7151 tp
= netdev_priv(dev
);
7154 tp
->msg_enable
= netif_msg_init(debug
.msg_enable
, R8169_MSG_DEFAULT
);
7155 tp
->supports_gmii
= cfg
->has_gmii
;
7157 /* Get the *optional* external "ether_clk" used on some boards */
7158 rc
= rtl_get_ether_clk(tp
);
7162 /* Disable ASPM completely as that cause random device stop working
7163 * problems as well as full system hangs for some PCIe devices users.
7165 pci_disable_link_state(pdev
, PCIE_LINK_STATE_L0S
| PCIE_LINK_STATE_L1
);
7167 /* enable device (incl. PCI PM wakeup and hotplug setup) */
7168 rc
= pcim_enable_device(pdev
);
7170 dev_err(&pdev
->dev
, "enable failure\n");
/* MWI is a best-effort optimization; failure is only informational. */
7174 if (pcim_set_mwi(pdev
) < 0)
7175 dev_info(&pdev
->dev
, "Mem-Wr-Inval unavailable\n");
7177 /* use first MMIO region */
7178 region
= ffs(pci_select_bars(pdev
, IORESOURCE_MEM
)) - 1;
7180 dev_err(&pdev
->dev
, "no MMIO resource found\n");
7184 /* check for weird/broken PCI region reporting */
7185 if (pci_resource_len(pdev
, region
) < R8169_REGS_SIZE
) {
7186 dev_err(&pdev
->dev
, "Invalid PCI region size(s), aborting\n");
7190 rc
= pcim_iomap_regions(pdev
, BIT(region
), MODULENAME
);
7192 dev_err(&pdev
->dev
, "cannot remap MMIO, aborting\n");
7196 tp
->mmio_addr
= pcim_iomap_table(pdev
)[region
];
7198 /* Identify chip attached to board */
7199 rtl8169_get_mac_version(tp
);
7200 if (tp
->mac_version
== RTL_GIGA_MAC_NONE
)
7203 if (rtl_tbi_enabled(tp
)) {
7204 dev_err(&pdev
->dev
, "TBI fiber mode not supported\n");
7208 tp
->cp_cmd
= RTL_R16(tp
, CPlusCmd
);
/* 64-bit DMA only for chips >= VER_18 on 64-bit-capable hosts;
 * otherwise fall back to a 32-bit mask. */
7210 if (sizeof(dma_addr_t
) > 4 && tp
->mac_version
>= RTL_GIGA_MAC_VER_18
&&
7211 !dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64))) {
7212 dev
->features
|= NETIF_F_HIGHDMA
;
7214 rc
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
7216 dev_err(&pdev
->dev
, "DMA configuration failed\n");
7223 rtl8169_irq_mask_and_ack(tp
);
7225 rtl_hw_initialize(tp
);
7229 pci_set_master(pdev
);
7231 rtl_init_mdio_ops(tp
);
7232 rtl_init_jumbo_ops(tp
);
7234 chipset
= tp
->mac_version
;
7236 rc
= rtl_alloc_irq(tp
);
7238 dev_err(&pdev
->dev
, "Can't allocate interrupt\n");
7242 mutex_init(&tp
->wk
.mutex
);
7243 INIT_WORK(&tp
->wk
.work
, rtl_task
);
7244 u64_stats_init(&tp
->rx_stats
.syncp
);
7245 u64_stats_init(&tp
->tx_stats
.syncp
);
7247 /* get MAC address */
/* Prefer a platform-provided address, then chip-specific ERI registers;
 * program it only if valid, else keep whatever the chip already has. */
7248 rc
= eth_platform_get_mac_address(&pdev
->dev
, mac_addr
);
7250 rtl_read_mac_address(tp
, mac_addr
);
7252 if (is_valid_ether_addr(mac_addr
))
7253 rtl_rar_set(tp
, mac_addr
);
7255 for (i
= 0; i
< ETH_ALEN
; i
++)
7256 dev
->dev_addr
[i
] = RTL_R8(tp
, MAC0
+ i
);
7258 dev
->ethtool_ops
= &rtl8169_ethtool_ops
;
7260 netif_napi_add(dev
, &tp
->napi
, rtl8169_poll
, NAPI_POLL_WEIGHT
);
7262 /* don't enable SG, IP_CSUM and TSO by default - it might not work
7263 * properly for all devices */
7264 dev
->features
|= NETIF_F_RXCSUM
|
7265 NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
;
7267 dev
->hw_features
= NETIF_F_SG
| NETIF_F_IP_CSUM
| NETIF_F_TSO
|
7268 NETIF_F_RXCSUM
| NETIF_F_HW_VLAN_CTAG_TX
|
7269 NETIF_F_HW_VLAN_CTAG_RX
;
7270 dev
->vlan_features
= NETIF_F_SG
| NETIF_F_IP_CSUM
| NETIF_F_TSO
|
7272 dev
->priv_flags
|= IFF_LIVE_ADDR_CHANGE
;
7274 tp
->cp_cmd
|= RxChkSum
| RxVlan
;
7277 * Pretend we are using VLANs; This bypasses a nasty bug where
7278 * Interrupts stop flowing on high load on 8110SCd controllers.
7280 if (tp
->mac_version
== RTL_GIGA_MAC_VER_05
)
7281 /* Disallow toggling */
7282 dev
->hw_features
&= ~NETIF_F_HW_VLAN_CTAG_RX
;
7284 if (rtl_chip_supports_csum_v2(tp
)) {
7285 tp
->tso_csum
= rtl8169_tso_csum_v2
;
7286 dev
->hw_features
|= NETIF_F_IPV6_CSUM
| NETIF_F_TSO6
;
7288 tp
->tso_csum
= rtl8169_tso_csum_v1
;
7291 dev
->hw_features
|= NETIF_F_RXALL
;
7292 dev
->hw_features
|= NETIF_F_RXFCS
;
7294 /* MTU range: 60 - hw-specific max */
7295 dev
->min_mtu
= ETH_ZLEN
;
7296 jumbo_max
= rtl_jumbo_max(tp
);
7297 dev
->max_mtu
= jumbo_max
;
7299 tp
->hw_start
= cfg
->hw_start
;
7300 tp
->irq_mask
= RTL_EVENT_NAPI
| cfg
->irq_mask
;
7301 tp
->coalesce_info
= cfg
->coalesce_info
;
7303 tp
->fw_name
= rtl_chip_infos
[chipset
].fw_name
;
/* devm-managed coherent buffer for the HW statistics counters. */
7305 tp
->counters
= dmam_alloc_coherent (&pdev
->dev
, sizeof(*tp
->counters
),
7306 &tp
->counters_phys_addr
,
7311 pci_set_drvdata(pdev
, dev
);
7313 rc
= r8169_mdio_register(tp
);
7317 /* chip gets powered up in rtl_open() */
7318 rtl_pll_power_down(tp
);
7320 rc
= register_netdev(dev
);
7322 goto err_mdio_unregister
;
7324 netif_info(tp
, probe
, dev
, "%s, %pM, XID %03x, IRQ %d\n",
7325 rtl_chip_infos
[chipset
].name
, dev
->dev_addr
,
7326 (RTL_R32(tp
, TxConfig
) >> 20) & 0xfcf,
7327 pci_irq_vector(pdev
, 0));
7329 if (jumbo_max
> JUMBO_1K
)
7330 netif_info(tp
, probe
, dev
,
7331 "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
7332 jumbo_max
, tp
->mac_version
<= RTL_GIGA_MAC_VER_06
?
7335 if (r8168_check_dash(tp
))
7336 rtl8168_driver_start(tp
);
/* Pairs with pm_runtime_get_noresume() in rtl_remove_one(). */
7338 if (pci_dev_run_wake(pdev
))
7339 pm_runtime_put_sync(&pdev
->dev
);
7343 err_mdio_unregister
:
7344 mdiobus_unregister(tp
->phydev
->mdio
.bus
);
/*
 * PCI driver glue: id table, probe/remove/shutdown hooks and PM ops
 * (RTL8169_PM_OPS is NULL when CONFIG_PM is off). module_pci_driver()
 * generates the module init/exit boilerplate.
 *
 * NOTE(review): the ".name = MODULENAME," member and the closing "};"
 * were dropped by the extraction.
 */
7348 static struct pci_driver rtl8169_pci_driver
= {
7350 .id_table
= rtl8169_pci_tbl
,
7351 .probe
= rtl_init_one
,
7352 .remove
= rtl_remove_one
,
7353 .shutdown
= rtl_shutdown
,
7354 .driver
.pm
= RTL8169_PM_OPS
,
7357 module_pci_driver(rtl8169_pci_driver
);