]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/net/r8169.c
r8169: add helpers for per-device hw_start handler
[mirror_ubuntu-bionic-kernel.git] / drivers / net / r8169.c
CommitLineData
1da177e4
LT
1/*
2=========================================================================
3 r8169.c: A RealTek RTL-8169 Gigabit Ethernet driver for Linux kernel 2.4.x.
4 --------------------------------------------------------------------
5
6 History:
7 Feb 4 2002 - created initially by ShuChen <shuchen@realtek.com.tw>.
8 May 20 2002 - Add link status force-mode and TBI mode support.
5b0384f4 9 2004 - Massive updates. See kernel SCM system for details.
1da177e4
LT
10=========================================================================
11 1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes.
12 Command: 'insmod r8169 media = SET_MEDIA'
13 Ex: 'insmod r8169 media = 0x04' will force PHY to operate in 100Mpbs Half-duplex.
5b0384f4 14
1da177e4
LT
15 SET_MEDIA can be:
16 _10_Half = 0x01
17 _10_Full = 0x02
18 _100_Half = 0x04
19 _100_Full = 0x08
20 _1000_Full = 0x10
5b0384f4 21
1da177e4
LT
22 2. Support TBI mode.
23=========================================================================
24VERSION 1.1 <2002/10/4>
25
26 The bit4:0 of MII register 4 is called "selector field", and have to be
27 00001b to indicate support of IEEE std 802.3 during NWay process of
5b0384f4 28 exchanging Link Code Word (FLP).
1da177e4
LT
29
30VERSION 1.2 <2002/11/30>
31
32 - Large style cleanup
33 - Use ether_crc in stock kernel (linux/crc32.h)
34 - Copy mc_filter setup code from 8139cp
35 (includes an optimization, and avoids set_bit use)
36
37VERSION 1.6LK <2004/04/14>
38
39 - Merge of Realtek's version 1.6
40 - Conversion to DMA API
41 - Suspend/resume
42 - Endianness
43 - Misc Rx/Tx bugs
44
45VERSION 2.2LK <2005/01/25>
46
47 - RX csum, TX csum/SG, TSO
48 - VLAN
49 - baby (< 7200) Jumbo frames support
50 - Merge of Realtek's version 2.2 (new phy)
51 */
52
53#include <linux/module.h>
54#include <linux/moduleparam.h>
55#include <linux/pci.h>
56#include <linux/netdevice.h>
57#include <linux/etherdevice.h>
58#include <linux/delay.h>
59#include <linux/ethtool.h>
60#include <linux/mii.h>
61#include <linux/if_vlan.h>
62#include <linux/crc32.h>
63#include <linux/in.h>
64#include <linux/ip.h>
65#include <linux/tcp.h>
66#include <linux/init.h>
67#include <linux/dma-mapping.h>
68
99f252b0 69#include <asm/system.h>
1da177e4
LT
70#include <asm/io.h>
71#include <asm/irq.h>
72
f7ccf420
SH
73#ifdef CONFIG_R8169_NAPI
74#define NAPI_SUFFIX "-NAPI"
75#else
76#define NAPI_SUFFIX ""
77#endif
78
79#define RTL8169_VERSION "2.2LK" NAPI_SUFFIX
1da177e4
LT
80#define MODULENAME "r8169"
81#define PFX MODULENAME ": "
82
83#ifdef RTL8169_DEBUG
84#define assert(expr) \
5b0384f4
FR
85 if (!(expr)) { \
86 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
87 #expr,__FILE__,__FUNCTION__,__LINE__); \
88 }
1da177e4
LT
89#define dprintk(fmt, args...) do { printk(PFX fmt, ## args); } while (0)
90#else
91#define assert(expr) do {} while (0)
92#define dprintk(fmt, args...) do {} while (0)
93#endif /* RTL8169_DEBUG */
94
b57b7e5a 95#define R8169_MSG_DEFAULT \
f0e837d9 96 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
b57b7e5a 97
1da177e4
LT
98#define TX_BUFFS_AVAIL(tp) \
99 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
100
101#ifdef CONFIG_R8169_NAPI
102#define rtl8169_rx_skb netif_receive_skb
0b50f81d 103#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb
1da177e4
LT
104#define rtl8169_rx_quota(count, quota) min(count, quota)
105#else
106#define rtl8169_rx_skb netif_rx
0b50f81d 107#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx
1da177e4
LT
108#define rtl8169_rx_quota(count, quota) count
109#endif
110
111/* media options */
112#define MAX_UNITS 8
113static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
114static int num_media = 0;
115
116/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
f71e1309 117static const int max_interrupt_work = 20;
1da177e4
LT
118
119/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
120 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
f71e1309 121static const int multicast_filter_limit = 32;
1da177e4
LT
122
123/* MAC address length */
124#define MAC_ADDR_LEN 6
125
126#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
127#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
128#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
129#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */
130#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */
131#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
132#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
133
134#define R8169_REGS_SIZE 256
135#define R8169_NAPI_WEIGHT 64
136#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
137#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
138#define RX_BUF_SIZE 1536 /* Rx Buffer size */
139#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
140#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
141
142#define RTL8169_TX_TIMEOUT (6*HZ)
143#define RTL8169_PHY_TIMEOUT (10*HZ)
144
145/* write/read MMIO register */
146#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
147#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
148#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
149#define RTL_R8(reg) readb (ioaddr + (reg))
150#define RTL_R16(reg) readw (ioaddr + (reg))
151#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
152
153enum mac_version {
bcf0bf90
FR
154 RTL_GIGA_MAC_VER_01 = 0x00,
155 RTL_GIGA_MAC_VER_02 = 0x01,
156 RTL_GIGA_MAC_VER_03 = 0x02,
157 RTL_GIGA_MAC_VER_04 = 0x03,
158 RTL_GIGA_MAC_VER_05 = 0x04,
159 RTL_GIGA_MAC_VER_11 = 0x0b,
160 RTL_GIGA_MAC_VER_12 = 0x0c,
161 RTL_GIGA_MAC_VER_13 = 0x0d,
162 RTL_GIGA_MAC_VER_14 = 0x0e,
163 RTL_GIGA_MAC_VER_15 = 0x0f
1da177e4
LT
164};
165
166enum phy_version {
167 RTL_GIGA_PHY_VER_C = 0x03, /* PHY Reg 0x03 bit0-3 == 0x0000 */
168 RTL_GIGA_PHY_VER_D = 0x04, /* PHY Reg 0x03 bit0-3 == 0x0000 */
169 RTL_GIGA_PHY_VER_E = 0x05, /* PHY Reg 0x03 bit0-3 == 0x0000 */
170 RTL_GIGA_PHY_VER_F = 0x06, /* PHY Reg 0x03 bit0-3 == 0x0001 */
171 RTL_GIGA_PHY_VER_G = 0x07, /* PHY Reg 0x03 bit0-3 == 0x0002 */
172 RTL_GIGA_PHY_VER_H = 0x08, /* PHY Reg 0x03 bit0-3 == 0x0003 */
173};
174
1da177e4
LT
175#define _R(NAME,MAC,MASK) \
176 { .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }
177
3c6bee1d 178static const struct {
1da177e4
LT
179 const char *name;
180 u8 mac_version;
181 u32 RxConfigMask; /* Clears the bits supported by this chip */
182} rtl_chip_info[] = {
bcf0bf90
FR
183 _R("RTL8169", RTL_GIGA_MAC_VER_01, 0xff7e1880),
184 _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_02, 0xff7e1880),
185 _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_03, 0xff7e1880),
186 _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880),
187 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880),
188 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
189 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
190 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
191 _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139
192 _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880) // PCI-E 8139
1da177e4
LT
193};
194#undef _R
195
bcf0bf90
FR
196enum cfg_version {
197 RTL_CFG_0 = 0x00,
198 RTL_CFG_1,
199 RTL_CFG_2
200};
201
07ce4064
FR
202static void rtl_hw_start_8169(struct net_device *);
203static void rtl_hw_start_8168(struct net_device *);
204static void rtl_hw_start_8101(struct net_device *);
205
bcf0bf90 206static const struct {
07ce4064 207 void (*hw_start)(struct net_device *);
bcf0bf90
FR
208 unsigned int region;
209 unsigned int align;
210} rtl_cfg_info[] = {
07ce4064
FR
211 [RTL_CFG_0] = { rtl_hw_start_8169, 1, NET_IP_ALIGN },
212 [RTL_CFG_1] = { rtl_hw_start_8168, 2, 8 },
213 [RTL_CFG_2] = { rtl_hw_start_8101, 2, 8 }
bcf0bf90
FR
214};
215
1da177e4 216static struct pci_device_id rtl8169_pci_tbl[] = {
bcf0bf90 217 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
d2eed8cf 218 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
d81bf551 219 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
07ce4064 220 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
bcf0bf90
FR
221 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
222 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
73f5e28b 223 { PCI_DEVICE(0x1259, 0xc107), 0, 0, RTL_CFG_0 },
bcf0bf90
FR
224 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
225 { PCI_VENDOR_ID_LINKSYS, 0x1032,
226 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
1da177e4
LT
227 {0,},
228};
229
230MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
231
232static int rx_copybreak = 200;
233static int use_dac;
b57b7e5a
SH
234static struct {
235 u32 msg_enable;
236} debug = { -1 };
1da177e4
LT
237
238enum RTL8169_registers {
239 MAC0 = 0, /* Ethernet hardware address. */
240 MAR0 = 8, /* Multicast filter. */
d4a3a0fc
SH
241 CounterAddrLow = 0x10,
242 CounterAddrHigh = 0x14,
1da177e4
LT
243 TxDescStartAddrLow = 0x20,
244 TxDescStartAddrHigh = 0x24,
245 TxHDescStartAddrLow = 0x28,
246 TxHDescStartAddrHigh = 0x2c,
247 FLASH = 0x30,
248 ERSR = 0x36,
249 ChipCmd = 0x37,
250 TxPoll = 0x38,
251 IntrMask = 0x3C,
252 IntrStatus = 0x3E,
253 TxConfig = 0x40,
254 RxConfig = 0x44,
255 RxMissed = 0x4C,
256 Cfg9346 = 0x50,
257 Config0 = 0x51,
258 Config1 = 0x52,
259 Config2 = 0x53,
260 Config3 = 0x54,
261 Config4 = 0x55,
262 Config5 = 0x56,
263 MultiIntr = 0x5C,
264 PHYAR = 0x60,
265 TBICSR = 0x64,
266 TBI_ANAR = 0x68,
267 TBI_LPAR = 0x6A,
268 PHYstatus = 0x6C,
269 RxMaxSize = 0xDA,
270 CPlusCmd = 0xE0,
271 IntrMitigate = 0xE2,
272 RxDescAddrLow = 0xE4,
273 RxDescAddrHigh = 0xE8,
274 EarlyTxThres = 0xEC,
275 FuncEvent = 0xF0,
276 FuncEventMask = 0xF4,
277 FuncPresetState = 0xF8,
278 FuncForceEvent = 0xFC,
279};
280
281enum RTL8169_register_content {
282 /* InterruptStatusBits */
283 SYSErr = 0x8000,
284 PCSTimeout = 0x4000,
285 SWInt = 0x0100,
286 TxDescUnavail = 0x80,
287 RxFIFOOver = 0x40,
288 LinkChg = 0x20,
289 RxOverflow = 0x10,
290 TxErr = 0x08,
291 TxOK = 0x04,
292 RxErr = 0x02,
293 RxOK = 0x01,
294
295 /* RxStatusDesc */
9dccf611
FR
296 RxFOVF = (1 << 23),
297 RxRWT = (1 << 22),
298 RxRES = (1 << 21),
299 RxRUNT = (1 << 20),
300 RxCRC = (1 << 19),
1da177e4
LT
301
302 /* ChipCmdBits */
303 CmdReset = 0x10,
304 CmdRxEnb = 0x08,
305 CmdTxEnb = 0x04,
306 RxBufEmpty = 0x01,
307
308 /* Cfg9346Bits */
309 Cfg9346_Lock = 0x00,
310 Cfg9346_Unlock = 0xC0,
311
312 /* rx_mode_bits */
313 AcceptErr = 0x20,
314 AcceptRunt = 0x10,
315 AcceptBroadcast = 0x08,
316 AcceptMulticast = 0x04,
317 AcceptMyPhys = 0x02,
318 AcceptAllPhys = 0x01,
319
320 /* RxConfigBits */
321 RxCfgFIFOShift = 13,
322 RxCfgDMAShift = 8,
323
324 /* TxConfigBits */
325 TxInterFrameGapShift = 24,
326 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
327
5d06a99f
FR
328 /* Config1 register p.24 */
329 PMEnable = (1 << 0), /* Power Management Enable */
330
61a4dcc2
FR
331 /* Config3 register p.25 */
332 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
333 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
334
5d06a99f 335 /* Config5 register p.27 */
61a4dcc2
FR
336 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
337 MWF = (1 << 5), /* Accept Multicast wakeup frame */
338 UWF = (1 << 4), /* Accept Unicast wakeup frame */
339 LanWake = (1 << 1), /* LanWake enable/disable */
5d06a99f
FR
340 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
341
1da177e4
LT
342 /* TBICSR p.28 */
343 TBIReset = 0x80000000,
344 TBILoopback = 0x40000000,
345 TBINwEnable = 0x20000000,
346 TBINwRestart = 0x10000000,
347 TBILinkOk = 0x02000000,
348 TBINwComplete = 0x01000000,
349
350 /* CPlusCmd p.31 */
351 RxVlan = (1 << 6),
352 RxChkSum = (1 << 5),
353 PCIDAC = (1 << 4),
354 PCIMulRW = (1 << 3),
355
356 /* rtl8169_PHYstatus */
357 TBI_Enable = 0x80,
358 TxFlowCtrl = 0x40,
359 RxFlowCtrl = 0x20,
360 _1000bpsF = 0x10,
361 _100bps = 0x08,
362 _10bps = 0x04,
363 LinkStatus = 0x02,
364 FullDup = 0x01,
365
1da177e4
LT
366 /* _MediaType */
367 _10_Half = 0x01,
368 _10_Full = 0x02,
369 _100_Half = 0x04,
370 _100_Full = 0x08,
371 _1000_Full = 0x10,
372
373 /* _TBICSRBit */
374 TBILinkOK = 0x02000000,
d4a3a0fc
SH
375
376 /* DumpCounterCommand */
377 CounterDump = 0x8,
1da177e4
LT
378};
379
380enum _DescStatusBit {
381 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
382 RingEnd = (1 << 30), /* End of descriptor ring */
383 FirstFrag = (1 << 29), /* First segment of a packet */
384 LastFrag = (1 << 28), /* Final segment of a packet */
385
386 /* Tx private */
387 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
388 MSSShift = 16, /* MSS value position */
389 MSSMask = 0xfff, /* MSS value + LargeSend bit: 12 bits */
390 IPCS = (1 << 18), /* Calculate IP checksum */
391 UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
392 TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
393 TxVlanTag = (1 << 17), /* Add VLAN tag */
394
395 /* Rx private */
396 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
397 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
398
399#define RxProtoUDP (PID1)
400#define RxProtoTCP (PID0)
401#define RxProtoIP (PID1 | PID0)
402#define RxProtoMask RxProtoIP
403
404 IPFail = (1 << 16), /* IP checksum failed */
405 UDPFail = (1 << 15), /* UDP/IP checksum failed */
406 TCPFail = (1 << 14), /* TCP/IP checksum failed */
407 RxVlanTag = (1 << 16), /* VLAN tag available */
408};
409
410#define RsvdMask 0x3fffc000
411
412struct TxDesc {
413 u32 opts1;
414 u32 opts2;
415 u64 addr;
416};
417
418struct RxDesc {
419 u32 opts1;
420 u32 opts2;
421 u64 addr;
422};
423
424struct ring_info {
425 struct sk_buff *skb;
426 u32 len;
427 u8 __pad[sizeof(void *) - sizeof(u32)];
428};
429
430struct rtl8169_private {
431 void __iomem *mmio_addr; /* memory map physical address */
432 struct pci_dev *pci_dev; /* Index of PCI device */
c4028958 433 struct net_device *dev;
1da177e4
LT
434 struct net_device_stats stats; /* statistics of net device */
435 spinlock_t lock; /* spin lock flag */
b57b7e5a 436 u32 msg_enable;
1da177e4
LT
437 int chipset;
438 int mac_version;
439 int phy_version;
440 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
441 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
442 u32 dirty_rx;
443 u32 dirty_tx;
444 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
445 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
446 dma_addr_t TxPhyAddr;
447 dma_addr_t RxPhyAddr;
448 struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */
449 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
bcf0bf90 450 unsigned align;
1da177e4
LT
451 unsigned rx_buf_sz;
452 struct timer_list timer;
453 u16 cp_cmd;
454 u16 intr_mask;
455 int phy_auto_nego_reg;
456 int phy_1000_ctrl_reg;
457#ifdef CONFIG_R8169_VLAN
458 struct vlan_group *vlgrp;
459#endif
460 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
461 void (*get_settings)(struct net_device *, struct ethtool_cmd *);
462 void (*phy_reset_enable)(void __iomem *);
07ce4064 463 void (*hw_start)(struct net_device *);
1da177e4
LT
464 unsigned int (*phy_reset_pending)(void __iomem *);
465 unsigned int (*link_ok)(void __iomem *);
c4028958 466 struct delayed_work task;
61a4dcc2 467 unsigned wol_enabled : 1;
1da177e4
LT
468};
469
979b6c13 470MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
1da177e4
LT
471MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
472module_param_array(media, int, &num_media, 0);
df0a1bf6 473MODULE_PARM_DESC(media, "force phy operation. Deprecated by ethtool (8).");
1da177e4 474module_param(rx_copybreak, int, 0);
1b7efd58 475MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
1da177e4
LT
476module_param(use_dac, int, 0);
477MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
b57b7e5a
SH
478module_param_named(debug, debug.msg_enable, int, 0);
479MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
1da177e4
LT
480MODULE_LICENSE("GPL");
481MODULE_VERSION(RTL8169_VERSION);
482
483static int rtl8169_open(struct net_device *dev);
484static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev);
7d12e780 485static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
1da177e4 486static int rtl8169_init_ring(struct net_device *dev);
07ce4064 487static void rtl_hw_start(struct net_device *dev);
1da177e4 488static int rtl8169_close(struct net_device *dev);
07ce4064 489static void rtl_set_rx_mode(struct net_device *dev);
1da177e4 490static void rtl8169_tx_timeout(struct net_device *dev);
4dcb7d33 491static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
1da177e4
LT
492static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
493 void __iomem *);
4dcb7d33 494static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
1da177e4 495static void rtl8169_down(struct net_device *dev);
99f252b0 496static void rtl8169_rx_clear(struct rtl8169_private *tp);
1da177e4
LT
497
498#ifdef CONFIG_R8169_NAPI
499static int rtl8169_poll(struct net_device *dev, int *budget);
500#endif
501
502static const u16 rtl8169_intr_mask =
503 SYSErr | LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
504static const u16 rtl8169_napi_event =
505 RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr;
506static const unsigned int rtl8169_rx_config =
5b0384f4 507 (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
1da177e4
LT
508
509static void mdio_write(void __iomem *ioaddr, int RegAddr, int value)
510{
511 int i;
512
513 RTL_W32(PHYAR, 0x80000000 | (RegAddr & 0xFF) << 16 | value);
1da177e4 514
2371408c 515 for (i = 20; i > 0; i--) {
1da177e4 516 /* Check if the RTL8169 has completed writing to the specified MII register */
5b0384f4 517 if (!(RTL_R32(PHYAR) & 0x80000000))
1da177e4 518 break;
2371408c 519 udelay(25);
1da177e4
LT
520 }
521}
522
523static int mdio_read(void __iomem *ioaddr, int RegAddr)
524{
525 int i, value = -1;
526
527 RTL_W32(PHYAR, 0x0 | (RegAddr & 0xFF) << 16);
1da177e4 528
2371408c 529 for (i = 20; i > 0; i--) {
1da177e4
LT
530 /* Check if the RTL8169 has completed retrieving data from the specified MII register */
531 if (RTL_R32(PHYAR) & 0x80000000) {
532 value = (int) (RTL_R32(PHYAR) & 0xFFFF);
533 break;
534 }
2371408c 535 udelay(25);
1da177e4
LT
536 }
537 return value;
538}
539
540static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
541{
542 RTL_W16(IntrMask, 0x0000);
543
544 RTL_W16(IntrStatus, 0xffff);
545}
546
547static void rtl8169_asic_down(void __iomem *ioaddr)
548{
549 RTL_W8(ChipCmd, 0x00);
550 rtl8169_irq_mask_and_ack(ioaddr);
551 RTL_R16(CPlusCmd);
552}
553
554static unsigned int rtl8169_tbi_reset_pending(void __iomem *ioaddr)
555{
556 return RTL_R32(TBICSR) & TBIReset;
557}
558
559static unsigned int rtl8169_xmii_reset_pending(void __iomem *ioaddr)
560{
64e4bfb4 561 return mdio_read(ioaddr, MII_BMCR) & BMCR_RESET;
1da177e4
LT
562}
563
564static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
565{
566 return RTL_R32(TBICSR) & TBILinkOk;
567}
568
569static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
570{
571 return RTL_R8(PHYstatus) & LinkStatus;
572}
573
574static void rtl8169_tbi_reset_enable(void __iomem *ioaddr)
575{
576 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
577}
578
579static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
580{
581 unsigned int val;
582
9e0db8ef
FR
583 val = mdio_read(ioaddr, MII_BMCR) | BMCR_RESET;
584 mdio_write(ioaddr, MII_BMCR, val & 0xffff);
1da177e4
LT
585}
586
587static void rtl8169_check_link_status(struct net_device *dev,
588 struct rtl8169_private *tp, void __iomem *ioaddr)
589{
590 unsigned long flags;
591
592 spin_lock_irqsave(&tp->lock, flags);
593 if (tp->link_ok(ioaddr)) {
594 netif_carrier_on(dev);
b57b7e5a
SH
595 if (netif_msg_ifup(tp))
596 printk(KERN_INFO PFX "%s: link up\n", dev->name);
597 } else {
598 if (netif_msg_ifdown(tp))
599 printk(KERN_INFO PFX "%s: link down\n", dev->name);
1da177e4 600 netif_carrier_off(dev);
b57b7e5a 601 }
1da177e4
LT
602 spin_unlock_irqrestore(&tp->lock, flags);
603}
604
605static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
606{
607 struct {
608 u16 speed;
609 u8 duplex;
610 u8 autoneg;
611 u8 media;
612 } link_settings[] = {
613 { SPEED_10, DUPLEX_HALF, AUTONEG_DISABLE, _10_Half },
614 { SPEED_10, DUPLEX_FULL, AUTONEG_DISABLE, _10_Full },
615 { SPEED_100, DUPLEX_HALF, AUTONEG_DISABLE, _100_Half },
616 { SPEED_100, DUPLEX_FULL, AUTONEG_DISABLE, _100_Full },
617 { SPEED_1000, DUPLEX_FULL, AUTONEG_DISABLE, _1000_Full },
618 /* Make TBI happy */
619 { SPEED_1000, DUPLEX_FULL, AUTONEG_ENABLE, 0xff }
620 }, *p;
621 unsigned char option;
5b0384f4 622
1da177e4
LT
623 option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff;
624
b57b7e5a 625 if ((option != 0xff) && !idx && netif_msg_drv(&debug))
1da177e4
LT
626 printk(KERN_WARNING PFX "media option is deprecated.\n");
627
628 for (p = link_settings; p->media != 0xff; p++) {
629 if (p->media == option)
630 break;
631 }
632 *autoneg = p->autoneg;
633 *speed = p->speed;
634 *duplex = p->duplex;
635}
636
61a4dcc2
FR
637static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
638{
639 struct rtl8169_private *tp = netdev_priv(dev);
640 void __iomem *ioaddr = tp->mmio_addr;
641 u8 options;
642
643 wol->wolopts = 0;
644
645#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
646 wol->supported = WAKE_ANY;
647
648 spin_lock_irq(&tp->lock);
649
650 options = RTL_R8(Config1);
651 if (!(options & PMEnable))
652 goto out_unlock;
653
654 options = RTL_R8(Config3);
655 if (options & LinkUp)
656 wol->wolopts |= WAKE_PHY;
657 if (options & MagicPacket)
658 wol->wolopts |= WAKE_MAGIC;
659
660 options = RTL_R8(Config5);
661 if (options & UWF)
662 wol->wolopts |= WAKE_UCAST;
663 if (options & BWF)
5b0384f4 664 wol->wolopts |= WAKE_BCAST;
61a4dcc2 665 if (options & MWF)
5b0384f4 666 wol->wolopts |= WAKE_MCAST;
61a4dcc2
FR
667
668out_unlock:
669 spin_unlock_irq(&tp->lock);
670}
671
672static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
673{
674 struct rtl8169_private *tp = netdev_priv(dev);
675 void __iomem *ioaddr = tp->mmio_addr;
676 int i;
677 static struct {
678 u32 opt;
679 u16 reg;
680 u8 mask;
681 } cfg[] = {
682 { WAKE_ANY, Config1, PMEnable },
683 { WAKE_PHY, Config3, LinkUp },
684 { WAKE_MAGIC, Config3, MagicPacket },
685 { WAKE_UCAST, Config5, UWF },
686 { WAKE_BCAST, Config5, BWF },
687 { WAKE_MCAST, Config5, MWF },
688 { WAKE_ANY, Config5, LanWake }
689 };
690
691 spin_lock_irq(&tp->lock);
692
693 RTL_W8(Cfg9346, Cfg9346_Unlock);
694
695 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
696 u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
697 if (wol->wolopts & cfg[i].opt)
698 options |= cfg[i].mask;
699 RTL_W8(cfg[i].reg, options);
700 }
701
702 RTL_W8(Cfg9346, Cfg9346_Lock);
703
704 tp->wol_enabled = (wol->wolopts) ? 1 : 0;
705
706 spin_unlock_irq(&tp->lock);
707
708 return 0;
709}
710
1da177e4
LT
711static void rtl8169_get_drvinfo(struct net_device *dev,
712 struct ethtool_drvinfo *info)
713{
714 struct rtl8169_private *tp = netdev_priv(dev);
715
716 strcpy(info->driver, MODULENAME);
717 strcpy(info->version, RTL8169_VERSION);
718 strcpy(info->bus_info, pci_name(tp->pci_dev));
719}
720
721static int rtl8169_get_regs_len(struct net_device *dev)
722{
723 return R8169_REGS_SIZE;
724}
725
726static int rtl8169_set_speed_tbi(struct net_device *dev,
727 u8 autoneg, u16 speed, u8 duplex)
728{
729 struct rtl8169_private *tp = netdev_priv(dev);
730 void __iomem *ioaddr = tp->mmio_addr;
731 int ret = 0;
732 u32 reg;
733
734 reg = RTL_R32(TBICSR);
735 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
736 (duplex == DUPLEX_FULL)) {
737 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
738 } else if (autoneg == AUTONEG_ENABLE)
739 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
740 else {
b57b7e5a
SH
741 if (netif_msg_link(tp)) {
742 printk(KERN_WARNING "%s: "
743 "incorrect speed setting refused in TBI mode\n",
744 dev->name);
745 }
1da177e4
LT
746 ret = -EOPNOTSUPP;
747 }
748
749 return ret;
750}
751
752static int rtl8169_set_speed_xmii(struct net_device *dev,
753 u8 autoneg, u16 speed, u8 duplex)
754{
755 struct rtl8169_private *tp = netdev_priv(dev);
756 void __iomem *ioaddr = tp->mmio_addr;
757 int auto_nego, giga_ctrl;
758
64e4bfb4
FR
759 auto_nego = mdio_read(ioaddr, MII_ADVERTISE);
760 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
761 ADVERTISE_100HALF | ADVERTISE_100FULL);
762 giga_ctrl = mdio_read(ioaddr, MII_CTRL1000);
763 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1da177e4
LT
764
765 if (autoneg == AUTONEG_ENABLE) {
64e4bfb4
FR
766 auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
767 ADVERTISE_100HALF | ADVERTISE_100FULL);
768 giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
1da177e4
LT
769 } else {
770 if (speed == SPEED_10)
64e4bfb4 771 auto_nego |= ADVERTISE_10HALF | ADVERTISE_10FULL;
1da177e4 772 else if (speed == SPEED_100)
64e4bfb4 773 auto_nego |= ADVERTISE_100HALF | ADVERTISE_100FULL;
1da177e4 774 else if (speed == SPEED_1000)
64e4bfb4 775 giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
1da177e4
LT
776
777 if (duplex == DUPLEX_HALF)
64e4bfb4 778 auto_nego &= ~(ADVERTISE_10FULL | ADVERTISE_100FULL);
726ecdcf
AG
779
780 if (duplex == DUPLEX_FULL)
64e4bfb4 781 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_100HALF);
bcf0bf90
FR
782
783 /* This tweak comes straight from Realtek's driver. */
784 if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) &&
785 (tp->mac_version == RTL_GIGA_MAC_VER_13)) {
64e4bfb4 786 auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA;
bcf0bf90
FR
787 }
788 }
789
790 /* The 8100e/8101e do Fast Ethernet only. */
791 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
792 (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
793 (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
64e4bfb4 794 if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
bcf0bf90
FR
795 netif_msg_link(tp)) {
796 printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
797 dev->name);
798 }
64e4bfb4 799 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1da177e4
LT
800 }
801
623a1593
FR
802 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
803
1da177e4
LT
804 tp->phy_auto_nego_reg = auto_nego;
805 tp->phy_1000_ctrl_reg = giga_ctrl;
806
64e4bfb4
FR
807 mdio_write(ioaddr, MII_ADVERTISE, auto_nego);
808 mdio_write(ioaddr, MII_CTRL1000, giga_ctrl);
809 mdio_write(ioaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
1da177e4
LT
810 return 0;
811}
812
813static int rtl8169_set_speed(struct net_device *dev,
814 u8 autoneg, u16 speed, u8 duplex)
815{
816 struct rtl8169_private *tp = netdev_priv(dev);
817 int ret;
818
819 ret = tp->set_speed(dev, autoneg, speed, duplex);
820
64e4bfb4 821 if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
1da177e4
LT
822 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
823
824 return ret;
825}
826
827static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
828{
829 struct rtl8169_private *tp = netdev_priv(dev);
830 unsigned long flags;
831 int ret;
832
833 spin_lock_irqsave(&tp->lock, flags);
834 ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex);
835 spin_unlock_irqrestore(&tp->lock, flags);
5b0384f4 836
1da177e4
LT
837 return ret;
838}
839
840static u32 rtl8169_get_rx_csum(struct net_device *dev)
841{
842 struct rtl8169_private *tp = netdev_priv(dev);
843
844 return tp->cp_cmd & RxChkSum;
845}
846
847static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
848{
849 struct rtl8169_private *tp = netdev_priv(dev);
850 void __iomem *ioaddr = tp->mmio_addr;
851 unsigned long flags;
852
853 spin_lock_irqsave(&tp->lock, flags);
854
855 if (data)
856 tp->cp_cmd |= RxChkSum;
857 else
858 tp->cp_cmd &= ~RxChkSum;
859
860 RTL_W16(CPlusCmd, tp->cp_cmd);
861 RTL_R16(CPlusCmd);
862
863 spin_unlock_irqrestore(&tp->lock, flags);
864
865 return 0;
866}
867
868#ifdef CONFIG_R8169_VLAN
869
870static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
871 struct sk_buff *skb)
872{
873 return (tp->vlgrp && vlan_tx_tag_present(skb)) ?
874 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
875}
876
877static void rtl8169_vlan_rx_register(struct net_device *dev,
878 struct vlan_group *grp)
879{
880 struct rtl8169_private *tp = netdev_priv(dev);
881 void __iomem *ioaddr = tp->mmio_addr;
882 unsigned long flags;
883
884 spin_lock_irqsave(&tp->lock, flags);
885 tp->vlgrp = grp;
886 if (tp->vlgrp)
887 tp->cp_cmd |= RxVlan;
888 else
889 tp->cp_cmd &= ~RxVlan;
890 RTL_W16(CPlusCmd, tp->cp_cmd);
891 RTL_R16(CPlusCmd);
892 spin_unlock_irqrestore(&tp->lock, flags);
893}
894
1da177e4
LT
895static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
896 struct sk_buff *skb)
897{
898 u32 opts2 = le32_to_cpu(desc->opts2);
899 int ret;
900
901 if (tp->vlgrp && (opts2 & RxVlanTag)) {
902 rtl8169_rx_hwaccel_skb(skb, tp->vlgrp,
903 swab16(opts2 & 0xffff));
904 ret = 0;
905 } else
906 ret = -1;
907 desc->opts2 = 0;
908 return ret;
909}
910
911#else /* !CONFIG_R8169_VLAN */
912
913static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
914 struct sk_buff *skb)
915{
916 return 0;
917}
918
919static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
920 struct sk_buff *skb)
921{
922 return -1;
923}
924
925#endif
926
927static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
928{
929 struct rtl8169_private *tp = netdev_priv(dev);
930 void __iomem *ioaddr = tp->mmio_addr;
931 u32 status;
932
933 cmd->supported =
934 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
935 cmd->port = PORT_FIBRE;
936 cmd->transceiver = XCVR_INTERNAL;
937
938 status = RTL_R32(TBICSR);
939 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
940 cmd->autoneg = !!(status & TBINwEnable);
941
942 cmd->speed = SPEED_1000;
943 cmd->duplex = DUPLEX_FULL; /* Always set */
944}
945
946static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
947{
948 struct rtl8169_private *tp = netdev_priv(dev);
949 void __iomem *ioaddr = tp->mmio_addr;
950 u8 status;
951
952 cmd->supported = SUPPORTED_10baseT_Half |
953 SUPPORTED_10baseT_Full |
954 SUPPORTED_100baseT_Half |
955 SUPPORTED_100baseT_Full |
956 SUPPORTED_1000baseT_Full |
957 SUPPORTED_Autoneg |
5b0384f4 958 SUPPORTED_TP;
1da177e4
LT
959
960 cmd->autoneg = 1;
961 cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
962
64e4bfb4 963 if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
1da177e4 964 cmd->advertising |= ADVERTISED_10baseT_Half;
64e4bfb4 965 if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
1da177e4 966 cmd->advertising |= ADVERTISED_10baseT_Full;
64e4bfb4 967 if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
1da177e4 968 cmd->advertising |= ADVERTISED_100baseT_Half;
64e4bfb4 969 if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
1da177e4 970 cmd->advertising |= ADVERTISED_100baseT_Full;
64e4bfb4 971 if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
1da177e4
LT
972 cmd->advertising |= ADVERTISED_1000baseT_Full;
973
974 status = RTL_R8(PHYstatus);
975
976 if (status & _1000bpsF)
977 cmd->speed = SPEED_1000;
978 else if (status & _100bps)
979 cmd->speed = SPEED_100;
980 else if (status & _10bps)
981 cmd->speed = SPEED_10;
982
623a1593
FR
983 if (status & TxFlowCtrl)
984 cmd->advertising |= ADVERTISED_Asym_Pause;
985 if (status & RxFlowCtrl)
986 cmd->advertising |= ADVERTISED_Pause;
987
1da177e4
LT
988 cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
989 DUPLEX_FULL : DUPLEX_HALF;
990}
991
/*
 * ethtool get_settings entry point.  Dispatches to the TBI or xMII
 * backend selected at probe time (tp->get_settings), serialized against
 * the rest of the driver by tp->lock.  Always succeeds.
 */
static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);

	tp->get_settings(dev, cmd);

	spin_unlock_irqrestore(&tp->lock, flags);
	return 0;
}
1004
/*
 * ethtool get_regs: dump the MMIO register window into @p.
 * The requested length is clamped to the size of the register block;
 * the copy is done under tp->lock so it is a consistent snapshot with
 * respect to other register writers in this driver.
 */
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len > R8169_REGS_SIZE)
		regs->len = R8169_REGS_SIZE;

	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}
1018
b57b7e5a
SH
1019static u32 rtl8169_get_msglevel(struct net_device *dev)
1020{
1021 struct rtl8169_private *tp = netdev_priv(dev);
1022
1023 return tp->msg_enable;
1024}
1025
1026static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1027{
1028 struct rtl8169_private *tp = netdev_priv(dev);
1029
1030 tp->msg_enable = value;
1031}
1032
d4a3a0fc
SH
/*
 * ethtool statistics names, one per entry of struct rtl8169_counters.
 * Order must match both the struct layout and the data[] indexing in
 * rtl8169_get_ethtool_stats().
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1048
/*
 * Hardware counter dump block.  The chip DMA-writes this structure when
 * CounterDump is triggered (see rtl8169_get_ethtool_stats); the layout
 * and field widths are therefore dictated by the hardware and must not
 * be changed.  Values arrive little-endian (callers use le*_to_cpu).
 */
struct rtl8169_counters {
	u64	tx_packets;
	u64	rx_packets;
	u64	tx_errors;
	u32	rx_errors;
	u16	rx_missed;
	u16	align_errors;
	u32	tx_one_collision;
	u32	tx_multi_collision;
	u64	rx_unicast;
	u64	rx_broadcast;
	u32	rx_multicast;
	u16	tx_aborted;
	u16	tx_underun;	/* sic - historical spelling, kept for grep-ability */
};
1064
/* ethtool hook: number of u64 statistics exposed (one per string). */
static int rtl8169_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(rtl8169_gstrings);
}
1069
1070static void rtl8169_get_ethtool_stats(struct net_device *dev,
1071 struct ethtool_stats *stats, u64 *data)
1072{
1073 struct rtl8169_private *tp = netdev_priv(dev);
1074 void __iomem *ioaddr = tp->mmio_addr;
1075 struct rtl8169_counters *counters;
1076 dma_addr_t paddr;
1077 u32 cmd;
1078
1079 ASSERT_RTNL();
1080
1081 counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
1082 if (!counters)
1083 return;
1084
1085 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
1086 cmd = (u64)paddr & DMA_32BIT_MASK;
1087 RTL_W32(CounterAddrLow, cmd);
1088 RTL_W32(CounterAddrLow, cmd | CounterDump);
1089
1090 while (RTL_R32(CounterAddrLow) & CounterDump) {
1091 if (msleep_interruptible(1))
1092 break;
1093 }
1094
1095 RTL_W32(CounterAddrLow, 0);
1096 RTL_W32(CounterAddrHigh, 0);
1097
5b0384f4 1098 data[0] = le64_to_cpu(counters->tx_packets);
d4a3a0fc
SH
1099 data[1] = le64_to_cpu(counters->rx_packets);
1100 data[2] = le64_to_cpu(counters->tx_errors);
1101 data[3] = le32_to_cpu(counters->rx_errors);
1102 data[4] = le16_to_cpu(counters->rx_missed);
1103 data[5] = le16_to_cpu(counters->align_errors);
1104 data[6] = le32_to_cpu(counters->tx_one_collision);
1105 data[7] = le32_to_cpu(counters->tx_multi_collision);
1106 data[8] = le64_to_cpu(counters->rx_unicast);
1107 data[9] = le64_to_cpu(counters->rx_broadcast);
1108 data[10] = le32_to_cpu(counters->rx_multicast);
1109 data[11] = le16_to_cpu(counters->tx_aborted);
1110 data[12] = le16_to_cpu(counters->tx_underun);
1111
1112 pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
1113}
1114
1115static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1116{
1117 switch(stringset) {
1118 case ETH_SS_STATS:
1119 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1120 break;
1121 }
1122}
1123
1124
/*
 * ethtool operations table.  Checksum/SG/TSO use the generic stubs;
 * the remaining hooks are implemented above (settings, regs, WoL,
 * message level and hardware statistics).
 */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_rx_csum		= rtl8169_get_rx_csum,
	.set_rx_csum		= rtl8169_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_stats_count	= rtl8169_get_stats_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
1149
1150static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum,
1151 int bitval)
1152{
1153 int val;
1154
1155 val = mdio_read(ioaddr, reg);
1156 val = (bitval == 1) ?
1157 val | (bitval << bitnum) : val & ~(0x0001 << bitnum);
5b0384f4 1158 mdio_write(ioaddr, reg, val & 0xffff);
1da177e4
LT
1159}
1160
/*
 * Identify the MAC revision from bits 30-22 of the TxConfig register.
 * The table is ordered most-specific mask first; the final 0x00000000
 * entry always matches, so the walk below cannot run off the array.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *ioaddr)
{
	const struct {
		u32 mask;
		int mac_version;
	} mac_info[] = {
		{ 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x30800000,	RTL_GIGA_MAC_VER_14 },
		{ 0x30000000,	RTL_GIGA_MAC_VER_11 },
		{ 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0x00000000,	RTL_GIGA_MAC_VER_01 }	/* Catch-all */
	}, *p = mac_info;
	u32 reg;

	/* Only the version-identifying bits of TxConfig are relevant. */
	reg = RTL_R32(TxConfig) & 0x7c800000;
	while ((reg & p->mask) != p->mask)
		p++;
	tp->mac_version = p->mac_version;
}
1185
/* Debug helper: log the detected MAC revision (no-op unless dprintk is live). */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
1190
/*
 * Identify the PHY revision from the low nibble of MII_PHYSID2.
 * Like the MAC table, the last (all-zero-mask) entry matches anything,
 * guaranteeing the scan terminates inside the array.
 */
static void rtl8169_get_phy_version(struct rtl8169_private *tp, void __iomem *ioaddr)
{
	const struct {
		u16 mask;
		u16 set;
		int phy_version;
	} phy_info[] = {
		{ 0x000f, 0x0002, RTL_GIGA_PHY_VER_G },
		{ 0x000f, 0x0001, RTL_GIGA_PHY_VER_F },
		{ 0x000f, 0x0000, RTL_GIGA_PHY_VER_E },
		{ 0x0000, 0x0000, RTL_GIGA_PHY_VER_D } /* Catch-all */
	}, *p = phy_info;
	u16 reg;

	reg = mdio_read(ioaddr, MII_PHYSID2) & 0xffff;
	while ((reg & p->mask) != p->set)
		p++;
	tp->phy_version = p->phy_version;
}
1210
1211static void rtl8169_print_phy_version(struct rtl8169_private *tp)
1212{
1213 struct {
1214 int version;
1215 char *msg;
1216 u32 reg;
1217 } phy_print[] = {
1218 { RTL_GIGA_PHY_VER_G, "RTL_GIGA_PHY_VER_G", 0x0002 },
1219 { RTL_GIGA_PHY_VER_F, "RTL_GIGA_PHY_VER_F", 0x0001 },
1220 { RTL_GIGA_PHY_VER_E, "RTL_GIGA_PHY_VER_E", 0x0000 },
1221 { RTL_GIGA_PHY_VER_D, "RTL_GIGA_PHY_VER_D", 0x0000 },
1222 { 0, NULL, 0x0000 }
1223 }, *p;
1224
1225 for (p = phy_print; p->msg; p++) {
1226 if (tp->phy_version == p->version) {
1227 dprintk("phy_version == %s (%04x)\n", p->msg, p->reg);
1228 return;
1229 }
1230 }
1231 dprintk("phy_version == Unknown\n");
1232}
1233
/*
 * Apply vendor "magic" PHY register fixups for older MAC/PHY revisions.
 * The write sequences (and their exact order) come from Realtek's
 * reference driver configuration files; the trailing //w comments are
 * the original vendor script lines.  Do not reorder.
 */
static void rtl8169_hw_phy_config(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct {
		u16 regs[5]; /* Beware of bit-sign propagation */
	} phy_magic[5] = { {
		{ 0x0000,	//w 4 15 12 0
		  0x00a1,	//w 3 15 0 00a1
		  0x0008,	//w 2 15 0 0008
		  0x1020,	//w 1 15 0 1020
		  0x1000 } },{	//w 0 15 0 1000
		{ 0x7000,	//w 4 15 12 7
		  0xff41,	//w 3 15 0 ff41
		  0xde60,	//w 2 15 0 de60
		  0x0140,	//w 1 15 0 0140
		  0x0077 } },{	//w 0 15 0 0077
		{ 0xa000,	//w 4 15 12 a
		  0xdf01,	//w 3 15 0 df01
		  0xdf20,	//w 2 15 0 df20
		  0xff95,	//w 1 15 0 ff95
		  0xfa00 } },{	//w 0 15 0 fa00
		{ 0xb000,	//w 4 15 12 b
		  0xff41,	//w 3 15 0 ff41
		  0xde20,	//w 2 15 0 de20
		  0x0140,	//w 1 15 0 0140
		  0x00bb } },{	//w 0 15 0 00bb
		{ 0xf000,	//w 4 15 12 f
		  0xdf01,	//w 3 15 0 df01
		  0xdf20,	//w 2 15 0 df20
		  0xff95,	//w 1 15 0 ff95
		  0xbf00 }	//w 0 15 0 bf00
		}
	}, *p = phy_magic;
	int i;

	rtl8169_print_mac_version(tp);
	rtl8169_print_phy_version(tp);

	/* Nothing to patch on the original MAC or on recent PHYs. */
	if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
		return;
	if (tp->phy_version >= RTL_GIGA_PHY_VER_H)
		return;

	dprintk("MAC version != 0 && PHY version == 0 or 1\n");
	dprintk("Do final_reg2.cfg\n");

	/* Shazam ! */

	/* MAC VER_04 needs only a single write in page 2. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_04) {
		mdio_write(ioaddr, 31, 0x0002);
		mdio_write(ioaddr,  1, 0x90d0);
		mdio_write(ioaddr, 31, 0x0000);
		return;
	}

	/* phy config for RTL8169s mac_version C chip */
	mdio_write(ioaddr, 31, 0x0001);			//w 31 2 0 1
	mdio_write(ioaddr, 21, 0x1000);			//w 21 15 0 1000
	mdio_write(ioaddr, 24, 0x65c7);			//w 24 15 0 65c7
	rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0);	//w 4 11 11 0

	/* Each phy_magic row: write regs 4..0 (reg 4 keeps its low 12 bits),
	 * then pulse bit 11 of reg 4 to latch the values. */
	for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) {
		int val, pos = 4;

		val = (mdio_read(ioaddr, pos) & 0x0fff) | (p->regs[0] & 0xffff);
		mdio_write(ioaddr, pos, val);
		while (--pos >= 0)
			mdio_write(ioaddr, pos, p->regs[4 - pos] & 0xffff);
		rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1
		rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
	}
	mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0
}
1308
/*
 * Periodic PHY watchdog (tp->timer callback).  While 1000Full is
 * advertised and the link is down, keep kicking a PHY reset until the
 * link comes up.  Re-arms itself; rtl8169_delete_timer stops it.
 */
static void rtl8169_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct rtl8169_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	/* The timer is only ever armed for these chip revisions. */
	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
	assert(tp->phy_version < RTL_GIGA_PHY_VER_H);

	/* The workaround only matters when gigabit is being advertised. */
	if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
		return;

	spin_lock_irq(&tp->lock);

	if (tp->phy_reset_pending(ioaddr)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	if (tp->link_ok(ioaddr))
		goto out_unlock;

	if (netif_msg_link(tp))
		printk(KERN_WARNING "%s: PHY reset until link up\n", dev->name);

	tp->phy_reset_enable(ioaddr);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
out_unlock:
	spin_unlock_irq(&tp->lock);
}
1347
/*
 * Stop the PHY watchdog timer.  Mirrors the arming condition in
 * rtl8169_request_timer: chips that never start it have nothing to stop.
 */
static inline void rtl8169_delete_timer(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;

	if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
	    (tp->phy_version >= RTL_GIGA_PHY_VER_H))
		return;

	del_timer_sync(timer);
}
1359
/*
 * Arm the PHY watchdog timer (rtl8169_phy_timer) for chip revisions
 * that need the link-up reset workaround.  Old MACs / new PHYs skip it.
 */
static inline void rtl8169_request_timer(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;

	if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
	    (tp->phy_version >= RTL_GIGA_PHY_VER_H))
		return;

	mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT);
}
1371
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	/* Mask our IRQ line and run the handler by hand. */
	disable_irq(pdev->irq);
	rtl8169_interrupt(pdev->irq, dev);
	enable_irq(pdev->irq);
}
#endif
1388
/*
 * Undo the probe-time board setup: unmap MMIO, release the PCI regions,
 * disable the device and free the netdev.  Order mirrors (reverse of)
 * acquisition in rtl8169_init_one.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
1397
bf793295
FR
/*
 * Trigger a PHY reset and poll (up to ~100 ms, 1 ms steps) for it to
 * complete.  Failure is only logged - callers proceed regardless.
 */
static void rtl8169_phy_reset(struct net_device *dev,
			      struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int i;

	tp->phy_reset_enable(ioaddr);
	for (i = 0; i < 100; i++) {
		if (!tp->phy_reset_pending(ioaddr))
			return;
		msleep(1);
	}
	if (netif_msg_link(tp))
		printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
}
1413
4ff96fa6
FR
/*
 * One-time PHY bring-up at probe: apply vendor fixups, per-revision
 * MAC/PCI tweaks, then reset the PHY and program the link parameters
 * chosen by the module's per-board option arrays.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* Counts probe order so each board picks up its own module option
	 * slot via rtl8169_link_option(). */
	static int board_idx = -1;
	u8 autoneg, duplex;
	u16 speed;

	board_idx++;

	rtl8169_hw_phy_config(dev);

	dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
	RTL_W8(0x82, 0x01);

	if (tp->mac_version < RTL_GIGA_MAC_VER_03) {
		dprintk("Set PCI Latency=0x40\n");
		pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);

	rtl8169_phy_reset(dev, tp);

	rtl8169_set_speed(dev, autoneg, speed, duplex);

	if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
		printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
}
1449
5f787a1a
FR
/*
 * MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG), installed
 * only for non-TBI boards.  Register writes require CAP_NET_ADMIN.
 * Returns -ENODEV when the interface is down, -EOPNOTSUPP otherwise
 * for unknown commands.
 */
static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (!netif_running(dev))
		return -ENODEV;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = 32; /* Internal PHY */
		return 0;

	case SIOCGMIIREG:
		data->val_out = mdio_read(tp->mmio_addr, data->reg_num & 0x1f);
		return 0;

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		mdio_write(tp->mmio_addr, data->reg_num & 0x1f, data->val_in);
		return 0;
	}
	return -EOPNOTSUPP;
}
1475
/*
 * PCI probe: allocate the netdev, enable and map the device, identify
 * the MAC/PHY revision, wire up net_device/ethtool operations, register
 * the interface and initialize the PHY.  Errors unwind through the
 * numbered goto labels in strict reverse order of acquisition.
 */
static int __devinit
rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const unsigned int region = rtl_cfg_info[ent->driver_data].region;
	struct rtl8169_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	unsigned int pm_cap;
	int i, rc;

	if (netif_msg_drv(&debug)) {
		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
		       MODULENAME, RTL8169_VERSION);
	}

	dev = alloc_etherdev(sizeof (*tp));
	if (!dev) {
		if (netif_msg_drv(&debug))
			dev_err(&pdev->dev, "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto out;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);

	/* enable device (incl. PCI PM wakeup and hotplug setup) */
	rc = pci_enable_device(pdev);
	if (rc < 0) {
		if (netif_msg_probe(tp))
			dev_err(&pdev->dev, "enable failure\n");
		goto err_out_free_dev_1;
	}

	rc = pci_set_mwi(pdev);
	if (rc < 0)
		goto err_out_disable_2;

	/* save power state before pci_enable_device overwrites it */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap) {
		u16 pwr_command, acpi_idle_state;

		pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
	} else {
		/* Not fatal: continue without PM information. */
		if (netif_msg_probe(tp)) {
			dev_err(&pdev->dev,
				"PowerManagement capability not found.\n");
		}
	}

	/* make sure PCI base addr 1 is MMIO */
	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
		if (netif_msg_probe(tp)) {
			dev_err(&pdev->dev,
				"region #%d not an MMIO resource, aborting\n",
				region);
		}
		rc = -ENODEV;
		goto err_out_mwi_3;
	}

	/* check for weird/broken PCI region reporting */
	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
		if (netif_msg_probe(tp)) {
			dev_err(&pdev->dev,
				"Invalid PCI region size(s), aborting\n");
		}
		rc = -ENODEV;
		goto err_out_mwi_3;
	}

	rc = pci_request_regions(pdev, MODULENAME);
	if (rc < 0) {
		if (netif_msg_probe(tp))
			dev_err(&pdev->dev, "could not request regions.\n");
		goto err_out_mwi_3;
	}

	tp->cp_cmd = PCIMulRW | RxChkSum;

	/* Prefer 64 bit DMA when available and requested (use_dac). */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK) && use_dac) {
		tp->cp_cmd |= PCIDAC;
		dev->features |= NETIF_F_HIGHDMA;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc < 0) {
			if (netif_msg_probe(tp)) {
				dev_err(&pdev->dev,
					"DMA configuration failed.\n");
			}
			goto err_out_free_res_4;
		}
	}

	pci_set_master(pdev);

	/* ioremap MMIO region */
	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
	if (!ioaddr) {
		if (netif_msg_probe(tp))
			dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
		rc = -EIO;
		goto err_out_free_res_4;
	}

	/* Unneeded ? Don't mess with Mrs. Murphy. */
	rtl8169_irq_mask_and_ack(ioaddr);

	/* Soft reset the chip. */
	RTL_W8(ChipCmd, CmdReset);

	/* Check that the chip has finished the reset. */
	for (i = 100; i > 0; i--) {
		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
			break;
		msleep_interruptible(1);
	}

	/* Identify chip attached to board */
	rtl8169_get_mac_version(tp, ioaddr);
	rtl8169_get_phy_version(tp, ioaddr);

	rtl8169_print_mac_version(tp);
	rtl8169_print_phy_version(tp);

	for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) {
		if (tp->mac_version == rtl_chip_info[i].mac_version)
			break;
	}
	if (i < 0) {
		/* Unknown chip: assume array element #0, original RTL-8169 */
		if (netif_msg_probe(tp)) {
			dev_printk(KERN_DEBUG, &pdev->dev,
				   "unknown chip version, assuming %s\n",
				   rtl_chip_info[0].name);
		}
		i++;
	}
	tp->chipset = i;

	/* Enable PME and clear a possibly stale PME status bit. */
	RTL_W8(Cfg9346, Cfg9346_Unlock);
	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
	RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Select the TBI (fiber) or xMII (copper) operation vectors. */
	if (RTL_R8(PHYstatus) & TBI_Enable) {
		tp->set_speed = rtl8169_set_speed_tbi;
		tp->get_settings = rtl8169_gset_tbi;
		tp->phy_reset_enable = rtl8169_tbi_reset_enable;
		tp->phy_reset_pending = rtl8169_tbi_reset_pending;
		tp->link_ok = rtl8169_tbi_link_ok;

		tp->phy_1000_ctrl_reg = ADVERTISE_1000FULL; /* Implied by TBI */
	} else {
		tp->set_speed = rtl8169_set_speed_xmii;
		tp->get_settings = rtl8169_gset_xmii;
		tp->phy_reset_enable = rtl8169_xmii_reset_enable;
		tp->phy_reset_pending = rtl8169_xmii_reset_pending;
		tp->link_ok = rtl8169_xmii_link_ok;

		dev->do_ioctl = rtl8169_ioctl;
	}

	/* Get MAC address.  FIXME: read EEPROM */
	for (i = 0; i < MAC_ADDR_LEN; i++)
		dev->dev_addr[i] = RTL_R8(MAC0 + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->open = rtl8169_open;
	dev->hard_start_xmit = rtl8169_start_xmit;
	dev->get_stats = rtl8169_get_stats;
	SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
	dev->stop = rtl8169_close;
	dev->tx_timeout = rtl8169_tx_timeout;
	dev->set_multicast_list = rtl_set_rx_mode;
	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) ioaddr;
	dev->change_mtu = rtl8169_change_mtu;

#ifdef CONFIG_R8169_NAPI
	dev->poll = rtl8169_poll;
	dev->weight = R8169_NAPI_WEIGHT;
#endif

#ifdef CONFIG_R8169_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = rtl8169_vlan_rx_register;
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rtl8169_netpoll;
#endif

	tp->intr_mask = 0xffff;
	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;
	tp->align = rtl_cfg_info[ent->driver_data].align;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) dev;
	tp->timer.function = rtl8169_phy_timer;

	/* Per-family hw_start handler (8169 / 8168 / 8101). */
	tp->hw_start = rtl_cfg_info[ent->driver_data].hw_start;

	spin_lock_init(&tp->lock);

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_out_unmap_5;

	pci_set_drvdata(pdev, dev);

	if (netif_msg_probe(tp)) {
		printk(KERN_INFO "%s: %s at 0x%lx, "
		       "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
		       "IRQ %d\n",
		       dev->name,
		       rtl_chip_info[tp->chipset].name,
		       dev->base_addr,
		       dev->dev_addr[0], dev->dev_addr[1],
		       dev->dev_addr[2], dev->dev_addr[3],
		       dev->dev_addr[4], dev->dev_addr[5], dev->irq);
	}

	rtl8169_init_phy(dev, tp);

out:
	return rc;

err_out_unmap_5:
	iounmap(ioaddr);
err_out_free_res_4:
	pci_release_regions(pdev);
err_out_mwi_3:
	pci_clear_mwi(pdev);
err_out_disable_2:
	pci_disable_device(pdev);
err_out_free_dev_1:
	free_netdev(dev);
	goto out;
}
1724
/*
 * PCI remove: flush any queued driver work (reset task) before the
 * netdev goes away, then unregister and release the board resources.
 */
static void __devexit
rtl8169_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	assert(dev != NULL);
	assert(tp != NULL);

	flush_scheduled_work();

	unregister_netdev(dev);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
	pci_set_drvdata(pdev, NULL);
}
1740
1da177e4
LT
/*
 * Recompute the receive buffer size from the current MTU.  Standard
 * MTUs use the fixed RX_BUF_SIZE; jumbo MTUs add room for the Ethernet
 * header plus 8 bytes (presumably VLAN tag + FCS headroom - the exact
 * breakdown is not spelled out here).
 */
static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
				  struct net_device *dev)
{
	unsigned int mtu = dev->mtu;

	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
}
1748
/*
 * net_device open: allocate the DMA descriptor rings and Rx buffers,
 * grab the (shared) IRQ, start the hardware and arm the PHY watchdog.
 * Failures unwind through the numbered goto labels.
 */
static int rtl8169_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	rtl8169_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent provides more.
	 */
	tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
					       &tp->TxPhyAddr);
	if (!tp->TxDescArray)
		goto out;

	tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
					       &tp->RxPhyAddr);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_DELAYED_WORK(&tp->task, NULL);

	/* Make ring setup visible before the IRQ handler can run. */
	smp_mb();

	retval = request_irq(dev->irq, rtl8169_interrupt, IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_ring_2;

	rtl_hw_start(dev);

	rtl8169_request_timer(dev);

	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
out:
	return retval;

err_release_ring_2:
	rtl8169_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
			    tp->RxPhyAddr);
err_free_tx_0:
	pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
			    tp->TxPhyAddr);
	goto out;
}
1803
/*
 * Quiesce the chip: mask/ack interrupts, issue a soft reset, and read
 * ChipCmd back so the write is posted over the PCI bus.
 */
static void rtl8169_hw_reset(void __iomem *ioaddr)
{
	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(ioaddr);

	/* Reset the chipset */
	RTL_W8(ChipCmd, CmdReset);

	/* PCI commit */
	RTL_R8(ChipCmd);
}
1815
/*
 * Program RxConfig (base config plus the chipset-specific mask bits
 * preserved from the current register value) and TxConfig (DMA burst
 * size and inter-frame gap).
 */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 cfg = rtl8169_rx_config;

	cfg |= (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
	RTL_W32(RxConfig, cfg);

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));
}
1828
/*
 * Common hardware start path: soft-reset the chip (polling completion
 * for up to ~100 ms), run the per-family hw_start handler installed at
 * probe time, unmask interrupts and wake the transmit queue.
 */
static void rtl_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 i;

	/* Soft reset the chip. */
	RTL_W8(ChipCmd, CmdReset);

	/* Check that the chip has finished the reset. */
	for (i = 100; i > 0; i--) {
		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
			break;
		msleep_interruptible(1);
	}

	tp->hw_start(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	RTL_W16(IntrMask, rtl8169_intr_mask);

	netif_start_queue(dev);
}
1852
1853
/*
 * Program the 64 bit physical addresses of the Tx and Rx descriptor
 * rings into the chip.
 */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
					 void __iomem *ioaddr)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_32BIT_MASK);
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_32BIT_MASK);
}
1867
/*
 * Read CPlusCmd and write the same value back (read-modify-write with
 * no modification), returning the value read.  NOTE(review): the
 * write-back looks like a deliberate register refresh/commit quirk -
 * confirm before simplifying to a plain read.
 */
static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
{
	u16 cmd;

	cmd = RTL_R16(CPlusCmd);
	RTL_W16(CPlusCmd, cmd);
	return cmd;
}
1876
/* Set RxMaxSize to the hardware maximum (16383), effectively disabling
 * hardware size filtering: "Low hurts" per the original author. */
static void rtl_set_rx_max_size(void __iomem *ioaddr)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, 16383);
}
1882
07ce4064
FR
/*
 * RTL8169-family hardware start.  Applies per-revision PCI and register
 * quirks, then brings up Rx/Tx.  Note the revision split: VER_01..04
 * enable Tx/Rx and program the config registers *before* the descriptor
 * addresses are written, later revisions after.  The sequence is
 * order-sensitive - do not rearrange.
 */
static void rtl_hw_start_8169(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	u16 cmd;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_13) {
		pci_write_config_word(pdev, 0x68, 0x00);
		pci_write_config_word(pdev, 0x69, 0x08);
	}

	/* Undocumented stuff. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		/* Realtek's r1000_n.c driver uses '&& 0x01' here. Well... */
		if ((RTL_R8(Config2) & 0x07) & 0x01)
			RTL_W32(0x7c, 0x0007ffff);

		RTL_W32(0x7c, 0x0007ff00);

		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		cmd = cmd & 0xef;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	/* Early revisions: enable Tx/Rx before descriptor setup. */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_04))
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(EarlyTxThres, EarlyTxThld);

	rtl_set_rx_max_size(ioaddr);

	if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_04))
		rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
		dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	/* Later revisions: enable Tx/Rx only after the rings are known. */
	if ((tp->mac_version != RTL_GIGA_MAC_VER_01) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_02) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_03) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_04)) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
1da177e4 1969
07ce4064
FR
/* 8168 hw_start: no family-specific setup yet, reuse the 8169 path. */
static void rtl_hw_start_8168(struct net_device *dev)
{
	rtl_hw_start_8169(dev);
}
1da177e4 1974
07ce4064
FR
/* 8101 hw_start: no family-specific setup yet, reuse the 8169 path. */
static void rtl_hw_start_8101(struct net_device *dev)
{
	rtl_hw_start_8169(dev);
}
1979
/*
 * net_device change_mtu: validate the new MTU, and if the interface is
 * running, tear the rings down and rebuild them with buffers sized for
 * the new MTU before restarting the hardware.
 */
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret = 0;

	if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
		return -EINVAL;

	dev->mtu = new_mtu;

	/* When down, the new size takes effect at the next open. */
	if (!netif_running(dev))
		goto out;

	rtl8169_down(dev);

	rtl8169_set_rxbufsize(tp, dev);

	ret = rtl8169_init_ring(dev);
	if (ret < 0)
		goto out;

	netif_poll_enable(dev);

	rtl_hw_start(dev);

	rtl8169_request_timer(dev);

out:
	return ret;
}
2010
/*
 * Poison an Rx descriptor so the NIC will never DMA into it: clear the
 * ownership/size bits and point the buffer at an obvious garbage address.
 */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = 0x0badbadbadbadbadull;
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
2016
/*
 * Release one Rx buffer: unmap its DMA mapping, free the skb, NULL the
 * table slot and poison the descriptor so the chip cannot reuse it.
 */
static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
				struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
2028
2029static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
2030{
2031 u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
2032
2033 desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
2034}
2035
2036static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
2037 u32 rx_buf_sz)
2038{
2039 desc->addr = cpu_to_le64(mapping);
2040 wmb();
2041 rtl8169_mark_to_asic(desc, rx_buf_sz);
2042}
2043
15d31758
SH
2044static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
2045 struct net_device *dev,
2046 struct RxDesc *desc, int rx_buf_sz,
2047 unsigned int align)
1da177e4
LT
2048{
2049 struct sk_buff *skb;
2050 dma_addr_t mapping;
1da177e4 2051
15d31758 2052 skb = netdev_alloc_skb(dev, rx_buf_sz + align);
1da177e4
LT
2053 if (!skb)
2054 goto err_out;
2055
dcb92f88 2056 skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
1da177e4 2057
689be439 2058 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
1da177e4
LT
2059 PCI_DMA_FROMDEVICE);
2060
2061 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
1da177e4 2062out:
15d31758 2063 return skb;
1da177e4
LT
2064
2065err_out:
1da177e4
LT
2066 rtl8169_make_unusable_by_asic(desc);
2067 goto out;
2068}
2069
2070static void rtl8169_rx_clear(struct rtl8169_private *tp)
2071{
2072 int i;
2073
2074 for (i = 0; i < NUM_RX_DESC; i++) {
2075 if (tp->Rx_skbuff[i]) {
2076 rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i,
2077 tp->RxDescArray + i);
2078 }
2079 }
2080}
2081
2082static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
2083 u32 start, u32 end)
2084{
2085 u32 cur;
5b0384f4 2086
4ae47c2d 2087 for (cur = start; end - cur != 0; cur++) {
15d31758
SH
2088 struct sk_buff *skb;
2089 unsigned int i = cur % NUM_RX_DESC;
1da177e4 2090
4ae47c2d
FR
2091 WARN_ON((s32)(end - cur) < 0);
2092
1da177e4
LT
2093 if (tp->Rx_skbuff[i])
2094 continue;
bcf0bf90 2095
15d31758
SH
2096 skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
2097 tp->RxDescArray + i,
2098 tp->rx_buf_sz, tp->align);
2099 if (!skb)
1da177e4 2100 break;
15d31758
SH
2101
2102 tp->Rx_skbuff[i] = skb;
1da177e4
LT
2103 }
2104 return cur - start;
2105}
2106
2107static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
2108{
2109 desc->opts1 |= cpu_to_le32(RingEnd);
2110}
2111
2112static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
2113{
2114 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
2115}
2116
2117static int rtl8169_init_ring(struct net_device *dev)
2118{
2119 struct rtl8169_private *tp = netdev_priv(dev);
2120
2121 rtl8169_init_ring_indexes(tp);
2122
2123 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
2124 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
2125
2126 if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
2127 goto err_out;
2128
2129 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
2130
2131 return 0;
2132
2133err_out:
2134 rtl8169_rx_clear(tp);
2135 return -ENOMEM;
2136}
2137
2138static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
2139 struct TxDesc *desc)
2140{
2141 unsigned int len = tx_skb->len;
2142
2143 pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
2144 desc->opts1 = 0x00;
2145 desc->opts2 = 0x00;
2146 desc->addr = 0x00;
2147 tx_skb->len = 0;
2148}
2149
2150static void rtl8169_tx_clear(struct rtl8169_private *tp)
2151{
2152 unsigned int i;
2153
2154 for (i = tp->dirty_tx; i < tp->dirty_tx + NUM_TX_DESC; i++) {
2155 unsigned int entry = i % NUM_TX_DESC;
2156 struct ring_info *tx_skb = tp->tx_skb + entry;
2157 unsigned int len = tx_skb->len;
2158
2159 if (len) {
2160 struct sk_buff *skb = tx_skb->skb;
2161
2162 rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb,
2163 tp->TxDescArray + entry);
2164 if (skb) {
2165 dev_kfree_skb(skb);
2166 tx_skb->skb = NULL;
2167 }
2168 tp->stats.tx_dropped++;
2169 }
2170 }
2171 tp->cur_tx = tp->dirty_tx = 0;
2172}
2173
c4028958 2174static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
1da177e4
LT
2175{
2176 struct rtl8169_private *tp = netdev_priv(dev);
2177
c4028958 2178 PREPARE_DELAYED_WORK(&tp->task, task);
1da177e4
LT
2179 schedule_delayed_work(&tp->task, 4);
2180}
2181
2182static void rtl8169_wait_for_quiescence(struct net_device *dev)
2183{
2184 struct rtl8169_private *tp = netdev_priv(dev);
2185 void __iomem *ioaddr = tp->mmio_addr;
2186
2187 synchronize_irq(dev->irq);
2188
2189 /* Wait for any pending NAPI task to complete */
2190 netif_poll_disable(dev);
2191
2192 rtl8169_irq_mask_and_ack(ioaddr);
2193
2194 netif_poll_enable(dev);
2195}
2196
c4028958 2197static void rtl8169_reinit_task(struct work_struct *work)
1da177e4 2198{
c4028958
DH
2199 struct rtl8169_private *tp =
2200 container_of(work, struct rtl8169_private, task.work);
2201 struct net_device *dev = tp->dev;
1da177e4
LT
2202 int ret;
2203
eb2a021c
FR
2204 rtnl_lock();
2205
2206 if (!netif_running(dev))
2207 goto out_unlock;
2208
2209 rtl8169_wait_for_quiescence(dev);
2210 rtl8169_close(dev);
1da177e4
LT
2211
2212 ret = rtl8169_open(dev);
2213 if (unlikely(ret < 0)) {
2214 if (net_ratelimit()) {
b57b7e5a
SH
2215 struct rtl8169_private *tp = netdev_priv(dev);
2216
2217 if (netif_msg_drv(tp)) {
2218 printk(PFX KERN_ERR
2219 "%s: reinit failure (status = %d)."
2220 " Rescheduling.\n", dev->name, ret);
2221 }
1da177e4
LT
2222 }
2223 rtl8169_schedule_work(dev, rtl8169_reinit_task);
2224 }
eb2a021c
FR
2225
2226out_unlock:
2227 rtnl_unlock();
1da177e4
LT
2228}
2229
c4028958 2230static void rtl8169_reset_task(struct work_struct *work)
1da177e4 2231{
c4028958
DH
2232 struct rtl8169_private *tp =
2233 container_of(work, struct rtl8169_private, task.work);
2234 struct net_device *dev = tp->dev;
1da177e4 2235
eb2a021c
FR
2236 rtnl_lock();
2237
1da177e4 2238 if (!netif_running(dev))
eb2a021c 2239 goto out_unlock;
1da177e4
LT
2240
2241 rtl8169_wait_for_quiescence(dev);
2242
2243 rtl8169_rx_interrupt(dev, tp, tp->mmio_addr);
2244 rtl8169_tx_clear(tp);
2245
2246 if (tp->dirty_rx == tp->cur_rx) {
2247 rtl8169_init_ring_indexes(tp);
07ce4064 2248 rtl_hw_start(dev);
1da177e4
LT
2249 netif_wake_queue(dev);
2250 } else {
2251 if (net_ratelimit()) {
b57b7e5a
SH
2252 struct rtl8169_private *tp = netdev_priv(dev);
2253
2254 if (netif_msg_intr(tp)) {
2255 printk(PFX KERN_EMERG
2256 "%s: Rx buffers shortage\n", dev->name);
2257 }
1da177e4
LT
2258 }
2259 rtl8169_schedule_work(dev, rtl8169_reset_task);
2260 }
eb2a021c
FR
2261
2262out_unlock:
2263 rtnl_unlock();
1da177e4
LT
2264}
2265
2266static void rtl8169_tx_timeout(struct net_device *dev)
2267{
2268 struct rtl8169_private *tp = netdev_priv(dev);
2269
2270 rtl8169_hw_reset(tp->mmio_addr);
2271
2272 /* Let's wait a bit while any (async) irq lands on */
2273 rtl8169_schedule_work(dev, rtl8169_reset_task);
2274}
2275
2276static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
2277 u32 opts1)
2278{
2279 struct skb_shared_info *info = skb_shinfo(skb);
2280 unsigned int cur_frag, entry;
2281 struct TxDesc *txd;
2282
2283 entry = tp->cur_tx;
2284 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
2285 skb_frag_t *frag = info->frags + cur_frag;
2286 dma_addr_t mapping;
2287 u32 status, len;
2288 void *addr;
2289
2290 entry = (entry + 1) % NUM_TX_DESC;
2291
2292 txd = tp->TxDescArray + entry;
2293 len = frag->size;
2294 addr = ((void *) page_address(frag->page)) + frag->page_offset;
2295 mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE);
2296
2297 /* anti gcc 2.95.3 bugware (sic) */
2298 status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
2299
2300 txd->opts1 = cpu_to_le32(status);
2301 txd->addr = cpu_to_le64(mapping);
2302
2303 tp->tx_skb[entry].len = len;
2304 }
2305
2306 if (cur_frag) {
2307 tp->tx_skb[entry].skb = skb;
2308 txd->opts1 |= cpu_to_le32(LastFrag);
2309 }
2310
2311 return cur_frag;
2312}
2313
2314static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
2315{
2316 if (dev->features & NETIF_F_TSO) {
7967168c 2317 u32 mss = skb_shinfo(skb)->gso_size;
1da177e4
LT
2318
2319 if (mss)
2320 return LargeSend | ((mss & MSSMask) << MSSShift);
2321 }
84fa7933 2322 if (skb->ip_summed == CHECKSUM_PARTIAL) {
eddc9ec5 2323 const struct iphdr *ip = ip_hdr(skb);
1da177e4
LT
2324
2325 if (ip->protocol == IPPROTO_TCP)
2326 return IPCS | TCPCS;
2327 else if (ip->protocol == IPPROTO_UDP)
2328 return IPCS | UDPCS;
2329 WARN_ON(1); /* we need a WARN() */
2330 }
2331 return 0;
2332}
2333
2334static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
2335{
2336 struct rtl8169_private *tp = netdev_priv(dev);
2337 unsigned int frags, entry = tp->cur_tx % NUM_TX_DESC;
2338 struct TxDesc *txd = tp->TxDescArray + entry;
2339 void __iomem *ioaddr = tp->mmio_addr;
2340 dma_addr_t mapping;
2341 u32 status, len;
2342 u32 opts1;
188f4af0 2343 int ret = NETDEV_TX_OK;
5b0384f4 2344
1da177e4 2345 if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
b57b7e5a
SH
2346 if (netif_msg_drv(tp)) {
2347 printk(KERN_ERR
2348 "%s: BUG! Tx Ring full when queue awake!\n",
2349 dev->name);
2350 }
1da177e4
LT
2351 goto err_stop;
2352 }
2353
2354 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
2355 goto err_stop;
2356
2357 opts1 = DescOwn | rtl8169_tso_csum(skb, dev);
2358
2359 frags = rtl8169_xmit_frags(tp, skb, opts1);
2360 if (frags) {
2361 len = skb_headlen(skb);
2362 opts1 |= FirstFrag;
2363 } else {
2364 len = skb->len;
2365
2366 if (unlikely(len < ETH_ZLEN)) {
5b057c6b 2367 if (skb_padto(skb, ETH_ZLEN))
1da177e4
LT
2368 goto err_update_stats;
2369 len = ETH_ZLEN;
2370 }
2371
2372 opts1 |= FirstFrag | LastFrag;
2373 tp->tx_skb[entry].skb = skb;
2374 }
2375
2376 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
2377
2378 tp->tx_skb[entry].len = len;
2379 txd->addr = cpu_to_le64(mapping);
2380 txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
2381
2382 wmb();
2383
2384 /* anti gcc 2.95.3 bugware (sic) */
2385 status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
2386 txd->opts1 = cpu_to_le32(status);
2387
2388 dev->trans_start = jiffies;
2389
2390 tp->cur_tx += frags + 1;
2391
2392 smp_wmb();
2393
2394 RTL_W8(TxPoll, 0x40); /* set polling bit */
2395
2396 if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
2397 netif_stop_queue(dev);
2398 smp_rmb();
2399 if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
2400 netif_wake_queue(dev);
2401 }
2402
2403out:
2404 return ret;
2405
2406err_stop:
2407 netif_stop_queue(dev);
188f4af0 2408 ret = NETDEV_TX_BUSY;
1da177e4
LT
2409err_update_stats:
2410 tp->stats.tx_dropped++;
2411 goto out;
2412}
2413
2414static void rtl8169_pcierr_interrupt(struct net_device *dev)
2415{
2416 struct rtl8169_private *tp = netdev_priv(dev);
2417 struct pci_dev *pdev = tp->pci_dev;
2418 void __iomem *ioaddr = tp->mmio_addr;
2419 u16 pci_status, pci_cmd;
2420
2421 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
2422 pci_read_config_word(pdev, PCI_STATUS, &pci_status);
2423
b57b7e5a
SH
2424 if (netif_msg_intr(tp)) {
2425 printk(KERN_ERR
2426 "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
2427 dev->name, pci_cmd, pci_status);
2428 }
1da177e4
LT
2429
2430 /*
2431 * The recovery sequence below admits a very elaborated explanation:
2432 * - it seems to work;
d03902b8
FR
2433 * - I did not see what else could be done;
2434 * - it makes iop3xx happy.
1da177e4
LT
2435 *
2436 * Feel free to adjust to your needs.
2437 */
a27993f3 2438 if (pdev->broken_parity_status)
d03902b8
FR
2439 pci_cmd &= ~PCI_COMMAND_PARITY;
2440 else
2441 pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
2442
2443 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
1da177e4
LT
2444
2445 pci_write_config_word(pdev, PCI_STATUS,
2446 pci_status & (PCI_STATUS_DETECTED_PARITY |
2447 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
2448 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
2449
2450 /* The infamous DAC f*ckup only happens at boot time */
2451 if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
b57b7e5a
SH
2452 if (netif_msg_intr(tp))
2453 printk(KERN_INFO "%s: disabling PCI DAC.\n", dev->name);
1da177e4
LT
2454 tp->cp_cmd &= ~PCIDAC;
2455 RTL_W16(CPlusCmd, tp->cp_cmd);
2456 dev->features &= ~NETIF_F_HIGHDMA;
1da177e4
LT
2457 }
2458
2459 rtl8169_hw_reset(ioaddr);
d03902b8
FR
2460
2461 rtl8169_schedule_work(dev, rtl8169_reinit_task);
1da177e4
LT
2462}
2463
2464static void
2465rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
2466 void __iomem *ioaddr)
2467{
2468 unsigned int dirty_tx, tx_left;
2469
2470 assert(dev != NULL);
2471 assert(tp != NULL);
2472 assert(ioaddr != NULL);
2473
2474 dirty_tx = tp->dirty_tx;
2475 smp_rmb();
2476 tx_left = tp->cur_tx - dirty_tx;
2477
2478 while (tx_left > 0) {
2479 unsigned int entry = dirty_tx % NUM_TX_DESC;
2480 struct ring_info *tx_skb = tp->tx_skb + entry;
2481 u32 len = tx_skb->len;
2482 u32 status;
2483
2484 rmb();
2485 status = le32_to_cpu(tp->TxDescArray[entry].opts1);
2486 if (status & DescOwn)
2487 break;
2488
2489 tp->stats.tx_bytes += len;
2490 tp->stats.tx_packets++;
2491
2492 rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);
2493
2494 if (status & LastFrag) {
2495 dev_kfree_skb_irq(tx_skb->skb);
2496 tx_skb->skb = NULL;
2497 }
2498 dirty_tx++;
2499 tx_left--;
2500 }
2501
2502 if (tp->dirty_tx != dirty_tx) {
2503 tp->dirty_tx = dirty_tx;
2504 smp_wmb();
2505 if (netif_queue_stopped(dev) &&
2506 (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
2507 netif_wake_queue(dev);
2508 }
2509 }
2510}
2511
126fa4b9
FR
2512static inline int rtl8169_fragmented_frame(u32 status)
2513{
2514 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
2515}
2516
1da177e4
LT
2517static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
2518{
2519 u32 opts1 = le32_to_cpu(desc->opts1);
2520 u32 status = opts1 & RxProtoMask;
2521
2522 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
2523 ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
2524 ((status == RxProtoIP) && !(opts1 & IPFail)))
2525 skb->ip_summed = CHECKSUM_UNNECESSARY;
2526 else
2527 skb->ip_summed = CHECKSUM_NONE;
2528}
2529
b449655f
SH
2530static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
2531 struct pci_dev *pdev, dma_addr_t addr,
2532 unsigned int align)
1da177e4 2533{
b449655f
SH
2534 struct sk_buff *skb;
2535 bool done = false;
1da177e4 2536
b449655f
SH
2537 if (pkt_size >= rx_copybreak)
2538 goto out;
1da177e4 2539
b449655f
SH
2540 skb = dev_alloc_skb(pkt_size + align);
2541 if (!skb)
2542 goto out;
2543
2544 pci_dma_sync_single_for_cpu(pdev, addr, pkt_size, PCI_DMA_FROMDEVICE);
2545 skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
2546 skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
2547 *sk_buff = skb;
2548 done = true;
2549out:
2550 return done;
1da177e4
LT
2551}
2552
2553static int
2554rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
2555 void __iomem *ioaddr)
2556{
2557 unsigned int cur_rx, rx_left;
2558 unsigned int delta, count;
2559
2560 assert(dev != NULL);
2561 assert(tp != NULL);
2562 assert(ioaddr != NULL);
2563
2564 cur_rx = tp->cur_rx;
2565 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
2566 rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota);
2567
4dcb7d33 2568 for (; rx_left > 0; rx_left--, cur_rx++) {
1da177e4 2569 unsigned int entry = cur_rx % NUM_RX_DESC;
126fa4b9 2570 struct RxDesc *desc = tp->RxDescArray + entry;
1da177e4
LT
2571 u32 status;
2572
2573 rmb();
126fa4b9 2574 status = le32_to_cpu(desc->opts1);
1da177e4
LT
2575
2576 if (status & DescOwn)
2577 break;
4dcb7d33 2578 if (unlikely(status & RxRES)) {
b57b7e5a
SH
2579 if (netif_msg_rx_err(tp)) {
2580 printk(KERN_INFO
2581 "%s: Rx ERROR. status = %08x\n",
2582 dev->name, status);
2583 }
1da177e4
LT
2584 tp->stats.rx_errors++;
2585 if (status & (RxRWT | RxRUNT))
2586 tp->stats.rx_length_errors++;
2587 if (status & RxCRC)
2588 tp->stats.rx_crc_errors++;
9dccf611
FR
2589 if (status & RxFOVF) {
2590 rtl8169_schedule_work(dev, rtl8169_reset_task);
2591 tp->stats.rx_fifo_errors++;
2592 }
126fa4b9 2593 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
1da177e4 2594 } else {
1da177e4 2595 struct sk_buff *skb = tp->Rx_skbuff[entry];
b449655f 2596 dma_addr_t addr = le64_to_cpu(desc->addr);
1da177e4 2597 int pkt_size = (status & 0x00001FFF) - 4;
b449655f 2598 struct pci_dev *pdev = tp->pci_dev;
1da177e4 2599
126fa4b9
FR
2600 /*
2601 * The driver does not support incoming fragmented
2602 * frames. They are seen as a symptom of over-mtu
2603 * sized frames.
2604 */
2605 if (unlikely(rtl8169_fragmented_frame(status))) {
2606 tp->stats.rx_dropped++;
2607 tp->stats.rx_length_errors++;
2608 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
4dcb7d33 2609 continue;
126fa4b9
FR
2610 }
2611
1da177e4 2612 rtl8169_rx_csum(skb, desc);
bcf0bf90 2613
b449655f
SH
2614 if (rtl8169_try_rx_copy(&skb, pkt_size, pdev, addr,
2615 tp->align)) {
2616 pci_dma_sync_single_for_device(pdev, addr,
2617 pkt_size, PCI_DMA_FROMDEVICE);
2618 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
2619 } else {
2620 pci_unmap_single(pdev, addr, pkt_size,
2621 PCI_DMA_FROMDEVICE);
1da177e4
LT
2622 tp->Rx_skbuff[entry] = NULL;
2623 }
2624
1da177e4
LT
2625 skb_put(skb, pkt_size);
2626 skb->protocol = eth_type_trans(skb, dev);
2627
2628 if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0)
2629 rtl8169_rx_skb(skb);
2630
2631 dev->last_rx = jiffies;
2632 tp->stats.rx_bytes += pkt_size;
2633 tp->stats.rx_packets++;
2634 }
1da177e4
LT
2635 }
2636
2637 count = cur_rx - tp->cur_rx;
2638 tp->cur_rx = cur_rx;
2639
2640 delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
b57b7e5a 2641 if (!delta && count && netif_msg_intr(tp))
1da177e4
LT
2642 printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
2643 tp->dirty_rx += delta;
2644
2645 /*
2646 * FIXME: until there is periodic timer to try and refill the ring,
2647 * a temporary shortage may definitely kill the Rx process.
2648 * - disable the asic to try and avoid an overflow and kick it again
2649 * after refill ?
2650 * - how do others driver handle this condition (Uh oh...).
2651 */
b57b7e5a 2652 if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp))
1da177e4
LT
2653 printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);
2654
2655 return count;
2656}
2657
2658/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */
2659static irqreturn_t
7d12e780 2660rtl8169_interrupt(int irq, void *dev_instance)
1da177e4
LT
2661{
2662 struct net_device *dev = (struct net_device *) dev_instance;
2663 struct rtl8169_private *tp = netdev_priv(dev);
2664 int boguscnt = max_interrupt_work;
2665 void __iomem *ioaddr = tp->mmio_addr;
2666 int status;
2667 int handled = 0;
2668
2669 do {
2670 status = RTL_R16(IntrStatus);
2671
2672 /* hotplug/major error/no more work/shared irq */
2673 if ((status == 0xFFFF) || !status)
2674 break;
2675
2676 handled = 1;
2677
2678 if (unlikely(!netif_running(dev))) {
2679 rtl8169_asic_down(ioaddr);
2680 goto out;
2681 }
2682
2683 status &= tp->intr_mask;
2684 RTL_W16(IntrStatus,
2685 (status & RxFIFOOver) ? (status | RxOverflow) : status);
2686
2687 if (!(status & rtl8169_intr_mask))
2688 break;
2689
2690 if (unlikely(status & SYSErr)) {
2691 rtl8169_pcierr_interrupt(dev);
2692 break;
2693 }
2694
2695 if (status & LinkChg)
2696 rtl8169_check_link_status(dev, tp, ioaddr);
2697
2698#ifdef CONFIG_R8169_NAPI
2699 RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event);
2700 tp->intr_mask = ~rtl8169_napi_event;
2701
2702 if (likely(netif_rx_schedule_prep(dev)))
2703 __netif_rx_schedule(dev);
b57b7e5a 2704 else if (netif_msg_intr(tp)) {
1da177e4 2705 printk(KERN_INFO "%s: interrupt %04x taken in poll\n",
5b0384f4 2706 dev->name, status);
1da177e4
LT
2707 }
2708 break;
2709#else
2710 /* Rx interrupt */
2711 if (status & (RxOK | RxOverflow | RxFIFOOver)) {
2712 rtl8169_rx_interrupt(dev, tp, ioaddr);
2713 }
2714 /* Tx interrupt */
2715 if (status & (TxOK | TxErr))
2716 rtl8169_tx_interrupt(dev, tp, ioaddr);
2717#endif
2718
2719 boguscnt--;
2720 } while (boguscnt > 0);
2721
2722 if (boguscnt <= 0) {
7c8b2eb4 2723 if (netif_msg_intr(tp) && net_ratelimit() ) {
b57b7e5a
SH
2724 printk(KERN_WARNING
2725 "%s: Too much work at interrupt!\n", dev->name);
2726 }
1da177e4
LT
2727 /* Clear all interrupt sources. */
2728 RTL_W16(IntrStatus, 0xffff);
2729 }
2730out:
2731 return IRQ_RETVAL(handled);
2732}
2733
2734#ifdef CONFIG_R8169_NAPI
2735static int rtl8169_poll(struct net_device *dev, int *budget)
2736{
2737 unsigned int work_done, work_to_do = min(*budget, dev->quota);
2738 struct rtl8169_private *tp = netdev_priv(dev);
2739 void __iomem *ioaddr = tp->mmio_addr;
2740
2741 work_done = rtl8169_rx_interrupt(dev, tp, ioaddr);
2742 rtl8169_tx_interrupt(dev, tp, ioaddr);
2743
2744 *budget -= work_done;
2745 dev->quota -= work_done;
2746
2747 if (work_done < work_to_do) {
2748 netif_rx_complete(dev);
2749 tp->intr_mask = 0xffff;
2750 /*
2751 * 20040426: the barrier is not strictly required but the
2752 * behavior of the irq handler could be less predictable
2753 * without it. Btw, the lack of flush for the posted pci
2754 * write is safe - FR
2755 */
2756 smp_wmb();
2757 RTL_W16(IntrMask, rtl8169_intr_mask);
2758 }
2759
2760 return (work_done >= work_to_do);
2761}
2762#endif
2763
2764static void rtl8169_down(struct net_device *dev)
2765{
2766 struct rtl8169_private *tp = netdev_priv(dev);
2767 void __iomem *ioaddr = tp->mmio_addr;
2768 unsigned int poll_locked = 0;
733b736c 2769 unsigned int intrmask;
1da177e4
LT
2770
2771 rtl8169_delete_timer(dev);
2772
2773 netif_stop_queue(dev);
2774
1da177e4
LT
2775core_down:
2776 spin_lock_irq(&tp->lock);
2777
2778 rtl8169_asic_down(ioaddr);
2779
2780 /* Update the error counts. */
2781 tp->stats.rx_missed_errors += RTL_R32(RxMissed);
2782 RTL_W32(RxMissed, 0);
2783
2784 spin_unlock_irq(&tp->lock);
2785
2786 synchronize_irq(dev->irq);
2787
2788 if (!poll_locked) {
2789 netif_poll_disable(dev);
2790 poll_locked++;
2791 }
2792
2793 /* Give a racing hard_start_xmit a few cycles to complete. */
fbd568a3 2794 synchronize_sched(); /* FIXME: should this be synchronize_irq()? */
1da177e4
LT
2795
2796 /*
2797 * And now for the 50k$ question: are IRQ disabled or not ?
2798 *
2799 * Two paths lead here:
2800 * 1) dev->close
2801 * -> netif_running() is available to sync the current code and the
2802 * IRQ handler. See rtl8169_interrupt for details.
2803 * 2) dev->change_mtu
2804 * -> rtl8169_poll can not be issued again and re-enable the
2805 * interruptions. Let's simply issue the IRQ down sequence again.
733b736c
AP
2806 *
2807 * No loop if hotpluged or major error (0xffff).
1da177e4 2808 */
733b736c
AP
2809 intrmask = RTL_R16(IntrMask);
2810 if (intrmask && (intrmask != 0xffff))
1da177e4
LT
2811 goto core_down;
2812
2813 rtl8169_tx_clear(tp);
2814
2815 rtl8169_rx_clear(tp);
2816}
2817
2818static int rtl8169_close(struct net_device *dev)
2819{
2820 struct rtl8169_private *tp = netdev_priv(dev);
2821 struct pci_dev *pdev = tp->pci_dev;
2822
2823 rtl8169_down(dev);
2824
2825 free_irq(dev->irq, dev);
2826
2827 netif_poll_enable(dev);
2828
2829 pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
2830 tp->RxPhyAddr);
2831 pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
2832 tp->TxPhyAddr);
2833 tp->TxDescArray = NULL;
2834 tp->RxDescArray = NULL;
2835
2836 return 0;
2837}
2838
07ce4064 2839static void rtl_set_rx_mode(struct net_device *dev)
1da177e4
LT
2840{
2841 struct rtl8169_private *tp = netdev_priv(dev);
2842 void __iomem *ioaddr = tp->mmio_addr;
2843 unsigned long flags;
2844 u32 mc_filter[2]; /* Multicast hash filter */
2845 int i, rx_mode;
2846 u32 tmp = 0;
2847
2848 if (dev->flags & IFF_PROMISC) {
2849 /* Unconditionally log net taps. */
b57b7e5a
SH
2850 if (netif_msg_link(tp)) {
2851 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
2852 dev->name);
2853 }
1da177e4
LT
2854 rx_mode =
2855 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
2856 AcceptAllPhys;
2857 mc_filter[1] = mc_filter[0] = 0xffffffff;
2858 } else if ((dev->mc_count > multicast_filter_limit)
2859 || (dev->flags & IFF_ALLMULTI)) {
2860 /* Too many to filter perfectly -- accept all multicasts. */
2861 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
2862 mc_filter[1] = mc_filter[0] = 0xffffffff;
2863 } else {
2864 struct dev_mc_list *mclist;
2865 rx_mode = AcceptBroadcast | AcceptMyPhys;
2866 mc_filter[1] = mc_filter[0] = 0;
2867 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2868 i++, mclist = mclist->next) {
2869 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
2870 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2871 rx_mode |= AcceptMulticast;
2872 }
2873 }
2874
2875 spin_lock_irqsave(&tp->lock, flags);
2876
2877 tmp = rtl8169_rx_config | rx_mode |
2878 (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
2879
bcf0bf90
FR
2880 if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
2881 (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
2882 (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
2883 (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
2884 (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
2885 mc_filter[0] = 0xffffffff;
2886 mc_filter[1] = 0xffffffff;
2887 }
2888
1da177e4
LT
2889 RTL_W32(RxConfig, tmp);
2890 RTL_W32(MAR0 + 0, mc_filter[0]);
2891 RTL_W32(MAR0 + 4, mc_filter[1]);
2892
2893 spin_unlock_irqrestore(&tp->lock, flags);
2894}
2895
2896/**
2897 * rtl8169_get_stats - Get rtl8169 read/write statistics
2898 * @dev: The Ethernet Device to get statistics for
2899 *
2900 * Get TX/RX statistics for rtl8169
2901 */
2902static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
2903{
2904 struct rtl8169_private *tp = netdev_priv(dev);
2905 void __iomem *ioaddr = tp->mmio_addr;
2906 unsigned long flags;
2907
2908 if (netif_running(dev)) {
2909 spin_lock_irqsave(&tp->lock, flags);
2910 tp->stats.rx_missed_errors += RTL_R32(RxMissed);
2911 RTL_W32(RxMissed, 0);
2912 spin_unlock_irqrestore(&tp->lock, flags);
2913 }
5b0384f4 2914
1da177e4
LT
2915 return &tp->stats;
2916}
2917
5d06a99f
FR
2918#ifdef CONFIG_PM
2919
2920static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
2921{
2922 struct net_device *dev = pci_get_drvdata(pdev);
2923 struct rtl8169_private *tp = netdev_priv(dev);
2924 void __iomem *ioaddr = tp->mmio_addr;
2925
2926 if (!netif_running(dev))
1371fa6d 2927 goto out_pci_suspend;
5d06a99f
FR
2928
2929 netif_device_detach(dev);
2930 netif_stop_queue(dev);
2931
2932 spin_lock_irq(&tp->lock);
2933
2934 rtl8169_asic_down(ioaddr);
2935
2936 tp->stats.rx_missed_errors += RTL_R32(RxMissed);
2937 RTL_W32(RxMissed, 0);
2938
2939 spin_unlock_irq(&tp->lock);
2940
1371fa6d 2941out_pci_suspend:
5d06a99f 2942 pci_save_state(pdev);
61a4dcc2 2943 pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled);
5d06a99f 2944 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1371fa6d 2945
5d06a99f
FR
2946 return 0;
2947}
2948
2949static int rtl8169_resume(struct pci_dev *pdev)
2950{
2951 struct net_device *dev = pci_get_drvdata(pdev);
2952
1371fa6d
FR
2953 pci_set_power_state(pdev, PCI_D0);
2954 pci_restore_state(pdev);
2955 pci_enable_wake(pdev, PCI_D0, 0);
2956
5d06a99f
FR
2957 if (!netif_running(dev))
2958 goto out;
2959
2960 netif_device_attach(dev);
2961
5d06a99f
FR
2962 rtl8169_schedule_work(dev, rtl8169_reset_task);
2963out:
2964 return 0;
2965}
2966
2967#endif /* CONFIG_PM */
2968
1da177e4
LT
2969static struct pci_driver rtl8169_pci_driver = {
2970 .name = MODULENAME,
2971 .id_table = rtl8169_pci_tbl,
2972 .probe = rtl8169_init_one,
2973 .remove = __devexit_p(rtl8169_remove_one),
2974#ifdef CONFIG_PM
2975 .suspend = rtl8169_suspend,
2976 .resume = rtl8169_resume,
2977#endif
2978};
2979
2980static int __init
2981rtl8169_init_module(void)
2982{
29917620 2983 return pci_register_driver(&rtl8169_pci_driver);
1da177e4
LT
2984}
2985
2986static void __exit
2987rtl8169_cleanup_module(void)
2988{
2989 pci_unregister_driver(&rtl8169_pci_driver);
2990}
2991
2992module_init(rtl8169_init_module);
2993module_exit(rtl8169_cleanup_module);