/*
=========================================================================
 r8169.c: A RealTek RTL-8169 Gigabit Ethernet driver for Linux kernel 2.4.x.
 --------------------------------------------------------------------

 History:
 Feb  4 2002	- created initially by ShuChen <shuchen@realtek.com.tw>.
 May 20 2002	- Add link status force-mode and TBI mode support.
	2004	- Massive updates. See kernel SCM system for details.
=========================================================================
  1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes.
	 Command: 'insmod r8169 media = SET_MEDIA'
	 Ex:	  'insmod r8169 media = 0x04' will force PHY to operate in 100Mbps Half-duplex.

	 SET_MEDIA can be:
 		_10_Half	= 0x01
 		_10_Full	= 0x02
 		_100_Half	= 0x04
 		_100_Full	= 0x08
 		_1000_Full	= 0x10

  2. Support TBI mode.
=========================================================================
VERSION 1.1	<2002/10/4>

	The bit4:0 of MII register 4 is called "selector field", and have to be
	00001b to indicate support of IEEE std 802.3 during NWay process of
	exchanging Link Code Word (FLP).

VERSION 1.2	<2002/11/30>

	- Large style cleanup
	- Use ether_crc in stock kernel (linux/crc32.h)
	- Copy mc_filter setup code from 8139cp
	  (includes an optimization, and avoids set_bit use)

VERSION 1.6LK	<2004/04/14>

	- Merge of Realtek's version 1.6
	- Conversion to DMA API
	- Suspend/resume
	- Endianness
	- Misc Rx/Tx bugs

VERSION 2.2LK	<2005/01/25>

	- RX csum, TX csum/SG, TSO
	- VLAN
	- baby (< 7200) Jumbo frames support
	- Merge of Realtek's version 2.2 (new phy)
 */
52
53#include <linux/module.h>
54#include <linux/moduleparam.h>
55#include <linux/pci.h>
56#include <linux/netdevice.h>
57#include <linux/etherdevice.h>
58#include <linux/delay.h>
59#include <linux/ethtool.h>
60#include <linux/mii.h>
61#include <linux/if_vlan.h>
62#include <linux/crc32.h>
63#include <linux/in.h>
64#include <linux/ip.h>
65#include <linux/tcp.h>
66#include <linux/init.h>
67#include <linux/dma-mapping.h>
68
99f252b0 69#include <asm/system.h>
1da177e4
LT
70#include <asm/io.h>
71#include <asm/irq.h>
72
#ifdef CONFIG_R8169_NAPI
#define NAPI_SUFFIX	"-NAPI"
#else
#define NAPI_SUFFIX	""
#endif

#define RTL8169_VERSION "2.2LK" NAPI_SUFFIX
#define MODULENAME "r8169"
#define PFX MODULENAME ": "

#ifdef RTL8169_DEBUG
#define assert(expr) \
	if (!(expr)) {					\
		printk( "Assertion failed! %s,%s,%s,line=%d\n",	\
		#expr,__FILE__,__FUNCTION__,__LINE__);		\
	}
#define dprintk(fmt, args...)	do { printk(PFX fmt, ## args); } while (0)
#else
#define assert(expr) do {} while (0)
#define dprintk(fmt, args...)	do {} while (0)
#endif /* RTL8169_DEBUG */

#define R8169_MSG_DEFAULT \
	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)

#define TX_BUFFS_AVAIL(tp) \
	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)

/* In NAPI mode frames are fed to the stack via netif_receive_skb and the
 * per-poll budget is honoured; otherwise plain netif_rx is used. */
#ifdef CONFIG_R8169_NAPI
#define rtl8169_rx_skb			netif_receive_skb
#define rtl8169_rx_hwaccel_skb		vlan_hwaccel_receive_skb
#define rtl8169_rx_quota(count, quota)	min(count, quota)
#else
#define rtl8169_rx_skb			netif_rx
#define rtl8169_rx_hwaccel_skb		vlan_hwaccel_rx
#define rtl8169_rx_quota(count, quota)	count
#endif

/* media options */
#define MAX_UNITS 8
static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int num_media = 0;

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static const int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

/* MAC address length */
#define MAC_ADDR_LEN	6

#define RX_FIFO_THRESH	7	/* 7 means NO threshold, Rx buffer level before first PCI xfer. */
#define RX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
#define TX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
#define EarlyTxThld	0x3F	/* 0x3F means NO early transmit */
#define RxPacketMaxSize	0x3FE8	/* 16K - 1 - ETH_HLEN - VLAN - CRC... */
#define SafeMtu		0x1c20	/* ... actually life sucks beyond ~7k */
#define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */

#define R8169_REGS_SIZE		256
#define R8169_NAPI_WEIGHT	64
#define NUM_TX_DESC	64	/* Number of Tx descriptor registers */
#define NUM_RX_DESC	256	/* Number of Rx descriptor registers */
#define RX_BUF_SIZE	1536	/* Rx Buffer size */
#define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))

#define RTL8169_TX_TIMEOUT	(6*HZ)
#define RTL8169_PHY_TIMEOUT	(10*HZ)

/* write/read MMIO register; all expect a local "ioaddr" in scope */
#define RTL_W8(reg, val8)	writeb ((val8), ioaddr + (reg))
#define RTL_W16(reg, val16)	writew ((val16), ioaddr + (reg))
#define RTL_W32(reg, val32)	writel ((val32), ioaddr + (reg))
#define RTL_R8(reg)		readb (ioaddr + (reg))
#define RTL_R16(reg)		readw (ioaddr + (reg))
#define RTL_R32(reg)		((unsigned long) readl (ioaddr + (reg)))
/* Chip revisions recognised by this driver. */
enum mac_version {
	RTL_GIGA_MAC_VER_01 = 0x00,
	RTL_GIGA_MAC_VER_02 = 0x01,
	RTL_GIGA_MAC_VER_03 = 0x02,
	RTL_GIGA_MAC_VER_04 = 0x03,
	RTL_GIGA_MAC_VER_05 = 0x04,
	RTL_GIGA_MAC_VER_11 = 0x0b,
	RTL_GIGA_MAC_VER_12 = 0x0c,
	RTL_GIGA_MAC_VER_13 = 0x0d,
	RTL_GIGA_MAC_VER_14 = 0x0e,
	RTL_GIGA_MAC_VER_15 = 0x0f
};

enum phy_version {
	RTL_GIGA_PHY_VER_C = 0x03, /* PHY Reg 0x03 bit0-3 == 0x0000 */
	RTL_GIGA_PHY_VER_D = 0x04, /* PHY Reg 0x03 bit0-3 == 0x0000 */
	RTL_GIGA_PHY_VER_E = 0x05, /* PHY Reg 0x03 bit0-3 == 0x0000 */
	RTL_GIGA_PHY_VER_F = 0x06, /* PHY Reg 0x03 bit0-3 == 0x0001 */
	RTL_GIGA_PHY_VER_G = 0x07, /* PHY Reg 0x03 bit0-3 == 0x0002 */
	RTL_GIGA_PHY_VER_H = 0x08, /* PHY Reg 0x03 bit0-3 == 0x0003 */
};
1da177e4
LT
175#define _R(NAME,MAC,MASK) \
176 { .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }
177
3c6bee1d 178static const struct {
1da177e4
LT
179 const char *name;
180 u8 mac_version;
181 u32 RxConfigMask; /* Clears the bits supported by this chip */
182} rtl_chip_info[] = {
bcf0bf90
FR
183 _R("RTL8169", RTL_GIGA_MAC_VER_01, 0xff7e1880),
184 _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_02, 0xff7e1880),
185 _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_03, 0xff7e1880),
186 _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880),
187 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880),
188 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
189 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
190 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
191 _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139
192 _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880) // PCI-E 8139
1da177e4
LT
193};
194#undef _R
195
bcf0bf90
FR
196enum cfg_version {
197 RTL_CFG_0 = 0x00,
198 RTL_CFG_1,
199 RTL_CFG_2
200};
201
202static const struct {
203 unsigned int region;
204 unsigned int align;
205} rtl_cfg_info[] = {
206 [RTL_CFG_0] = { 1, NET_IP_ALIGN },
207 [RTL_CFG_1] = { 2, NET_IP_ALIGN },
208 [RTL_CFG_2] = { 2, 8 }
209};
210
1da177e4 211static struct pci_device_id rtl8169_pci_tbl[] = {
bcf0bf90 212 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
d2eed8cf 213 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
d81bf551 214 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
bcf0bf90
FR
215 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_2 },
216 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
217 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
73f5e28b 218 { PCI_DEVICE(0x1259, 0xc107), 0, 0, RTL_CFG_0 },
bcf0bf90
FR
219 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
220 { PCI_VENDOR_ID_LINKSYS, 0x1032,
221 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
1da177e4
LT
222 {0,},
223};
224
225MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
226
227static int rx_copybreak = 200;
228static int use_dac;
b57b7e5a
SH
229static struct {
230 u32 msg_enable;
231} debug = { -1 };
1da177e4
LT
232
/* MMIO register offsets. */
enum RTL8169_registers {
	MAC0 = 0,		/* Ethernet hardware address. */
	MAR0 = 8,		/* Multicast filter. */
	CounterAddrLow = 0x10,
	CounterAddrHigh = 0x14,
	TxDescStartAddrLow = 0x20,
	TxDescStartAddrHigh = 0x24,
	TxHDescStartAddrLow = 0x28,
	TxHDescStartAddrHigh = 0x2c,
	FLASH = 0x30,
	ERSR = 0x36,
	ChipCmd = 0x37,
	TxPoll = 0x38,
	IntrMask = 0x3C,
	IntrStatus = 0x3E,
	TxConfig = 0x40,
	RxConfig = 0x44,
	RxMissed = 0x4C,
	Cfg9346 = 0x50,
	Config0 = 0x51,
	Config1 = 0x52,
	Config2 = 0x53,
	Config3 = 0x54,
	Config4 = 0x55,
	Config5 = 0x56,
	MultiIntr = 0x5C,
	PHYAR = 0x60,
	TBICSR = 0x64,
	TBI_ANAR = 0x68,
	TBI_LPAR = 0x6A,
	PHYstatus = 0x6C,
	RxMaxSize = 0xDA,
	CPlusCmd = 0xE0,
	IntrMitigate = 0xE2,
	RxDescAddrLow = 0xE4,
	RxDescAddrHigh = 0xE8,
	EarlyTxThres = 0xEC,
	FuncEvent = 0xF0,
	FuncEventMask = 0xF4,
	FuncPresetState = 0xF8,
	FuncForceEvent = 0xFC,
};

/* Bit definitions for the registers above. */
enum RTL8169_register_content {
	/* InterruptStatusBits */
	SYSErr = 0x8000,
	PCSTimeout = 0x4000,
	SWInt = 0x0100,
	TxDescUnavail = 0x80,
	RxFIFOOver = 0x40,
	LinkChg = 0x20,
	RxOverflow = 0x10,
	TxErr = 0x08,
	TxOK = 0x04,
	RxErr = 0x02,
	RxOK = 0x01,

	/* RxStatusDesc */
	RxFOVF	= (1 << 23),
	RxRWT	= (1 << 22),
	RxRES	= (1 << 21),
	RxRUNT	= (1 << 20),
	RxCRC	= (1 << 19),

	/* ChipCmdBits */
	CmdReset = 0x10,
	CmdRxEnb = 0x08,
	CmdTxEnb = 0x04,
	RxBufEmpty = 0x01,

	/* Cfg9346Bits */
	Cfg9346_Lock = 0x00,
	Cfg9346_Unlock = 0xC0,

	/* rx_mode_bits */
	AcceptErr = 0x20,
	AcceptRunt = 0x10,
	AcceptBroadcast = 0x08,
	AcceptMulticast = 0x04,
	AcceptMyPhys = 0x02,
	AcceptAllPhys = 0x01,

	/* RxConfigBits */
	RxCfgFIFOShift = 13,
	RxCfgDMAShift = 8,

	/* TxConfigBits */
	TxInterFrameGapShift = 24,
	TxDMAShift = 8,	/* DMA burst value (0-7) is shift this many bits */

	/* Config1 register p.24 */
	PMEnable = (1 << 0),	/* Power Management Enable */

	/* Config3 register p.25 */
	MagicPacket = (1 << 5),	/* Wake up when receives a Magic Packet */
	LinkUp = (1 << 4),	/* Wake up when the cable connection is re-established */

	/* Config5 register p.27 */
	BWF = (1 << 6),		/* Accept Broadcast wakeup frame */
	MWF = (1 << 5),		/* Accept Multicast wakeup frame */
	UWF = (1 << 4),		/* Accept Unicast wakeup frame */
	LanWake = (1 << 1),	/* LanWake enable/disable */
	PMEStatus = (1 << 0),	/* PME status can be reset by PCI RST# */

	/* TBICSR p.28 */
	TBIReset	= 0x80000000,
	TBILoopback	= 0x40000000,
	TBINwEnable	= 0x20000000,
	TBINwRestart	= 0x10000000,
	TBILinkOk	= 0x02000000,
	TBINwComplete	= 0x01000000,

	/* CPlusCmd p.31 */
	RxVlan		= (1 << 6),
	RxChkSum	= (1 << 5),
	PCIDAC		= (1 << 4),
	PCIMulRW	= (1 << 3),

	/* rtl8169_PHYstatus */
	TBI_Enable = 0x80,
	TxFlowCtrl = 0x40,
	RxFlowCtrl = 0x20,
	_1000bpsF = 0x10,
	_100bps = 0x08,
	_10bps = 0x04,
	LinkStatus = 0x02,
	FullDup = 0x01,

	/* _MediaType */
	_10_Half = 0x01,
	_10_Full = 0x02,
	_100_Half = 0x04,
	_100_Full = 0x08,
	_1000_Full = 0x10,

	/* _TBICSRBit */
	TBILinkOK = 0x02000000,

	/* DumpCounterCommand */
	CounterDump = 0x8,
};

/* Tx/Rx descriptor opts1/opts2 bits.  Note: Tx bits (e.g. UDPCS) and Rx
 * bits (e.g. PID0) legitimately share values - they live in different
 * descriptor directions. */
enum _DescStatusBit {
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */

	/* Tx private */
	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
	MSSShift	= 16,        /* MSS value position */
	MSSMask		= 0xfff,     /* MSS value + LargeSend bit: 12 bits */
	IPCS		= (1 << 18), /* Calculate IP checksum */
	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
	TxVlanTag	= (1 << 17), /* Add VLAN tag */

	/* Rx private */
	PID1		= (1 << 18), /* Protocol ID bit 1/2 */
	PID0		= (1 << 17), /* Protocol ID bit 2/2 */

#define RxProtoUDP	(PID1)
#define RxProtoTCP	(PID0)
#define RxProtoIP	(PID1 | PID0)
#define RxProtoMask	RxProtoIP

	IPFail		= (1 << 16), /* IP checksum failed */
	UDPFail		= (1 << 15), /* UDP/IP checksum failed */
	TCPFail		= (1 << 14), /* TCP/IP checksum failed */
	RxVlanTag	= (1 << 16), /* VLAN tag available */
};

#define RsvdMask	0x3fffc000
407struct TxDesc {
408 u32 opts1;
409 u32 opts2;
410 u64 addr;
411};
412
413struct RxDesc {
414 u32 opts1;
415 u32 opts2;
416 u64 addr;
417};
418
419struct ring_info {
420 struct sk_buff *skb;
421 u32 len;
422 u8 __pad[sizeof(void *) - sizeof(u32)];
423};
424
425struct rtl8169_private {
426 void __iomem *mmio_addr; /* memory map physical address */
427 struct pci_dev *pci_dev; /* Index of PCI device */
c4028958 428 struct net_device *dev;
1da177e4
LT
429 struct net_device_stats stats; /* statistics of net device */
430 spinlock_t lock; /* spin lock flag */
b57b7e5a 431 u32 msg_enable;
1da177e4
LT
432 int chipset;
433 int mac_version;
434 int phy_version;
435 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
436 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
437 u32 dirty_rx;
438 u32 dirty_tx;
439 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
440 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
441 dma_addr_t TxPhyAddr;
442 dma_addr_t RxPhyAddr;
443 struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */
444 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
bcf0bf90 445 unsigned align;
1da177e4
LT
446 unsigned rx_buf_sz;
447 struct timer_list timer;
448 u16 cp_cmd;
449 u16 intr_mask;
450 int phy_auto_nego_reg;
451 int phy_1000_ctrl_reg;
452#ifdef CONFIG_R8169_VLAN
453 struct vlan_group *vlgrp;
454#endif
455 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
456 void (*get_settings)(struct net_device *, struct ethtool_cmd *);
457 void (*phy_reset_enable)(void __iomem *);
458 unsigned int (*phy_reset_pending)(void __iomem *);
459 unsigned int (*link_ok)(void __iomem *);
c4028958 460 struct delayed_work task;
61a4dcc2 461 unsigned wol_enabled : 1;
1da177e4
LT
462};
463
979b6c13 464MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
1da177e4
LT
465MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
466module_param_array(media, int, &num_media, 0);
df0a1bf6 467MODULE_PARM_DESC(media, "force phy operation. Deprecated by ethtool (8).");
1da177e4 468module_param(rx_copybreak, int, 0);
1b7efd58 469MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
1da177e4
LT
470module_param(use_dac, int, 0);
471MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
b57b7e5a
SH
472module_param_named(debug, debug.msg_enable, int, 0);
473MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
1da177e4
LT
474MODULE_LICENSE("GPL");
475MODULE_VERSION(RTL8169_VERSION);
476
477static int rtl8169_open(struct net_device *dev);
478static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev);
7d12e780 479static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
1da177e4
LT
480static int rtl8169_init_ring(struct net_device *dev);
481static void rtl8169_hw_start(struct net_device *dev);
482static int rtl8169_close(struct net_device *dev);
483static void rtl8169_set_rx_mode(struct net_device *dev);
484static void rtl8169_tx_timeout(struct net_device *dev);
4dcb7d33 485static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
1da177e4
LT
486static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
487 void __iomem *);
4dcb7d33 488static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
1da177e4 489static void rtl8169_down(struct net_device *dev);
99f252b0 490static void rtl8169_rx_clear(struct rtl8169_private *tp);
1da177e4
LT
491
492#ifdef CONFIG_R8169_NAPI
493static int rtl8169_poll(struct net_device *dev, int *budget);
494#endif
495
496static const u16 rtl8169_intr_mask =
497 SYSErr | LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
498static const u16 rtl8169_napi_event =
499 RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr;
500static const unsigned int rtl8169_rx_config =
5b0384f4 501 (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
1da177e4
LT
502
503static void mdio_write(void __iomem *ioaddr, int RegAddr, int value)
504{
505 int i;
506
507 RTL_W32(PHYAR, 0x80000000 | (RegAddr & 0xFF) << 16 | value);
1da177e4 508
2371408c 509 for (i = 20; i > 0; i--) {
1da177e4 510 /* Check if the RTL8169 has completed writing to the specified MII register */
5b0384f4 511 if (!(RTL_R32(PHYAR) & 0x80000000))
1da177e4 512 break;
2371408c 513 udelay(25);
1da177e4
LT
514 }
515}
516
517static int mdio_read(void __iomem *ioaddr, int RegAddr)
518{
519 int i, value = -1;
520
521 RTL_W32(PHYAR, 0x0 | (RegAddr & 0xFF) << 16);
1da177e4 522
2371408c 523 for (i = 20; i > 0; i--) {
1da177e4
LT
524 /* Check if the RTL8169 has completed retrieving data from the specified MII register */
525 if (RTL_R32(PHYAR) & 0x80000000) {
526 value = (int) (RTL_R32(PHYAR) & 0xFFFF);
527 break;
528 }
2371408c 529 udelay(25);
1da177e4
LT
530 }
531 return value;
532}
533
534static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
535{
536 RTL_W16(IntrMask, 0x0000);
537
538 RTL_W16(IntrStatus, 0xffff);
539}
540
541static void rtl8169_asic_down(void __iomem *ioaddr)
542{
543 RTL_W8(ChipCmd, 0x00);
544 rtl8169_irq_mask_and_ack(ioaddr);
545 RTL_R16(CPlusCmd);
546}
547
548static unsigned int rtl8169_tbi_reset_pending(void __iomem *ioaddr)
549{
550 return RTL_R32(TBICSR) & TBIReset;
551}
552
553static unsigned int rtl8169_xmii_reset_pending(void __iomem *ioaddr)
554{
64e4bfb4 555 return mdio_read(ioaddr, MII_BMCR) & BMCR_RESET;
1da177e4
LT
556}
557
558static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
559{
560 return RTL_R32(TBICSR) & TBILinkOk;
561}
562
563static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
564{
565 return RTL_R8(PHYstatus) & LinkStatus;
566}
567
568static void rtl8169_tbi_reset_enable(void __iomem *ioaddr)
569{
570 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
571}
572
573static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
574{
575 unsigned int val;
576
9e0db8ef
FR
577 val = mdio_read(ioaddr, MII_BMCR) | BMCR_RESET;
578 mdio_write(ioaddr, MII_BMCR, val & 0xffff);
1da177e4
LT
579}
580
581static void rtl8169_check_link_status(struct net_device *dev,
582 struct rtl8169_private *tp, void __iomem *ioaddr)
583{
584 unsigned long flags;
585
586 spin_lock_irqsave(&tp->lock, flags);
587 if (tp->link_ok(ioaddr)) {
588 netif_carrier_on(dev);
b57b7e5a
SH
589 if (netif_msg_ifup(tp))
590 printk(KERN_INFO PFX "%s: link up\n", dev->name);
591 } else {
592 if (netif_msg_ifdown(tp))
593 printk(KERN_INFO PFX "%s: link down\n", dev->name);
1da177e4 594 netif_carrier_off(dev);
b57b7e5a 595 }
1da177e4
LT
596 spin_unlock_irqrestore(&tp->lock, flags);
597}
598
599static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
600{
601 struct {
602 u16 speed;
603 u8 duplex;
604 u8 autoneg;
605 u8 media;
606 } link_settings[] = {
607 { SPEED_10, DUPLEX_HALF, AUTONEG_DISABLE, _10_Half },
608 { SPEED_10, DUPLEX_FULL, AUTONEG_DISABLE, _10_Full },
609 { SPEED_100, DUPLEX_HALF, AUTONEG_DISABLE, _100_Half },
610 { SPEED_100, DUPLEX_FULL, AUTONEG_DISABLE, _100_Full },
611 { SPEED_1000, DUPLEX_FULL, AUTONEG_DISABLE, _1000_Full },
612 /* Make TBI happy */
613 { SPEED_1000, DUPLEX_FULL, AUTONEG_ENABLE, 0xff }
614 }, *p;
615 unsigned char option;
5b0384f4 616
1da177e4
LT
617 option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff;
618
b57b7e5a 619 if ((option != 0xff) && !idx && netif_msg_drv(&debug))
1da177e4
LT
620 printk(KERN_WARNING PFX "media option is deprecated.\n");
621
622 for (p = link_settings; p->media != 0xff; p++) {
623 if (p->media == option)
624 break;
625 }
626 *autoneg = p->autoneg;
627 *speed = p->speed;
628 *duplex = p->duplex;
629}
630
61a4dcc2
FR
631static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
632{
633 struct rtl8169_private *tp = netdev_priv(dev);
634 void __iomem *ioaddr = tp->mmio_addr;
635 u8 options;
636
637 wol->wolopts = 0;
638
639#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
640 wol->supported = WAKE_ANY;
641
642 spin_lock_irq(&tp->lock);
643
644 options = RTL_R8(Config1);
645 if (!(options & PMEnable))
646 goto out_unlock;
647
648 options = RTL_R8(Config3);
649 if (options & LinkUp)
650 wol->wolopts |= WAKE_PHY;
651 if (options & MagicPacket)
652 wol->wolopts |= WAKE_MAGIC;
653
654 options = RTL_R8(Config5);
655 if (options & UWF)
656 wol->wolopts |= WAKE_UCAST;
657 if (options & BWF)
5b0384f4 658 wol->wolopts |= WAKE_BCAST;
61a4dcc2 659 if (options & MWF)
5b0384f4 660 wol->wolopts |= WAKE_MCAST;
61a4dcc2
FR
661
662out_unlock:
663 spin_unlock_irq(&tp->lock);
664}
665
666static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
667{
668 struct rtl8169_private *tp = netdev_priv(dev);
669 void __iomem *ioaddr = tp->mmio_addr;
670 int i;
671 static struct {
672 u32 opt;
673 u16 reg;
674 u8 mask;
675 } cfg[] = {
676 { WAKE_ANY, Config1, PMEnable },
677 { WAKE_PHY, Config3, LinkUp },
678 { WAKE_MAGIC, Config3, MagicPacket },
679 { WAKE_UCAST, Config5, UWF },
680 { WAKE_BCAST, Config5, BWF },
681 { WAKE_MCAST, Config5, MWF },
682 { WAKE_ANY, Config5, LanWake }
683 };
684
685 spin_lock_irq(&tp->lock);
686
687 RTL_W8(Cfg9346, Cfg9346_Unlock);
688
689 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
690 u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
691 if (wol->wolopts & cfg[i].opt)
692 options |= cfg[i].mask;
693 RTL_W8(cfg[i].reg, options);
694 }
695
696 RTL_W8(Cfg9346, Cfg9346_Lock);
697
698 tp->wol_enabled = (wol->wolopts) ? 1 : 0;
699
700 spin_unlock_irq(&tp->lock);
701
702 return 0;
703}
704
1da177e4
LT
705static void rtl8169_get_drvinfo(struct net_device *dev,
706 struct ethtool_drvinfo *info)
707{
708 struct rtl8169_private *tp = netdev_priv(dev);
709
710 strcpy(info->driver, MODULENAME);
711 strcpy(info->version, RTL8169_VERSION);
712 strcpy(info->bus_info, pci_name(tp->pci_dev));
713}
714
715static int rtl8169_get_regs_len(struct net_device *dev)
716{
717 return R8169_REGS_SIZE;
718}
719
720static int rtl8169_set_speed_tbi(struct net_device *dev,
721 u8 autoneg, u16 speed, u8 duplex)
722{
723 struct rtl8169_private *tp = netdev_priv(dev);
724 void __iomem *ioaddr = tp->mmio_addr;
725 int ret = 0;
726 u32 reg;
727
728 reg = RTL_R32(TBICSR);
729 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
730 (duplex == DUPLEX_FULL)) {
731 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
732 } else if (autoneg == AUTONEG_ENABLE)
733 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
734 else {
b57b7e5a
SH
735 if (netif_msg_link(tp)) {
736 printk(KERN_WARNING "%s: "
737 "incorrect speed setting refused in TBI mode\n",
738 dev->name);
739 }
1da177e4
LT
740 ret = -EOPNOTSUPP;
741 }
742
743 return ret;
744}
745
746static int rtl8169_set_speed_xmii(struct net_device *dev,
747 u8 autoneg, u16 speed, u8 duplex)
748{
749 struct rtl8169_private *tp = netdev_priv(dev);
750 void __iomem *ioaddr = tp->mmio_addr;
751 int auto_nego, giga_ctrl;
752
64e4bfb4
FR
753 auto_nego = mdio_read(ioaddr, MII_ADVERTISE);
754 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
755 ADVERTISE_100HALF | ADVERTISE_100FULL);
756 giga_ctrl = mdio_read(ioaddr, MII_CTRL1000);
757 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1da177e4
LT
758
759 if (autoneg == AUTONEG_ENABLE) {
64e4bfb4
FR
760 auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
761 ADVERTISE_100HALF | ADVERTISE_100FULL);
762 giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
1da177e4
LT
763 } else {
764 if (speed == SPEED_10)
64e4bfb4 765 auto_nego |= ADVERTISE_10HALF | ADVERTISE_10FULL;
1da177e4 766 else if (speed == SPEED_100)
64e4bfb4 767 auto_nego |= ADVERTISE_100HALF | ADVERTISE_100FULL;
1da177e4 768 else if (speed == SPEED_1000)
64e4bfb4 769 giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
1da177e4
LT
770
771 if (duplex == DUPLEX_HALF)
64e4bfb4 772 auto_nego &= ~(ADVERTISE_10FULL | ADVERTISE_100FULL);
726ecdcf
AG
773
774 if (duplex == DUPLEX_FULL)
64e4bfb4 775 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_100HALF);
bcf0bf90
FR
776
777 /* This tweak comes straight from Realtek's driver. */
778 if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) &&
779 (tp->mac_version == RTL_GIGA_MAC_VER_13)) {
64e4bfb4 780 auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA;
bcf0bf90
FR
781 }
782 }
783
784 /* The 8100e/8101e do Fast Ethernet only. */
785 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
786 (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
787 (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
64e4bfb4 788 if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
bcf0bf90
FR
789 netif_msg_link(tp)) {
790 printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
791 dev->name);
792 }
64e4bfb4 793 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1da177e4
LT
794 }
795
623a1593
FR
796 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
797
1da177e4
LT
798 tp->phy_auto_nego_reg = auto_nego;
799 tp->phy_1000_ctrl_reg = giga_ctrl;
800
64e4bfb4
FR
801 mdio_write(ioaddr, MII_ADVERTISE, auto_nego);
802 mdio_write(ioaddr, MII_CTRL1000, giga_ctrl);
803 mdio_write(ioaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
1da177e4
LT
804 return 0;
805}
806
807static int rtl8169_set_speed(struct net_device *dev,
808 u8 autoneg, u16 speed, u8 duplex)
809{
810 struct rtl8169_private *tp = netdev_priv(dev);
811 int ret;
812
813 ret = tp->set_speed(dev, autoneg, speed, duplex);
814
64e4bfb4 815 if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
1da177e4
LT
816 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
817
818 return ret;
819}
820
821static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
822{
823 struct rtl8169_private *tp = netdev_priv(dev);
824 unsigned long flags;
825 int ret;
826
827 spin_lock_irqsave(&tp->lock, flags);
828 ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex);
829 spin_unlock_irqrestore(&tp->lock, flags);
5b0384f4 830
1da177e4
LT
831 return ret;
832}
833
834static u32 rtl8169_get_rx_csum(struct net_device *dev)
835{
836 struct rtl8169_private *tp = netdev_priv(dev);
837
838 return tp->cp_cmd & RxChkSum;
839}
840
841static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
842{
843 struct rtl8169_private *tp = netdev_priv(dev);
844 void __iomem *ioaddr = tp->mmio_addr;
845 unsigned long flags;
846
847 spin_lock_irqsave(&tp->lock, flags);
848
849 if (data)
850 tp->cp_cmd |= RxChkSum;
851 else
852 tp->cp_cmd &= ~RxChkSum;
853
854 RTL_W16(CPlusCmd, tp->cp_cmd);
855 RTL_R16(CPlusCmd);
856
857 spin_unlock_irqrestore(&tp->lock, flags);
858
859 return 0;
860}
861
862#ifdef CONFIG_R8169_VLAN
863
864static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
865 struct sk_buff *skb)
866{
867 return (tp->vlgrp && vlan_tx_tag_present(skb)) ?
868 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
869}
870
871static void rtl8169_vlan_rx_register(struct net_device *dev,
872 struct vlan_group *grp)
873{
874 struct rtl8169_private *tp = netdev_priv(dev);
875 void __iomem *ioaddr = tp->mmio_addr;
876 unsigned long flags;
877
878 spin_lock_irqsave(&tp->lock, flags);
879 tp->vlgrp = grp;
880 if (tp->vlgrp)
881 tp->cp_cmd |= RxVlan;
882 else
883 tp->cp_cmd &= ~RxVlan;
884 RTL_W16(CPlusCmd, tp->cp_cmd);
885 RTL_R16(CPlusCmd);
886 spin_unlock_irqrestore(&tp->lock, flags);
887}
888
1da177e4
LT
889static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
890 struct sk_buff *skb)
891{
892 u32 opts2 = le32_to_cpu(desc->opts2);
893 int ret;
894
895 if (tp->vlgrp && (opts2 & RxVlanTag)) {
896 rtl8169_rx_hwaccel_skb(skb, tp->vlgrp,
897 swab16(opts2 & 0xffff));
898 ret = 0;
899 } else
900 ret = -1;
901 desc->opts2 = 0;
902 return ret;
903}
904
905#else /* !CONFIG_R8169_VLAN */
906
907static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
908 struct sk_buff *skb)
909{
910 return 0;
911}
912
913static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
914 struct sk_buff *skb)
915{
916 return -1;
917}
918
919#endif
920
921static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
922{
923 struct rtl8169_private *tp = netdev_priv(dev);
924 void __iomem *ioaddr = tp->mmio_addr;
925 u32 status;
926
927 cmd->supported =
928 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
929 cmd->port = PORT_FIBRE;
930 cmd->transceiver = XCVR_INTERNAL;
931
932 status = RTL_R32(TBICSR);
933 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
934 cmd->autoneg = !!(status & TBINwEnable);
935
936 cmd->speed = SPEED_1000;
937 cmd->duplex = DUPLEX_FULL; /* Always set */
938}
939
940static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
941{
942 struct rtl8169_private *tp = netdev_priv(dev);
943 void __iomem *ioaddr = tp->mmio_addr;
944 u8 status;
945
946 cmd->supported = SUPPORTED_10baseT_Half |
947 SUPPORTED_10baseT_Full |
948 SUPPORTED_100baseT_Half |
949 SUPPORTED_100baseT_Full |
950 SUPPORTED_1000baseT_Full |
951 SUPPORTED_Autoneg |
5b0384f4 952 SUPPORTED_TP;
1da177e4
LT
953
954 cmd->autoneg = 1;
955 cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
956
64e4bfb4 957 if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
1da177e4 958 cmd->advertising |= ADVERTISED_10baseT_Half;
64e4bfb4 959 if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
1da177e4 960 cmd->advertising |= ADVERTISED_10baseT_Full;
64e4bfb4 961 if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
1da177e4 962 cmd->advertising |= ADVERTISED_100baseT_Half;
64e4bfb4 963 if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
1da177e4 964 cmd->advertising |= ADVERTISED_100baseT_Full;
64e4bfb4 965 if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
1da177e4
LT
966 cmd->advertising |= ADVERTISED_1000baseT_Full;
967
968 status = RTL_R8(PHYstatus);
969
970 if (status & _1000bpsF)
971 cmd->speed = SPEED_1000;
972 else if (status & _100bps)
973 cmd->speed = SPEED_100;
974 else if (status & _10bps)
975 cmd->speed = SPEED_10;
976
623a1593
FR
977 if (status & TxFlowCtrl)
978 cmd->advertising |= ADVERTISED_Asym_Pause;
979 if (status & RxFlowCtrl)
980 cmd->advertising |= ADVERTISED_Pause;
981
1da177e4
LT
982 cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
983 DUPLEX_FULL : DUPLEX_HALF;
984}
985
986static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
987{
988 struct rtl8169_private *tp = netdev_priv(dev);
989 unsigned long flags;
990
991 spin_lock_irqsave(&tp->lock, flags);
992
993 tp->get_settings(dev, cmd);
994
995 spin_unlock_irqrestore(&tp->lock, flags);
996 return 0;
997}
998
/*
 * ethtool get_regs handler: copy the MMIO register window into the
 * user-supplied buffer, clamped to R8169_REGS_SIZE.
 */
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len > R8169_REGS_SIZE)
		regs->len = R8169_REGS_SIZE;

	/* Snapshot the registers atomically w.r.t. the interrupt path. */
	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}
1012
b57b7e5a
SH
1013static u32 rtl8169_get_msglevel(struct net_device *dev)
1014{
1015 struct rtl8169_private *tp = netdev_priv(dev);
1016
1017 return tp->msg_enable;
1018}
1019
1020static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1021{
1022 struct rtl8169_private *tp = netdev_priv(dev);
1023
1024 tp->msg_enable = value;
1025}
1026
/*
 * ethtool statistics names. The order MUST match both the field order of
 * struct rtl8169_counters and the data[] indices filled in by
 * rtl8169_get_ethtool_stats().
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1042
/*
 * Layout of the hardware statistics block DMA'd by the chip when
 * CounterDump is triggered. Field order and widths mirror the hardware
 * format; values are little-endian on the wire. Do not reorder or resize.
 */
struct rtl8169_counters {
	u64 tx_packets;
	u64 rx_packets;
	u64 tx_errors;
	u32 rx_errors;
	u16 rx_missed;
	u16 align_errors;
	u32 tx_one_collision;
	u32 tx_multi_collision;
	u64 rx_unicast;
	u64 rx_broadcast;
	u32 rx_multicast;
	u16 tx_aborted;
	u16 tx_underun;	/* sic - misspelling kept, referenced elsewhere */
};
1058
/* ethtool get_stats_count handler: one counter per statistics string. */
static int rtl8169_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(rtl8169_gstrings);
}
1063
/*
 * ethtool get_ethtool_stats handler: ask the chip to DMA its hardware
 * counter block into a coherent buffer, then convert each little-endian
 * field into data[] (order matches rtl8169_gstrings).
 *
 * Silently returns with data[] untouched if the DMA buffer cannot be
 * allocated; ethtool core zero-fills the array beforehand.
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct rtl8169_counters *counters;
	dma_addr_t paddr;
	u32 cmd;

	ASSERT_RTNL();

	counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
	if (!counters)
		return;

	/* Program the dump address, then set CounterDump to start the DMA. */
	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
	cmd = (u64)paddr & DMA_32BIT_MASK;
	RTL_W32(CounterAddrLow, cmd);
	RTL_W32(CounterAddrLow, cmd | CounterDump);

	/* The chip clears CounterDump when the transfer completes. */
	while (RTL_R32(CounterAddrLow) & CounterDump) {
		if (msleep_interruptible(1))
			break;
	}

	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

	data[0] = le64_to_cpu(counters->tx_packets);
	data[1] = le64_to_cpu(counters->rx_packets);
	data[2] = le64_to_cpu(counters->tx_errors);
	data[3] = le32_to_cpu(counters->rx_errors);
	data[4] = le16_to_cpu(counters->rx_missed);
	data[5] = le16_to_cpu(counters->align_errors);
	data[6] = le32_to_cpu(counters->tx_one_collision);
	data[7] = le32_to_cpu(counters->tx_multi_collision);
	data[8] = le64_to_cpu(counters->rx_unicast);
	data[9] = le64_to_cpu(counters->rx_broadcast);
	data[10] = le32_to_cpu(counters->rx_multicast);
	data[11] = le16_to_cpu(counters->tx_aborted);
	data[12] = le16_to_cpu(counters->tx_underun);

	pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
}
1108
1109static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1110{
1111 switch(stringset) {
1112 case ETH_SS_STATS:
1113 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1114 break;
1115 }
1116}
1117
1118
/* ethtool operations table registered via SET_ETHTOOL_OPS() at probe. */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_rx_csum		= rtl8169_get_rx_csum,
	.set_rx_csum		= rtl8169_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_stats_count	= rtl8169_get_stats_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
1143
1144static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum,
1145 int bitval)
1146{
1147 int val;
1148
1149 val = mdio_read(ioaddr, reg);
1150 val = (bitval == 1) ?
1151 val | (bitval << bitnum) : val & ~(0x0001 << bitnum);
5b0384f4 1152 mdio_write(ioaddr, reg, val & 0xffff);
1da177e4
LT
1153}
1154
/*
 * Identify the MAC revision from the chip-version bits of TxConfig.
 * The table is walked top-down; the all-zero mask entry is a guaranteed
 * catch-all, so the loop always terminates.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *ioaddr)
{
	const struct {
		u32 mask;
		int mac_version;
	} mac_info[] = {
		{ 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x30800000,	RTL_GIGA_MAC_VER_14 },
		{ 0x30000000,	RTL_GIGA_MAC_VER_11 },
		{ 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0x00000000,	RTL_GIGA_MAC_VER_01 }	/* Catch-all */
	}, *p = mac_info;
	u32 reg;

	/* Keep only the chip-version bits of TxConfig. */
	reg = RTL_R32(TxConfig) & 0x7c800000;
	while ((reg & p->mask) != p->mask)
		p++;
	tp->mac_version = p->mac_version;
}
1179
/* Debug helper: log the detected MAC revision. */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
1184
/*
 * Identify the PHY revision from the low bits of MII_PHYSID2.
 * Walked top-down with a catch-all last entry, like
 * rtl8169_get_mac_version().
 */
static void rtl8169_get_phy_version(struct rtl8169_private *tp, void __iomem *ioaddr)
{
	const struct {
		u16 mask;
		u16 set;
		int phy_version;
	} phy_info[] = {
		{ 0x000f, 0x0002, RTL_GIGA_PHY_VER_G },
		{ 0x000f, 0x0001, RTL_GIGA_PHY_VER_F },
		{ 0x000f, 0x0000, RTL_GIGA_PHY_VER_E },
		{ 0x0000, 0x0000, RTL_GIGA_PHY_VER_D } /* Catch-all */
	}, *p = phy_info;
	u16 reg;

	reg = mdio_read(ioaddr, MII_PHYSID2) & 0xffff;
	while ((reg & p->mask) != p->set)
		p++;
	tp->phy_version = p->phy_version;
}
1204
/* Debug helper: log the detected PHY revision by name. */
static void rtl8169_print_phy_version(struct rtl8169_private *tp)
{
	struct {
		int version;
		char *msg;
		u32 reg;
	} phy_print[] = {
		{ RTL_GIGA_PHY_VER_G, "RTL_GIGA_PHY_VER_G", 0x0002 },
		{ RTL_GIGA_PHY_VER_F, "RTL_GIGA_PHY_VER_F", 0x0001 },
		{ RTL_GIGA_PHY_VER_E, "RTL_GIGA_PHY_VER_E", 0x0000 },
		{ RTL_GIGA_PHY_VER_D, "RTL_GIGA_PHY_VER_D", 0x0000 },
		{ 0, NULL, 0x0000 }
	}, *p;

	for (p = phy_print; p->msg; p++) {
		if (tp->phy_version == p->version) {
			dprintk("phy_version == %s (%04x)\n", p->msg, p->reg);
			return;
		}
	}
	dprintk("phy_version == Unknown\n");
}
1227
/*
 * Apply vendor "magic" PHY fixups. The register values come from
 * Realtek's reference driver configuration scripts (the //w comments are
 * the original script lines); the write sequence is order-critical and
 * must not be reordered.
 *
 * Only runs for MAC > VER_01 with PHY < VER_H; VER_04 gets a short
 * dedicated sequence, everything else gets the RTL8169s "mac_version C"
 * table below.
 */
static void rtl8169_hw_phy_config(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct {
		u16 regs[5]; /* Beware of bit-sign propagation */
	} phy_magic[5] = { {
		{ 0x0000,	//w 4 15 12 0
		  0x00a1,	//w 3 15 0 00a1
		  0x0008,	//w 2 15 0 0008
		  0x1020,	//w 1 15 0 1020
		  0x1000 } },{	//w 0 15 0 1000
		{ 0x7000,	//w 4 15 12 7
		  0xff41,	//w 3 15 0 ff41
		  0xde60,	//w 2 15 0 de60
		  0x0140,	//w 1 15 0 0140
		  0x0077 } },{	//w 0 15 0 0077
		{ 0xa000,	//w 4 15 12 a
		  0xdf01,	//w 3 15 0 df01
		  0xdf20,	//w 2 15 0 df20
		  0xff95,	//w 1 15 0 ff95
		  0xfa00 } },{	//w 0 15 0 fa00
		{ 0xb000,	//w 4 15 12 b
		  0xff41,	//w 3 15 0 ff41
		  0xde20,	//w 2 15 0 de20
		  0x0140,	//w 1 15 0 0140
		  0x00bb } },{	//w 0 15 0 00bb
		{ 0xf000,	//w 4 15 12 f
		  0xdf01,	//w 3 15 0 df01
		  0xdf20,	//w 2 15 0 df20
		  0xff95,	//w 1 15 0 ff95
		  0xbf00 }	//w 0 15 0 bf00
		}
	}, *p = phy_magic;
	int i;

	rtl8169_print_mac_version(tp);
	rtl8169_print_phy_version(tp);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
		return;
	if (tp->phy_version >= RTL_GIGA_PHY_VER_H)
		return;

	dprintk("MAC version != 0 && PHY version == 0 or 1\n");
	dprintk("Do final_reg2.cfg\n");

	/* Shazam ! */

	if (tp->mac_version == RTL_GIGA_MAC_VER_04) {
		mdio_write(ioaddr, 31, 0x0002);
		mdio_write(ioaddr,  1, 0x90d0);
		mdio_write(ioaddr, 31, 0x0000);
		return;
	}

	/* phy config for RTL8169s mac_version C chip */
	mdio_write(ioaddr, 31, 0x0001);			//w 31 2 0 1
	mdio_write(ioaddr, 21, 0x1000);			//w 21 15 0 1000
	mdio_write(ioaddr, 24, 0x65c7);			//w 24 15 0 65c7
	rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0);	//w 4 11 11 0

	for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) {
		int val, pos = 4;

		/* Reg 4: keep the low 12 bits, merge in the table value. */
		val = (mdio_read(ioaddr, pos) & 0x0fff) | (p->regs[0] & 0xffff);
		mdio_write(ioaddr, pos, val);
		/* Then write regs 3..0 from the table. */
		while (--pos >= 0)
			mdio_write(ioaddr, pos, p->regs[4 - pos] & 0xffff);
		rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1
		rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
	}
	mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0
}
1302
/*
 * Periodic PHY watchdog (timer callback). While 1000Full is advertised
 * but the link is down, keep resetting the PHY and re-arm the timer;
 * once the link is up the timer is simply not re-armed.
 */
static void rtl8169_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct rtl8169_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	/* The timer is only ever armed for this chip/PHY combination. */
	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
	assert(tp->phy_version < RTL_GIGA_PHY_VER_H);

	if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
		return;

	spin_lock_irq(&tp->lock);

	if (tp->phy_reset_pending(ioaddr)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	if (tp->link_ok(ioaddr))
		goto out_unlock;

	if (netif_msg_link(tp))
		printk(KERN_WARNING "%s: PHY reset until link up\n", dev->name);

	tp->phy_reset_enable(ioaddr);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
out_unlock:
	spin_unlock_irq(&tp->lock);
}
1341
1342static inline void rtl8169_delete_timer(struct net_device *dev)
1343{
1344 struct rtl8169_private *tp = netdev_priv(dev);
1345 struct timer_list *timer = &tp->timer;
1346
bcf0bf90 1347 if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
1da177e4
LT
1348 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1349 return;
1350
1351 del_timer_sync(timer);
1352}
1353
1354static inline void rtl8169_request_timer(struct net_device *dev)
1355{
1356 struct rtl8169_private *tp = netdev_priv(dev);
1357 struct timer_list *timer = &tp->timer;
1358
bcf0bf90 1359 if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
1da177e4
LT
1360 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1361 return;
1362
2efa53f3 1363 mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT);
1da177e4
LT
1364}
1365
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	/* Mask the IRQ line while invoking the handler by hand. */
	disable_irq(pdev->irq);
	rtl8169_interrupt(pdev->irq, dev);
	enable_irq(pdev->irq);
}
#endif
1382
/*
 * Undo the probe-time board setup: unmap MMIO, release the PCI regions,
 * disable the device and free the netdev. Order mirrors the reverse of
 * rtl8169_init_one().
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
1391
bf793295
FR
1392static void rtl8169_phy_reset(struct net_device *dev,
1393 struct rtl8169_private *tp)
1394{
1395 void __iomem *ioaddr = tp->mmio_addr;
1396 int i;
1397
1398 tp->phy_reset_enable(ioaddr);
1399 for (i = 0; i < 100; i++) {
1400 if (!tp->phy_reset_pending(ioaddr))
1401 return;
1402 msleep(1);
1403 }
1404 if (netif_msg_link(tp))
1405 printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
1406}
1407
/*
 * One-time PHY bring-up during probe: apply the magic PHY config,
 * per-chip MAC/PCI fixups, then reset the PHY and program the
 * module-parameter link options. The write order follows Realtek's
 * reference sequence and must be preserved.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* Monotonic probe counter used to index the per-board link options. */
	static int board_idx = -1;
	u8 autoneg, duplex;
	u16 speed;

	board_idx++;

	rtl8169_hw_phy_config(dev);

	dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
	RTL_W8(0x82, 0x01);

	if (tp->mac_version < RTL_GIGA_MAC_VER_03) {
		dprintk("Set PCI Latency=0x40\n");
		pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);

	rtl8169_phy_reset(dev, tp);

	rtl8169_set_speed(dev, autoneg, speed, duplex);

	if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
		printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
}
1443
5f787a1a
FR
1444static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1445{
1446 struct rtl8169_private *tp = netdev_priv(dev);
1447 struct mii_ioctl_data *data = if_mii(ifr);
1448
1449 if (!netif_running(dev))
1450 return -ENODEV;
1451
1452 switch (cmd) {
1453 case SIOCGMIIPHY:
1454 data->phy_id = 32; /* Internal PHY */
1455 return 0;
1456
1457 case SIOCGMIIREG:
1458 data->val_out = mdio_read(tp->mmio_addr, data->reg_num & 0x1f);
1459 return 0;
1460
1461 case SIOCSMIIREG:
1462 if (!capable(CAP_NET_ADMIN))
1463 return -EPERM;
1464 mdio_write(tp->mmio_addr, data->reg_num & 0x1f, data->val_in);
1465 return 0;
1466 }
1467 return -EOPNOTSUPP;
1468}
1469
/*
 * PCI probe: enable and map the device, soft-reset the chip, identify
 * MAC/PHY revisions, wire up the net_device operations and register it.
 * Errors unwind through the numbered goto ladder at the bottom, in the
 * exact reverse order of acquisition.
 */
static int __devinit
rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const unsigned int region = rtl_cfg_info[ent->driver_data].region;
	struct rtl8169_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	unsigned int pm_cap;
	int i, rc;

	if (netif_msg_drv(&debug)) {
		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
		       MODULENAME, RTL8169_VERSION);
	}

	dev = alloc_etherdev(sizeof (*tp));
	if (!dev) {
		if (netif_msg_drv(&debug))
			dev_err(&pdev->dev, "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto out;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);

	/* enable device (incl. PCI PM wakeup and hotplug setup) */
	rc = pci_enable_device(pdev);
	if (rc < 0) {
		if (netif_msg_probe(tp))
			dev_err(&pdev->dev, "enable failure\n");
		goto err_out_free_dev_1;
	}

	rc = pci_set_mwi(pdev);
	if (rc < 0)
		goto err_out_disable_2;

	/* save power state before pci_enable_device overwrites it */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap) {
		u16 pwr_command, acpi_idle_state;

		pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
	} else {
		/* Not fatal: continue without PM information. */
		if (netif_msg_probe(tp)) {
			dev_err(&pdev->dev,
				"PowerManagement capability not found.\n");
		}
	}

	/* make sure PCI base addr 1 is MMIO */
	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
		if (netif_msg_probe(tp)) {
			dev_err(&pdev->dev,
				"region #%d not an MMIO resource, aborting\n",
				region);
		}
		rc = -ENODEV;
		goto err_out_mwi_3;
	}

	/* check for weird/broken PCI region reporting */
	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
		if (netif_msg_probe(tp)) {
			dev_err(&pdev->dev,
				"Invalid PCI region size(s), aborting\n");
		}
		rc = -ENODEV;
		goto err_out_mwi_3;
	}

	rc = pci_request_regions(pdev, MODULENAME);
	if (rc < 0) {
		if (netif_msg_probe(tp))
			dev_err(&pdev->dev, "could not request regions.\n");
		goto err_out_mwi_3;
	}

	tp->cp_cmd = PCIMulRW | RxChkSum;

	/* Prefer 64-bit DMA when available and explicitly requested. */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK) && use_dac) {
		tp->cp_cmd |= PCIDAC;
		dev->features |= NETIF_F_HIGHDMA;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc < 0) {
			if (netif_msg_probe(tp)) {
				dev_err(&pdev->dev,
					"DMA configuration failed.\n");
			}
			goto err_out_free_res_4;
		}
	}

	pci_set_master(pdev);

	/* ioremap MMIO region */
	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
	if (!ioaddr) {
		if (netif_msg_probe(tp))
			dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
		rc = -EIO;
		goto err_out_free_res_4;
	}

	/* Unneeded ? Don't mess with Mrs. Murphy. */
	rtl8169_irq_mask_and_ack(ioaddr);

	/* Soft reset the chip. */
	RTL_W8(ChipCmd, CmdReset);

	/* Check that the chip has finished the reset. */
	for (i = 100; i > 0; i--) {
		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
			break;
		msleep_interruptible(1);
	}

	/* Identify chip attached to board */
	rtl8169_get_mac_version(tp, ioaddr);
	rtl8169_get_phy_version(tp, ioaddr);

	rtl8169_print_mac_version(tp);
	rtl8169_print_phy_version(tp);

	for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) {
		if (tp->mac_version == rtl_chip_info[i].mac_version)
			break;
	}
	if (i < 0) {
		/* Unknown chip: assume array element #0, original RTL-8169 */
		if (netif_msg_probe(tp)) {
			dev_printk(KERN_DEBUG, &pdev->dev,
				   "unknown chip version, assuming %s\n",
				   rtl_chip_info[0].name);
		}
		i++;
	}
	tp->chipset = i;

	/* Enable PME behind the config-register lock. */
	RTL_W8(Cfg9346, Cfg9346_Unlock);
	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
	RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Select the TBI or XMII (copper) operation hooks. */
	if (RTL_R8(PHYstatus) & TBI_Enable) {
		tp->set_speed = rtl8169_set_speed_tbi;
		tp->get_settings = rtl8169_gset_tbi;
		tp->phy_reset_enable = rtl8169_tbi_reset_enable;
		tp->phy_reset_pending = rtl8169_tbi_reset_pending;
		tp->link_ok = rtl8169_tbi_link_ok;

		tp->phy_1000_ctrl_reg = ADVERTISE_1000FULL; /* Implied by TBI */
	} else {
		tp->set_speed = rtl8169_set_speed_xmii;
		tp->get_settings = rtl8169_gset_xmii;
		tp->phy_reset_enable = rtl8169_xmii_reset_enable;
		tp->phy_reset_pending = rtl8169_xmii_reset_pending;
		tp->link_ok = rtl8169_xmii_link_ok;

		dev->do_ioctl = rtl8169_ioctl;
	}

	/* Get MAC address.  FIXME: read EEPROM */
	for (i = 0; i < MAC_ADDR_LEN; i++)
		dev->dev_addr[i] = RTL_R8(MAC0 + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->open = rtl8169_open;
	dev->hard_start_xmit = rtl8169_start_xmit;
	dev->get_stats = rtl8169_get_stats;
	SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
	dev->stop = rtl8169_close;
	dev->tx_timeout = rtl8169_tx_timeout;
	dev->set_multicast_list = rtl8169_set_rx_mode;
	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) ioaddr;
	dev->change_mtu = rtl8169_change_mtu;

#ifdef CONFIG_R8169_NAPI
	dev->poll = rtl8169_poll;
	dev->weight = R8169_NAPI_WEIGHT;
#endif

#ifdef CONFIG_R8169_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = rtl8169_vlan_rx_register;
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rtl8169_netpoll;
#endif

	tp->intr_mask = 0xffff;
	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;
	tp->align = rtl_cfg_info[ent->driver_data].align;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) dev;
	tp->timer.function = rtl8169_phy_timer;

	spin_lock_init(&tp->lock);

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_out_unmap_5;

	pci_set_drvdata(pdev, dev);

	if (netif_msg_probe(tp)) {
		printk(KERN_INFO "%s: %s at 0x%lx, "
		       "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
		       "IRQ %d\n",
		       dev->name,
		       rtl_chip_info[tp->chipset].name,
		       dev->base_addr,
		       dev->dev_addr[0], dev->dev_addr[1],
		       dev->dev_addr[2], dev->dev_addr[3],
		       dev->dev_addr[4], dev->dev_addr[5], dev->irq);
	}

	rtl8169_init_phy(dev, tp);

out:
	return rc;

err_out_unmap_5:
	iounmap(ioaddr);
err_out_free_res_4:
	pci_release_regions(pdev);
err_out_mwi_3:
	pci_clear_mwi(pdev);
err_out_disable_2:
	pci_disable_device(pdev);
err_out_free_dev_1:
	free_netdev(dev);
	goto out;
}
1716
/*
 * PCI remove: flush pending reset work, unregister the netdev and tear
 * down the board resources acquired in rtl8169_init_one().
 */
static void __devexit
rtl8169_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	assert(dev != NULL);
	assert(tp != NULL);

	/* Make sure no deferred task still references the device. */
	flush_scheduled_work();

	unregister_netdev(dev);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
	pci_set_drvdata(pdev, NULL);
}
1da177e4
LT
1733static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
1734 struct net_device *dev)
1735{
1736 unsigned int mtu = dev->mtu;
1737
1738 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1739}
1740
/*
 * net_device open handler: allocate the Tx/Rx descriptor rings and
 * receive buffers, request the (shared) IRQ and start the hardware.
 * Failures unwind through the goto ladder in reverse acquisition order.
 */
static int rtl8169_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;


	rtl8169_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent provides more.
	 */
	tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
					       &tp->TxPhyAddr);
	if (!tp->TxDescArray)
		goto out;

	tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
					       &tp->RxPhyAddr);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_DELAYED_WORK(&tp->task, NULL);

	/* Publish the ring state before the IRQ handler can run. */
	smp_mb();

	retval = request_irq(dev->irq, rtl8169_interrupt, IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_ring_2;

	rtl8169_hw_start(dev);

	rtl8169_request_timer(dev);

	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
out:
	return retval;

err_release_ring_2:
	rtl8169_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
			    tp->RxPhyAddr);
err_free_tx_0:
	pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
			    tp->TxPhyAddr);
	goto out;
}
1795
/* Quiesce the chip: mask/ack interrupts, then issue a soft reset. */
static void rtl8169_hw_reset(void __iomem *ioaddr)
{
	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(ioaddr);

	/* Reset the chipset */
	RTL_W8(ChipCmd, CmdReset);

	/* PCI commit */
	RTL_R8(ChipCmd);
}
1807
/*
 * Program RxConfig (driver defaults merged with the chip-specific mask)
 * and TxConfig (DMA burst size and interframe gap).
 */
static void rtl8169_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 cfg = rtl8169_rx_config;

	cfg |= (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
	RTL_W32(RxConfig, cfg);

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));
}
1820
/*
 * Bring the chip to an operational state: soft reset, per-revision PCI
 * and register fixups, ring base programming and interrupt unmasking.
 * Many of the writes below are undocumented vendor magic; the sequence
 * is order-critical (notably: on old chips Tx/Rx must be enabled BEFORE
 * the Rx/Tx config registers are written, on newer chips after the ring
 * addresses are set).
 */
static void rtl8169_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	u16 cmd;
	u32 i;

	/* Soft reset the chip. */
	RTL_W8(ChipCmd, CmdReset);

	/* Check that the chip has finished the reset. */
	for (i = 100; i > 0; i--) {
		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
			break;
		msleep_interruptible(1);
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_13) {
		pci_write_config_word(pdev, 0x68, 0x00);
		pci_write_config_word(pdev, 0x69, 0x08);
	}

	/* Undocumented stuff. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		/* Realtek's r1000_n.c driver uses '&& 0x01' here. Well... */
		if ((RTL_R8(Config2) & 0x07) & 0x01)
			RTL_W32(0x7c, 0x0007ffff);

		RTL_W32(0x7c, 0x0007ff00);

		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		cmd = cmd & 0xef;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	/* Old chips: enable Tx/Rx before touching the config registers. */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_04))
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(EarlyTxThres, EarlyTxThld);

	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, 16383);

	if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_04))
		rtl8169_set_rx_tx_config_registers(tp);

	cmd = RTL_R16(CPlusCmd);
	RTL_W16(CPlusCmd, cmd);

	tp->cp_cmd |= cmd | PCIMulRW;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
		dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr >> 32));
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK));
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr >> 32));
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK));

	/* Newer chips: enable Tx/Rx only after the ring addresses are set. */
	if ((tp->mac_version != RTL_GIGA_MAC_VER_01) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_02) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_03) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_04)) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl8169_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl8169_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);

	/* Enable all known interrupts by setting the interrupt mask. */
	RTL_W16(IntrMask, rtl8169_intr_mask);

	netif_start_queue(dev);
}
1935
/*
 * net_device change_mtu handler. For a running interface this tears the
 * device down, resizes the receive buffers, rebuilds the ring and
 * restarts the hardware.
 *
 * NOTE(review): if rtl8169_init_ring() fails here the device is left
 * down with an error returned, and netif_poll_enable() is skipped —
 * presumably intentional best-effort; confirm against rtl8169_down().
 */
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret = 0;

	if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
		return -EINVAL;

	dev->mtu = new_mtu;

	if (!netif_running(dev))
		goto out;

	rtl8169_down(dev);

	rtl8169_set_rxbufsize(tp, dev);

	ret = rtl8169_init_ring(dev);
	if (ret < 0)
		goto out;

	netif_poll_enable(dev);

	rtl8169_hw_start(dev);

	rtl8169_request_timer(dev);

out:
	return ret;
}
1966
/*
 * Poison an Rx descriptor so the chip will never use it: clear the
 * ownership/size bits and plant a recognizable bogus DMA address.
 */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = 0x0badbadbadbadbadull;
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
1972
/*
 * Release one receive buffer: unmap its DMA mapping, free the skb,
 * NULL the slot and poison the descriptor for the chip.
 */
static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
				struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
1984
/*
 * Hand an Rx descriptor back to the chip: set DescOwn and the buffer
 * size while preserving the descriptor's end-of-ring flag.
 */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
1991
/*
 * Install a DMA mapping into an Rx descriptor and give it to the chip.
 * The wmb() guarantees the address is visible before DescOwn is set.
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
1999
/*
 * Allocate, align, DMA-map and install one receive skb for the given
 * descriptor. Returns the skb, or NULL on allocation failure (in which
 * case the descriptor is poisoned so the chip cannot use it).
 *
 * NOTE(review): the pci_map_single() result is not checked for mapping
 * failure before being handed to the chip — presumably acceptable for
 * this era of the DMA API; confirm.
 */
static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
					    struct net_device *dev,
					    struct RxDesc *desc, int rx_buf_sz,
					    unsigned int align)
{
	struct sk_buff *skb;
	dma_addr_t mapping;

	skb = netdev_alloc_skb(dev, rx_buf_sz + align);
	if (!skb)
		goto err_out;

	/* Advance skb->data to the chip's required alignment. */
	skb_reserve(skb, (align - 1) & (unsigned long)skb->data);

	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
				 PCI_DMA_FROMDEVICE);

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
out:
	return skb;

err_out:
	rtl8169_make_unusable_by_asic(desc);
	goto out;
}
2025
2026static void rtl8169_rx_clear(struct rtl8169_private *tp)
2027{
2028 int i;
2029
2030 for (i = 0; i < NUM_RX_DESC; i++) {
2031 if (tp->Rx_skbuff[i]) {
2032 rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i,
2033 tp->RxDescArray + i);
2034 }
2035 }
2036}
2037
/*
 * Refill empty Rx ring slots in [start, end) with freshly allocated,
 * DMA-mapped buffers.  Indices are free-running; they are reduced modulo
 * NUM_RX_DESC to address the ring.
 *
 * Returns the number of slots examined before the first allocation
 * failure (== end - start when everything succeeded).
 */
static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
			   u32 start, u32 end)
{
	u32 cur;

	/* 'end - cur > 0' rather than 'cur < end' copes with wrap-around
	 * of the free-running u32 indices. */
	for (cur = start; end - cur > 0; cur++) {
		struct sk_buff *skb;
		unsigned int i = cur % NUM_RX_DESC;

		/* Slot already has a buffer (small-packet copy path). */
		if (tp->Rx_skbuff[i])
			continue;

		skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
					   tp->RxDescArray + i,
					   tp->rx_buf_sz, tp->align);
		if (!skb)
			break;

		tp->Rx_skbuff[i] = skb;
	}
	return cur - start;
}
2060
/* Flag @desc as the last descriptor of the ring (EOR) so the NIC wraps. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
2065
/* Reset all Tx/Rx ring producer/consumer indexes to the ring origin. */
static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}
2070
/*
 * Initialize both rings for a (re)open: reset the indexes, clear the
 * bookkeeping arrays and fully populate the Rx ring.
 *
 * Returns 0 on success, -ENOMEM if the Rx ring could not be filled
 * completely (any partial fill is undone).
 */
static int rtl8169_init_ring(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

	/* A partially filled Rx ring is useless: all-or-nothing. */
	if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
		goto err_out;

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);

	return 0;

err_out:
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}
2091
/*
 * Undo the DMA mapping of one Tx descriptor and scrub both the descriptor
 * and its ring_info entry.  The skb itself (if any) is freed by the caller.
 */
static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	/* len == 0 marks the slot as unused for rtl8169_tx_clear(). */
	tx_skb->len = 0;
}
2103
/*
 * Drop every packet still pending in the Tx ring (reset/teardown path):
 * unmap each in-flight descriptor, free the owning skb once (only the
 * descriptor that carries tx_skb->skb has it set) and account the drops.
 */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = tp->dirty_tx; i < tp->dirty_tx + NUM_TX_DESC; i++) {
		unsigned int entry = i % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		unsigned int len = tx_skb->len;

		if (len) {
			struct sk_buff *skb = tx_skb->skb;

			rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb,
					     tp->TxDescArray + entry);
			if (skb) {
				dev_kfree_skb(skb);
				tx_skb->skb = NULL;
			}
			tp->stats.tx_dropped++;
		}
	}
	tp->cur_tx = tp->dirty_tx = 0;
}
2127
/*
 * Arm the driver's single delayed-work item to run @task shortly.
 * Only one task can be pending at a time; a new call re-targets tp->task.
 */
static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	PREPARE_DELAYED_WORK(&tp->task, task);
	/* 4 jiffies of grace: let any in-flight IRQ activity settle first. */
	schedule_delayed_work(&tp->task, 4);
}
2135
/*
 * Quiesce interrupt/NAPI activity before a reset: wait for a running IRQ
 * handler, drain any pending poll, then mask and acknowledge the chip's
 * interrupt sources.  Polling is re-enabled before returning.
 */
static void rtl8169_wait_for_quiescence(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	synchronize_irq(dev->irq);

	/* Wait for any pending NAPI task to complete */
	netif_poll_disable(dev);

	rtl8169_irq_mask_and_ack(ioaddr);

	netif_poll_enable(dev);
}
2150
c4028958 2151static void rtl8169_reinit_task(struct work_struct *work)
1da177e4 2152{
c4028958
DH
2153 struct rtl8169_private *tp =
2154 container_of(work, struct rtl8169_private, task.work);
2155 struct net_device *dev = tp->dev;
1da177e4
LT
2156 int ret;
2157
eb2a021c
FR
2158 rtnl_lock();
2159
2160 if (!netif_running(dev))
2161 goto out_unlock;
2162
2163 rtl8169_wait_for_quiescence(dev);
2164 rtl8169_close(dev);
1da177e4
LT
2165
2166 ret = rtl8169_open(dev);
2167 if (unlikely(ret < 0)) {
2168 if (net_ratelimit()) {
b57b7e5a
SH
2169 struct rtl8169_private *tp = netdev_priv(dev);
2170
2171 if (netif_msg_drv(tp)) {
2172 printk(PFX KERN_ERR
2173 "%s: reinit failure (status = %d)."
2174 " Rescheduling.\n", dev->name, ret);
2175 }
1da177e4
LT
2176 }
2177 rtl8169_schedule_work(dev, rtl8169_reinit_task);
2178 }
eb2a021c
FR
2179
2180out_unlock:
2181 rtnl_unlock();
1da177e4
LT
2182}
2183
c4028958 2184static void rtl8169_reset_task(struct work_struct *work)
1da177e4 2185{
c4028958
DH
2186 struct rtl8169_private *tp =
2187 container_of(work, struct rtl8169_private, task.work);
2188 struct net_device *dev = tp->dev;
1da177e4 2189
eb2a021c
FR
2190 rtnl_lock();
2191
1da177e4 2192 if (!netif_running(dev))
eb2a021c 2193 goto out_unlock;
1da177e4
LT
2194
2195 rtl8169_wait_for_quiescence(dev);
2196
2197 rtl8169_rx_interrupt(dev, tp, tp->mmio_addr);
2198 rtl8169_tx_clear(tp);
2199
2200 if (tp->dirty_rx == tp->cur_rx) {
2201 rtl8169_init_ring_indexes(tp);
2202 rtl8169_hw_start(dev);
2203 netif_wake_queue(dev);
2204 } else {
2205 if (net_ratelimit()) {
b57b7e5a
SH
2206 struct rtl8169_private *tp = netdev_priv(dev);
2207
2208 if (netif_msg_intr(tp)) {
2209 printk(PFX KERN_EMERG
2210 "%s: Rx buffers shortage\n", dev->name);
2211 }
1da177e4
LT
2212 }
2213 rtl8169_schedule_work(dev, rtl8169_reset_task);
2214 }
eb2a021c
FR
2215
2216out_unlock:
2217 rtnl_unlock();
1da177e4
LT
2218}
2219
/*
 * net_device watchdog hook: the Tx queue stalled.  Reset the chip
 * immediately and defer the full ring recovery to rtl8169_reset_task.
 */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_hw_reset(tp->mmio_addr);

	/* Let's wait a bit while any (async) irq lands on */
	rtl8169_schedule_work(dev, rtl8169_reset_task);
}
2229
/*
 * Map and queue the paged fragments of @skb into the Tx ring, one
 * descriptor per fragment, starting at the slot after tp->cur_tx.
 * Each fragment descriptor gets @opts1 (which includes DescOwn); the
 * head descriptor is filled later by the caller, so the NIC does not
 * start until the whole chain is in place.
 *
 * Returns the number of fragment descriptors used (0 for linear skbs).
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 opts1)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc *txd;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = frag->size;
		addr = ((void *) page_address(frag->page)) + frag->page_offset;
		/* NOTE(review): mapping result is not checked here — same
		 * pattern as the rest of this era's driver; confirm policy. */
		mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE);

		/* anti gcc 2.95.3 bugware (sic) */
		status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		/* The last fragment owns the skb and closes the chain. */
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;
}
2267
2268static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
2269{
2270 if (dev->features & NETIF_F_TSO) {
7967168c 2271 u32 mss = skb_shinfo(skb)->gso_size;
1da177e4
LT
2272
2273 if (mss)
2274 return LargeSend | ((mss & MSSMask) << MSSShift);
2275 }
84fa7933 2276 if (skb->ip_summed == CHECKSUM_PARTIAL) {
eddc9ec5 2277 const struct iphdr *ip = ip_hdr(skb);
1da177e4
LT
2278
2279 if (ip->protocol == IPPROTO_TCP)
2280 return IPCS | TCPCS;
2281 else if (ip->protocol == IPPROTO_UDP)
2282 return IPCS | UDPCS;
2283 WARN_ON(1); /* we need a WARN() */
2284 }
2285 return 0;
2286}
2287
/*
 * hard_start_xmit hook: queue @skb on the Tx ring.
 *
 * Descriptor ownership protocol: fragment descriptors are written first
 * (rtl8169_xmit_frags), then the head descriptor's address/vlan fields,
 * then — after a wmb() — its opts1 with DescOwn, so the NIC never sees a
 * half-built chain.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY with the
 * queue stopped when the ring is full.
 */
static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int frags, entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts1;
	int ret = NETDEV_TX_OK;

	/* Should be impossible: the queue is stopped before it gets full. */
	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
		if (netif_msg_drv(tp)) {
			printk(KERN_ERR
			       "%s: BUG! Tx Ring full when queue awake!\n",
			       dev->name);
		}
		goto err_stop;
	}

	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop;

	opts1 = DescOwn | rtl8169_tso_csum(skb, dev);

	frags = rtl8169_xmit_frags(tp, skb, opts1);
	if (frags) {
		len = skb_headlen(skb);
		opts1 |= FirstFrag;
	} else {
		len = skb->len;

		/* Pad runt frames to the minimum Ethernet length.
		 * skb_padto() frees the skb on failure. */
		if (unlikely(len < ETH_ZLEN)) {
			if (skb_padto(skb, ETH_ZLEN))
				goto err_update_stats;
			len = ETH_ZLEN;
		}

		opts1 |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);
	txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));

	/* Address/vlan must be visible before DescOwn is set below. */
	wmb();

	/* anti gcc 2.95.3 bugware (sic) */
	status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	dev->trans_start = jiffies;

	tp->cur_tx += frags + 1;

	/* Publish cur_tx before kicking the NIC / racing the IRQ path. */
	smp_wmb();

	RTL_W8(TxPoll, 0x40);	/* set polling bit */

	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		/* Re-check after the barrier: tx_interrupt may have freed
		 * slots between the test above and netif_stop_queue(). */
		smp_rmb();
		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
			netif_wake_queue(dev);
	}

out:
	return ret;

err_stop:
	netif_stop_queue(dev);
	ret = NETDEV_TX_BUSY;
err_update_stats:
	tp->stats.tx_dropped++;
	goto out;
}
2367
/*
 * Handle a SYSErr (PCI bus error) interrupt: log the PCI command/status
 * registers, clear the sticky error bits, optionally give up on 64-bit
 * DAC addressing, reset the chip and schedule a full reinit.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	if (netif_msg_intr(tp)) {
		printk(KERN_ERR
		       "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
		       dev->name, pci_cmd, pci_status);
	}

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* Write-one-to-clear the sticky PCI error status bits. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
		if (netif_msg_intr(tp))
			printk(KERN_INFO "%s: disabling PCI DAC.\n", dev->name);
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(ioaddr);

	rtl8169_schedule_work(dev, rtl8169_reinit_task);
}
2417
/*
 * Reclaim completed Tx descriptors: walk from dirty_tx toward cur_tx,
 * stop at the first descriptor the NIC still owns, unmap each finished
 * one, free the skb on the LastFrag descriptor, update stats, and wake
 * the queue if enough room reappeared.
 */
static void
rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
		     void __iomem *ioaddr)
{
	unsigned int dirty_tx, tx_left;

	assert(dev != NULL);
	assert(tp != NULL);
	assert(ioaddr != NULL);

	dirty_tx = tp->dirty_tx;
	/* Pair with the smp_wmb() in start_xmit after cur_tx update. */
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 len = tx_skb->len;
		u32 status;

		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		tp->stats.tx_bytes += len;
		tp->stats.tx_packets++;

		rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);

		/* Only the descriptor carrying LastFrag owns the skb. */
		if (status & LastFrag) {
			dev_kfree_skb_irq(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Publish dirty_tx before testing the stopped queue,
		 * mirroring the smp_rmb() in start_xmit. */
		smp_wmb();
		if (netif_queue_stopped(dev) &&
		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
	}
}
2465
126fa4b9
FR
2466static inline int rtl8169_fragmented_frame(u32 status)
2467{
2468 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
2469}
2470
1da177e4
LT
2471static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
2472{
2473 u32 opts1 = le32_to_cpu(desc->opts1);
2474 u32 status = opts1 & RxProtoMask;
2475
2476 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
2477 ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
2478 ((status == RxProtoIP) && !(opts1 & IPFail)))
2479 skb->ip_summed = CHECKSUM_UNNECESSARY;
2480 else
2481 skb->ip_summed = CHECKSUM_NONE;
2482}
2483
2484static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
bcf0bf90
FR
2485 struct RxDesc *desc, int rx_buf_sz,
2486 unsigned int align)
1da177e4
LT
2487{
2488 int ret = -1;
2489
2490 if (pkt_size < rx_copybreak) {
2491 struct sk_buff *skb;
2492
bcf0bf90 2493 skb = dev_alloc_skb(pkt_size + align);
1da177e4 2494 if (skb) {
dcb92f88 2495 skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
689be439 2496 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
1da177e4
LT
2497 *sk_buff = skb;
2498 rtl8169_mark_to_asic(desc, rx_buf_sz);
2499 ret = 0;
2500 }
2501 }
2502 return ret;
2503}
2504
/*
 * Drain the Rx ring: process up to the NAPI quota of completed
 * descriptors, hand good frames to the stack (copybreak for small ones),
 * recycle error descriptors, then refill the ring.
 *
 * Returns the number of descriptors processed.
 */
static int
rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
		     void __iomem *ioaddr)
{
	unsigned int cur_rx, rx_left;
	unsigned int delta, count;

	assert(dev != NULL);
	assert(tp != NULL);
	assert(ioaddr != NULL);

	cur_rx = tp->cur_rx;
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		/* Order the DescOwn read before the rest of the descriptor. */
		rmb();
		status = le32_to_cpu(desc->opts1);

		if (status & DescOwn)
			break;
		if (unlikely(status & RxRES)) {
			if (netif_msg_rx_err(tp)) {
				printk(KERN_INFO
				       "%s: Rx ERROR. status = %08x\n",
				       dev->name, status);
			}
			tp->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				tp->stats.rx_length_errors++;
			if (status & RxCRC)
				tp->stats.rx_crc_errors++;
			if (status & RxFOVF) {
				/* FIFO overflow: full recovery needed. */
				rtl8169_schedule_work(dev, rtl8169_reset_task);
				tp->stats.rx_fifo_errors++;
			}
			/* Recycle the descriptor without passing data up. */
			rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
		} else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			/* Frame length field minus the 4-byte CRC. */
			int pkt_size = (status & 0x00001FFF) - 4;
			void (*pci_action)(struct pci_dev *, dma_addr_t,
				size_t, int) = pci_dma_sync_single_for_device;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				tp->stats.rx_dropped++;
				tp->stats.rx_length_errors++;
				rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
				continue;
			}

			/* Must run before try_rx_copy() recycles the
			 * descriptor (which rewrites opts1). */
			rtl8169_rx_csum(skb, desc);

			pci_dma_sync_single_for_cpu(tp->pci_dev,
				le64_to_cpu(desc->addr), tp->rx_buf_sz,
				PCI_DMA_FROMDEVICE);

			/* Copy path keeps the buffer in the ring; otherwise
			 * the skb leaves the ring and must be unmapped. */
			if (rtl8169_try_rx_copy(&skb, pkt_size, desc,
						tp->rx_buf_sz, tp->align)) {
				pci_action = pci_unmap_single;
				tp->Rx_skbuff[entry] = NULL;
			}

			pci_action(tp->pci_dev, le64_to_cpu(desc->addr),
				   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);

			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			/* VLAN accel consumes the skb on success (>= 0). */
			if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0)
				rtl8169_rx_skb(skb);

			dev->last_rx = jiffies;
			tp->stats.rx_bytes += pkt_size;
			tp->stats.rx_packets++;
		}
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count && netif_msg_intr(tp))
		printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
	tp->dirty_rx += delta;

	/*
	 * FIXME: until there is periodic timer to try and refill the ring,
	 * a temporary shortage may definitely kill the Rx process.
	 * - disable the asic to try and avoid an overflow and kick it again
	 *   after refill ?
	 * - how do others driver handle this condition (Uh oh...).
	 */
	if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp))
		printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);

	return count;
}
2611
/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */
static irqreturn_t
rtl8169_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct rtl8169_private *tp = netdev_priv(dev);
	int boguscnt = max_interrupt_work;
	void __iomem *ioaddr = tp->mmio_addr;
	int status;
	int handled = 0;

	do {
		status = RTL_R16(IntrStatus);

		/* hotplug/major error/no more work/shared irq */
		if ((status == 0xFFFF) || !status)
			break;

		handled = 1;

		/* Interface went down under us: silence the chip. */
		if (unlikely(!netif_running(dev))) {
			rtl8169_asic_down(ioaddr);
			goto out;
		}

		status &= tp->intr_mask;
		/* Ack; a FIFO overflow is also acked as a ring overflow. */
		RTL_W16(IntrStatus,
			(status & RxFIFOOver) ? (status | RxOverflow) : status);

		if (!(status & rtl8169_intr_mask))
			break;

		if (unlikely(status & SYSErr)) {
			rtl8169_pcierr_interrupt(dev);
			break;
		}

		if (status & LinkChg)
			rtl8169_check_link_status(dev, tp, ioaddr);

#ifdef CONFIG_R8169_NAPI
		/* Mask NAPI-handled events and defer the work to poll(). */
		RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event);
		tp->intr_mask = ~rtl8169_napi_event;

		if (likely(netif_rx_schedule_prep(dev)))
			__netif_rx_schedule(dev);
		else if (netif_msg_intr(tp)) {
			printk(KERN_INFO "%s: interrupt %04x taken in poll\n",
			       dev->name, status);
		}
		break;
#else
		/* Rx interrupt */
		if (status & (RxOK | RxOverflow | RxFIFOOver)) {
			rtl8169_rx_interrupt(dev, tp, ioaddr);
		}
		/* Tx interrupt */
		if (status & (TxOK | TxErr))
			rtl8169_tx_interrupt(dev, tp, ioaddr);
#endif

		boguscnt--;
	} while (boguscnt > 0);

	/* Storm guard: give up after max_interrupt_work iterations. */
	if (boguscnt <= 0) {
		if (netif_msg_intr(tp) && net_ratelimit() ) {
			printk(KERN_WARNING
			       "%s: Too much work at interrupt!\n", dev->name);
		}
		/* Clear all interrupt sources. */
		RTL_W16(IntrStatus, 0xffff);
	}
out:
	return IRQ_RETVAL(handled);
}
2687
#ifdef CONFIG_R8169_NAPI
/*
 * NAPI poll callback: process Rx up to the budget, reclaim Tx, and when
 * all pending work is done re-enable the NAPI-masked interrupt sources.
 * Returns non-zero while more work remains (stay on the poll list).
 */
static int rtl8169_poll(struct net_device *dev, int *budget)
{
	unsigned int work_done, work_to_do = min(*budget, dev->quota);
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	work_done = rtl8169_rx_interrupt(dev, tp, ioaddr);
	rtl8169_tx_interrupt(dev, tp, ioaddr);

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done < work_to_do) {
		netif_rx_complete(dev);
		tp->intr_mask = 0xffff;
		/*
		 * 20040426: the barrier is not strictly required but the
		 * behavior of the irq handler could be less predictable
		 * without it. Btw, the lack of flush for the posted pci
		 * write is safe - FR
		 */
		smp_wmb();
		RTL_W16(IntrMask, rtl8169_intr_mask);
	}

	return (work_done >= work_to_do);
}
#endif
2717
/*
 * Bring the interface down: stop the timer and queue, quiesce the chip,
 * harvest the Rx-missed counter, synchronize against IRQ/NAPI/xmit
 * paths, then drop everything still queued in both rings.  The shutdown
 * loop repeats until the interrupt mask reads back as fully disabled.
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;
	unsigned int intrmask;

	rtl8169_delete_timer(dev);

	netif_stop_queue(dev);

core_down:
	spin_lock_irq(&tp->lock);

	rtl8169_asic_down(ioaddr);

	/* Update the error counts. */
	tp->stats.rx_missed_errors += RTL_R32(RxMissed);
	RTL_W32(RxMissed, 0);

	spin_unlock_irq(&tp->lock);

	synchronize_irq(dev->irq);

	/* Disable polling only once even if we loop back to core_down. */
	if (!poll_locked) {
		netif_poll_disable(dev);
		poll_locked++;
	}

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();  /* FIXME: should this be synchronize_irq()? */

	/*
	 * And now for the 50k$ question: are IRQ disabled or not ?
	 *
	 * Two paths lead here:
	 * 1) dev->close
	 *    -> netif_running() is available to sync the current code and the
	 *       IRQ handler. See rtl8169_interrupt for details.
	 * 2) dev->change_mtu
	 *    -> rtl8169_poll can not be issued again and re-enable the
	 *       interruptions. Let's simply issue the IRQ down sequence again.
	 *
	 * No loop if hotpluged or major error (0xffff).
	 */
	intrmask = RTL_R16(IntrMask);
	if (intrmask && (intrmask != 0xffff))
		goto core_down;

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);
}
2771
/*
 * net_device close hook: shut the hardware down, release the IRQ and
 * free both descriptor rings.  Polling is re-enabled so a later open
 * starts from a clean NAPI state.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	rtl8169_down(dev);

	free_irq(dev->irq, dev);

	/* Undo the netif_poll_disable() performed in rtl8169_down(). */
	netif_poll_enable(dev);

	pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
			    tp->RxPhyAddr);
	pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
			    tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	return 0;
}
2792
/*
 * net_device set_multicast_list hook: program the Rx filter mode and the
 * 64-bit multicast hash according to promiscuous/allmulti flags and the
 * device's multicast list.
 */
static void
rtl8169_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		if (netif_msg_link(tp)) {
			printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			       dev->name);
		}
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* Hash each address into one of 64 filter bits (crc >> 26). */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	/* Preserve the chip-specific reserved RxConfig bits. */
	tmp = rtl8169_rx_config | rx_mode |
	      (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);

	/* These chip versions get an all-pass hash filter —
	 * presumably a hardware filtering quirk; see the per-version
	 * errata before changing. */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
		mc_filter[0] = 0xffffffff;
		mc_filter[1] = 0xffffffff;
	}

	RTL_W32(RxConfig, tmp);
	RTL_W32(MAR0 + 0, mc_filter[0]);
	RTL_W32(MAR0 + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
2850
/**
 * rtl8169_get_stats - Get rtl8169 read/write statistics
 * @dev: The Ethernet Device to get statistics for
 *
 * Folds the hardware RxMissed counter into the software stats (the
 * register is clear-on-write, so it is zeroed after harvesting) and
 * returns the driver's cumulative net_device_stats.
 */
static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;

	/* Registers are only accessible while the interface is up. */
	if (netif_running(dev)) {
		spin_lock_irqsave(&tp->lock, flags);
		tp->stats.rx_missed_errors += RTL_R32(RxMissed);
		RTL_W32(RxMissed, 0);
		spin_unlock_irqrestore(&tp->lock, flags);
	}

	return &tp->stats;
}
2872
#ifdef CONFIG_PM

/*
 * PCI suspend hook: detach and stop the interface, quiesce the chip and
 * harvest RxMissed, then save PCI state and arm wake-on-LAN before
 * entering the target power state.  The PCI PM steps run even when the
 * interface is down.
 */
static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	if (!netif_running(dev))
		goto out_pci_suspend;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	spin_lock_irq(&tp->lock);

	rtl8169_asic_down(ioaddr);

	/* RxMissed is clear-on-write; fold it in before powering down. */
	tp->stats.rx_missed_errors += RTL_R32(RxMissed);
	RTL_W32(RxMissed, 0);

	spin_unlock_irq(&tp->lock);

out_pci_suspend:
	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
2903
/*
 * PCI resume hook: restore PCI state and power, then — if the interface
 * was running — re-attach it and schedule a reset_task to bring the
 * hardware back up outside atomic context.
 */
static int rtl8169_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	if (!netif_running(dev))
		goto out;

	netif_device_attach(dev);

	/* Full ring/hardware re-init happens in the workqueue. */
	rtl8169_schedule_work(dev, rtl8169_reset_task);
out:
	return 0;
}

#endif /* CONFIG_PM */
2923
/* PCI driver glue: probe/remove plus optional power-management hooks. */
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,
	.id_table	= rtl8169_pci_tbl,
	.probe		= rtl8169_init_one,
	.remove		= __devexit_p(rtl8169_remove_one),
#ifdef CONFIG_PM
	.suspend	= rtl8169_suspend,
	.resume		= rtl8169_resume,
#endif
};
2934
/* Module entry point: register the PCI driver. */
static int __init
rtl8169_init_module(void)
{
	return pci_register_driver(&rtl8169_pci_driver);
}
2940
/* Module exit point: unregister the PCI driver (detaches all devices). */
static void __exit
rtl8169_cleanup_module(void)
{
	pci_unregister_driver(&rtl8169_pci_driver);
}

module_init(rtl8169_init_module);
module_exit(rtl8169_cleanup_module);