Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */ |
2 | /* | |
3 | Written 1998-2001 by Donald Becker. | |
4 | ||
5 | Current Maintainer: Roger Luethi <rl@hellgate.ch> | |
6 | ||
7 | This software may be used and distributed according to the terms of | |
8 | the GNU General Public License (GPL), incorporated herein by reference. | |
9 | Drivers based on or derived from this code fall under the GPL and must | |
10 | retain the authorship, copyright and license notice. This file is not | |
11 | a complete program and may only be used when the entire operating | |
12 | system is licensed under the GPL. | |
13 | ||
14 | This driver is designed for the VIA VT86C100A Rhine-I. | |
15 | It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM | |
16 | and management NIC 6105M). | |
17 | ||
18 | The author may be reached as becker@scyld.com, or C/O | |
19 | Scyld Computing Corporation | |
20 | 410 Severn Ave., Suite 210 | |
21 | Annapolis MD 21403 | |
22 | ||
23 | ||
24 | This driver contains some changes from the original Donald Becker | |
25 | version. He may or may not be interested in bug reports on this | |
26 | code. You can find his versions at: | |
27 | http://www.scyld.com/network/via-rhine.html | |
03a8c661 | 28 | [link no longer provides useful info -jgarzik] |
1da177e4 LT |
29 | |
30 | */ | |
31 | ||
df4511fe JP |
32 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
33 | ||
1da177e4 | 34 | #define DRV_NAME "via-rhine" |
207070f5 | 35 | #define DRV_VERSION "1.5.1" |
38f49e88 | 36 | #define DRV_RELDATE "2010-10-09" |
1da177e4 | 37 | |
eb939922 | 38 | #include <linux/types.h> |
1da177e4 LT |
39 | |
40 | /* A few user-configurable values. | |
41 | These may be modified when a driver module is loaded. */ | |
fc3e0f8a FR |
42 | static int debug = 0; |
43 | #define RHINE_MSG_DEFAULT \ | |
44 | (0x0000) | |
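/*
 * Editorial note (illustrative, not part of the original driver): 'debug'
 * is the usual msglvl-style module parameter. netif_msg_init() turns it
 * into the NETIF_MSG_* bitmask stored in rp->msg_enable (see rhine_init_one
 * below), and the netif_info()/netif_dbg() calls in this file are gated on
 * those bits, roughly:
 *
 *	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
 *	...
 *	netif_info(rp, link, dev, "...");  // prints only if NETIF_MSG_LINK is set
 *
 * An out-of-range value (e.g. -1) selects RHINE_MSG_DEFAULT.
 */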
1da177e4 LT |
45 | |
46 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. | |
47 | Setting to > 1518 effectively disables this feature. */ | |
8e95a202 JP |
48 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \ |
49 | defined(CONFIG_SPARC) || defined(__ia64__) || \ | |
50 | defined(__sh__) || defined(__mips__) | |
b47157f0 DM |
51 | static int rx_copybreak = 1518; |
52 | #else | |
1da177e4 | 53 | static int rx_copybreak; |
b47157f0 | 54 | #endif |
1da177e4 | 55 | |
b933b4d9 RL |
56 | /* Work-around for broken BIOSes: they are unable to get the chip back out of |
57 | power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */ | |
eb939922 | 58 | static bool avoid_D3; |
b933b4d9 | 59 | |
1da177e4 LT |
60 | /* |
61 | * In case you are looking for 'options[]' or 'full_duplex[]', they | |
62 | * are gone. Use ethtool(8) instead. | |
63 | */ | |
64 | ||
65 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). | |
66 | The Rhine has a 64 element 8390-like hash table. */ | |
67 | static const int multicast_filter_limit = 32; | |
68 | ||
69 | ||
70 | /* Operational parameters that are set at compile time. */ | |
71 | ||
72 | /* Keep the ring sizes a power of two for compile efficiency. | |
73 | The compiler will convert <unsigned>'%'<2^N> into a bit mask. | |
74 | Making the Tx ring too large decreases the effectiveness of channel | |
75 | bonding and packet priority. | |
76 | There are no ill effects from too-large receive rings. */ | |
77 | #define TX_RING_SIZE 16 | |
78 | #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ | |
633949a1 | 79 | #define RX_RING_SIZE 64 |
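/*
 * Illustrative sketch (editorial, not in the original source): because the
 * ring sizes are powers of two, the ring index arithmetic used throughout
 * this driver, e.g.
 *
 *	unsigned int entry = rp->cur_tx % TX_RING_SIZE;
 *
 * can be compiled down to a simple mask, equivalent to
 *
 *	unsigned int entry = rp->cur_tx & (TX_RING_SIZE - 1);
 *
 * which is why the comment above insists on power-of-two sizes.
 */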
1da177e4 LT |
80 | |
81 | /* Operational parameters that usually are not changed. */ | |
82 | ||
83 | /* Time in jiffies before concluding the transmitter is hung. */ | |
84 | #define TX_TIMEOUT (2*HZ) | |
85 | ||
86 | #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ | |
87 | ||
88 | #include <linux/module.h> | |
89 | #include <linux/moduleparam.h> | |
90 | #include <linux/kernel.h> | |
91 | #include <linux/string.h> | |
92 | #include <linux/timer.h> | |
93 | #include <linux/errno.h> | |
94 | #include <linux/ioport.h> | |
1da177e4 LT |
95 | #include <linux/interrupt.h> |
96 | #include <linux/pci.h> | |
1e7f0bd8 | 97 | #include <linux/dma-mapping.h> |
1da177e4 LT |
98 | #include <linux/netdevice.h> |
99 | #include <linux/etherdevice.h> | |
100 | #include <linux/skbuff.h> | |
101 | #include <linux/init.h> | |
102 | #include <linux/delay.h> | |
103 | #include <linux/mii.h> | |
104 | #include <linux/ethtool.h> | |
105 | #include <linux/crc32.h> | |
38f49e88 | 106 | #include <linux/if_vlan.h> |
1da177e4 | 107 | #include <linux/bitops.h> |
c0d7a021 | 108 | #include <linux/workqueue.h> |
1da177e4 LT |
109 | #include <asm/processor.h> /* Processor type for cache alignment. */ |
110 | #include <asm/io.h> | |
111 | #include <asm/irq.h> | |
112 | #include <asm/uaccess.h> | |
e84df485 | 113 | #include <linux/dmi.h> |
1da177e4 LT |
114 | |
115 | /* These identify the driver base version and may not be removed. */ | |
76e239e1 | 116 | static const char version[] = |
df4511fe | 117 | "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker"; |
1da177e4 LT |
118 | |
119 | /* This driver was written to use PCI memory space. Some early versions | |
120 | of the Rhine may only work correctly with I/O space accesses. */ | |
121 | #ifdef CONFIG_VIA_RHINE_MMIO | |
122 | #define USE_MMIO | |
123 | #else | |
124 | #endif | |
125 | ||
126 | MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); | |
127 | MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver"); | |
128 | MODULE_LICENSE("GPL"); | |
129 | ||
1da177e4 LT |
130 | module_param(debug, int, 0); |
131 | module_param(rx_copybreak, int, 0); | |
b933b4d9 | 132 | module_param(avoid_D3, bool, 0); |
fc3e0f8a | 133 | MODULE_PARM_DESC(debug, "VIA Rhine debug message flags"); |
1da177e4 | 134 | MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); |
b933b4d9 | 135 | MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)"); |
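/*
 * Illustrative usage (editorial note): the parameters above are set at
 * module load time or on the kernel command line, e.g.
 *
 *	modprobe via-rhine debug=4 rx_copybreak=1518 avoid_D3=1
 *	via-rhine.avoid_D3=1		(driver built in, see bootparam(7))
 *
 * The values shown are examples only, not recommendations.
 */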
1da177e4 | 136 | |
38f49e88 RL |
137 | #define MCAM_SIZE 32 |
138 | #define VCAM_SIZE 32 | |
139 | ||
1da177e4 LT |
140 | /* |
141 | Theory of Operation | |
142 | ||
143 | I. Board Compatibility | |
144 | ||
145 | This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet |
146 | controller. It also supports the Rhine-II and Rhine-III. |
147 | ||
148 | II. Board-specific settings | |
149 | ||
150 | Boards with this chip are functional only in a bus-master PCI slot. | |
151 | ||
152 | Many operational settings are loaded from the EEPROM to the Config word at | |
153 | offset 0x78. For most of these settings, this driver assumes that they are | |
154 | correct. | |
155 | If this driver is compiled to use PCI memory space operations the EEPROM | |
156 | must be configured to enable memory ops. | |
157 | ||
158 | III. Driver operation | |
159 | ||
160 | IIIa. Ring buffers | |
161 | ||
162 | This driver uses two statically allocated fixed-size descriptor lists | |
163 | formed into rings by a branch from the final descriptor to the beginning of | |
164 | the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. | |
165 | ||
166 | IIIb/c. Transmit/Receive Structure | |
167 | ||
168 | This driver attempts to use a zero-copy receive and transmit scheme. | |
169 | ||
170 | Alas, all data buffers are required to start on a 32 bit boundary, so | |
171 | the driver must often copy transmit packets into bounce buffers. | |
172 | ||
173 | The driver allocates full frame size skbuffs for the Rx ring buffers at | |
174 | open() time and passes the skb->data field to the chip as receive data | |
175 | buffers. When an incoming frame is less than RX_COPYBREAK bytes long, | |
176 | a fresh skbuff is allocated and the frame is copied to the new skbuff. | |
177 | When the incoming frame is larger, the skbuff is passed directly up the | |
178 | protocol stack. Buffers consumed this way are replaced by newly allocated | |
179 | skbuffs in the last phase of rhine_rx(). | |
180 | ||
181 | The RX_COPYBREAK value is chosen to trade off the memory wasted by |
182 | using a full-sized skbuff for small frames vs. the copying costs of larger | |
183 | frames. New boards are typically used in generously configured machines | |
184 | and the underfilled buffers have negligible impact compared to the benefit of | |
185 | a single allocation size, so the default value of zero results in never | |
186 | copying packets. When copying is done, the cost is usually mitigated by using | |
187 | a combined copy/checksum routine. Copying also preloads the cache, which is | |
188 | most useful with small frames. | |
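/*
 * Editorial sketch of the copy-break decision described above (illustrative
 * only; the real code lives in rhine_rx()): for a received frame of
 * pkt_len bytes,
 *
 *	if (pkt_len < rx_copybreak) {
 *		// small frame: copy into a freshly allocated skb and hand
 *		// that one up, keeping the full-sized ring buffer in place
 *		skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *		skb_copy_to_linear_data(skb, rp->rx_skbuff[entry]->data, pkt_len);
 *	} else {
 *		// large frame: pass the ring skb itself up the stack and
 *		// refill the ring entry with a new full-sized skb later
 *		skb = rp->rx_skbuff[entry];
 *		rp->rx_skbuff[entry] = NULL;
 *	}
 *
 * Names like 'entry' are placeholders; error handling and DMA sync are omitted.
 */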
189 | ||
190 | Since the VIA chips are only able to transfer data to buffers on 32 bit | |
191 | boundaries, the IP header at offset 14 in an ethernet frame isn't | |
192 | longword aligned for further processing. Copying these unaligned buffers | |
193 | has the beneficial effect of 16-byte aligning the IP header. | |
194 | ||
195 | IIId. Synchronization | |
196 | ||
197 | The driver runs as two independent, single-threaded flows of control. One | |
198 | is the send-packet routine, which enforces single-threaded use by the | |
b74ca3a8 WC |
199 | netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler, |
200 | which is single threaded by the hardware and interrupt handling software. | |
1da177e4 LT |
201 | |
202 | The send packet thread has partial control over the Tx ring. It locks the | |
b74ca3a8 WC |
203 | netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in |
204 | the ring is not available it stops the transmit queue by | |
205 | calling netif_stop_queue. | |
1da177e4 LT |
206 | |
207 | The interrupt handler has exclusive control over the Rx ring and records stats | |
208 | from the Tx ring. After reaping the stats, it marks the Tx queue entry as | |
209 | empty by incrementing the dirty_tx mark. If at least half of the entries in | |
210 | the Tx ring are available, the transmit queue is woken up if it was stopped. |
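/*
 * Editorial sketch of the Tx reclaim described above (illustrative only;
 * the real code lives in rhine_tx()): the interrupt path walks the ring
 * from dirty_tx to cur_tx, stopping at the first descriptor the chip still
 * owns, roughly:
 *
 *	while (rp->dirty_tx != rp->cur_tx) {
 *		unsigned int entry = rp->dirty_tx % TX_RING_SIZE;
 *		u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
 *
 *		if (txstatus & DescOwn)
 *			break;		// chip is not done with it yet
 *		// ... record stats, unmap and free rp->tx_skbuff[entry] ...
 *		rp->dirty_tx++;
 *	}
 *	// wake the queue here if enough entries were freed
 *
 * Locking and error accounting are omitted.
 */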
211 | ||
212 | IV. Notes | |
213 | ||
214 | IVb. References | |
215 | ||
216 | Preliminary VT86C100A manual from http://www.via.com.tw/ | |
217 | http://www.scyld.com/expert/100mbps.html | |
218 | http://www.scyld.com/expert/NWay.html | |
219 | ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf | |
220 | ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF | |
221 | ||
222 | ||
223 | IVc. Errata | |
224 | ||
225 | The VT86C100A manual is not a reliable source of information. |
226 | The 3043 chip does not handle unaligned transmit or receive buffers, resulting | |
227 | in significant performance degradation for bounce buffer copies on transmit | |
228 | and unaligned IP headers on receive. | |
229 | The chip does not pad to minimum transmit length. | |
230 | ||
231 | */ | |
232 | ||
233 | ||
234 | /* This table drives the PCI probe routines. It's mostly boilerplate in all | |
235 | of the drivers, and will likely be provided by some future kernel. | |
236 | Note the matching code -- the first table entry matches all 56** cards but |
237 | the second matches only the 1234 card. |
238 | */ | |
239 | ||
240 | enum rhine_revs { | |
241 | VT86C100A = 0x00, | |
242 | VTunknown0 = 0x20, | |
243 | VT6102 = 0x40, | |
244 | VT8231 = 0x50, /* Integrated MAC */ | |
245 | VT8233 = 0x60, /* Integrated MAC */ | |
246 | VT8235 = 0x74, /* Integrated MAC */ | |
247 | VT8237 = 0x78, /* Integrated MAC */ | |
248 | VTunknown1 = 0x7C, | |
249 | VT6105 = 0x80, | |
250 | VT6105_B0 = 0x83, | |
251 | VT6105L = 0x8A, | |
252 | VT6107 = 0x8C, | |
253 | VTunknown2 = 0x8E, | |
254 | VT6105M = 0x90, /* Management adapter */ | |
255 | }; | |
256 | ||
257 | enum rhine_quirks { | |
258 | rqWOL = 0x0001, /* Wake-On-LAN support */ | |
259 | rqForceReset = 0x0002, | |
260 | rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */ | |
261 | rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */ | |
262 | rqRhineI = 0x0100, /* See comment below */ | |
263 | }; | |
264 | /* | |
265 | * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable | |
266 | * MMIO as well as for the collision counter and the Tx FIFO underflow | |
267 | * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned. |
268 | */ | |
269 | ||
270 | /* Beware of PCI posted writes */ | |
271 | #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0) | |
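/*
 * Editorial note (illustrative): a write through a PCI bridge may be
 * "posted", i.e. buffered and completed later. Reading any chip register
 * forces posted writes out, which is all IOSYNC does. Typical use, as in
 * rhine_chip_reset() below:
 *
 *	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 *	IOSYNC;
 */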
272 | ||
a3aa1884 | 273 | static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = { |
46009c8b JG |
274 | { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */ |
275 | { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */ | |
276 | { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */ | |
277 | { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */ | |
1da177e4 LT |
278 | { } /* terminate list */ |
279 | }; | |
280 | MODULE_DEVICE_TABLE(pci, rhine_pci_tbl); | |
281 | ||
282 | ||
283 | /* Offsets to the device registers. */ | |
284 | enum register_offsets { | |
285 | StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08, | |
38f49e88 | 286 | ChipCmd1=0x09, TQWake=0x0A, |
1da177e4 LT |
287 | IntrStatus=0x0C, IntrEnable=0x0E, |
288 | MulticastFilter0=0x10, MulticastFilter1=0x14, | |
289 | RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54, | |
38f49e88 | 290 | MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F, |
1da177e4 LT |
291 | MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74, |
292 | ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B, | |
293 | RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81, | |
294 | StickyHW=0x83, IntrStatus2=0x84, | |
38f49e88 | 295 | CamMask=0x88, CamCon=0x92, CamAddr=0x93, |
1da177e4 LT |
296 | WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4, |
297 | WOLcrClr1=0xA6, WOLcgClr=0xA7, | |
298 | PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD, | |
299 | }; | |
300 | ||
301 | /* Bits in ConfigD */ | |
302 | enum backoff_bits { | |
303 | BackOptional=0x01, BackModify=0x02, | |
304 | BackCaptureEffect=0x04, BackRandom=0x08 | |
305 | }; | |
306 | ||
38f49e88 RL |
307 | /* Bits in the TxConfig (TCR) register */ |
308 | enum tcr_bits { | |
309 | TCR_PQEN=0x01, | |
310 | TCR_LB0=0x02, /* loopback[0] */ | |
311 | TCR_LB1=0x04, /* loopback[1] */ | |
312 | TCR_OFSET=0x08, | |
313 | TCR_RTGOPT=0x10, | |
314 | TCR_RTFT0=0x20, | |
315 | TCR_RTFT1=0x40, | |
316 | TCR_RTSF=0x80, | |
317 | }; | |
318 | ||
319 | /* Bits in the CamCon (CAMC) register */ | |
320 | enum camcon_bits { | |
321 | CAMC_CAMEN=0x01, | |
322 | CAMC_VCAMSL=0x02, | |
323 | CAMC_CAMWR=0x04, | |
324 | CAMC_CAMRD=0x08, | |
325 | }; | |
326 | ||
327 | /* Bits in the PCIBusConfig1 (BCR1) register */ | |
328 | enum bcr1_bits { | |
329 | BCR1_POT0=0x01, | |
330 | BCR1_POT1=0x02, | |
331 | BCR1_POT2=0x04, | |
332 | BCR1_CTFT0=0x08, | |
333 | BCR1_CTFT1=0x10, | |
334 | BCR1_CTSF=0x20, | |
335 | BCR1_TXQNOBK=0x40, /* for VT6105 */ | |
336 | BCR1_VIDFR=0x80, /* for VT6105 */ | |
337 | BCR1_MED0=0x40, /* for VT6102 */ | |
338 | BCR1_MED1=0x80, /* for VT6102 */ | |
339 | }; | |
340 | ||
1da177e4 LT |
341 | #ifdef USE_MMIO |
342 | /* Registers whose MMIO and PIO reads we verify to be identical. */ |
343 | static const int mmio_verify_registers[] = { | |
344 | RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD, | |
345 | 0 | |
346 | }; | |
347 | #endif | |
348 | ||
349 | /* Bits in the interrupt status/mask registers. */ | |
350 | enum intr_status_bits { | |
7ab87ff4 FR |
351 | IntrRxDone = 0x0001, |
352 | IntrTxDone = 0x0002, | |
353 | IntrRxErr = 0x0004, | |
354 | IntrTxError = 0x0008, | |
355 | IntrRxEmpty = 0x0020, | |
356 | IntrPCIErr = 0x0040, | |
357 | IntrStatsMax = 0x0080, | |
358 | IntrRxEarly = 0x0100, | |
359 | IntrTxUnderrun = 0x0210, | |
360 | IntrRxOverflow = 0x0400, | |
361 | IntrRxDropped = 0x0800, | |
362 | IntrRxNoBuf = 0x1000, | |
363 | IntrTxAborted = 0x2000, | |
364 | IntrLinkChange = 0x4000, | |
365 | IntrRxWakeUp = 0x8000, | |
366 | IntrTxDescRace = 0x080000, /* mapped from IntrStatus2 */ | |
367 | IntrNormalSummary = IntrRxDone | IntrTxDone, | |
368 | IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError | | |
369 | IntrTxUnderrun, | |
1da177e4 LT |
370 | }; |
371 | ||
372 | /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */ | |
373 | enum wol_bits { | |
374 | WOLucast = 0x10, | |
375 | WOLmagic = 0x20, | |
376 | WOLbmcast = 0x30, | |
377 | WOLlnkon = 0x40, | |
378 | WOLlnkoff = 0x80, | |
379 | }; | |
380 | ||
381 | /* The Rx and Tx buffer descriptors. */ | |
382 | struct rx_desc { | |
53c03f5c AV |
383 | __le32 rx_status; |
384 | __le32 desc_length; /* Chain flag, Buffer/frame length */ | |
385 | __le32 addr; | |
386 | __le32 next_desc; | |
1da177e4 LT |
387 | }; |
388 | struct tx_desc { | |
53c03f5c AV |
389 | __le32 tx_status; |
390 | __le32 desc_length; /* Chain flag, Tx Config, Frame length */ | |
391 | __le32 addr; | |
392 | __le32 next_desc; | |
1da177e4 LT |
393 | }; |
394 | ||
395 | /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */ | |
396 | #define TXDESC 0x00e08000 | |
397 | ||
398 | enum rx_status_bits { | |
399 | RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F | |
400 | }; | |
401 | ||
402 | /* Bits in *_desc.*_status */ | |
403 | enum desc_status_bits { | |
404 | DescOwn=0x80000000 | |
405 | }; | |
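/*
 * Editorial note (illustrative): DescOwn is the ownership handshake bit.
 * The driver sets it to hand a descriptor to the chip, e.g. when filling
 * the Rx ring in alloc_rbufs():
 *
 *	rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
 *
 * and the chip clears it when it has finished with the descriptor, so a
 * status word without DescOwn means the descriptor is safe to process.
 */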
406 | ||
38f49e88 RL |
407 | /* Bits in *_desc.*_length */ |
408 | enum desc_length_bits { | |
409 | DescTag=0x00010000 | |
410 | }; | |
411 | ||
1da177e4 LT |
412 | /* Bits in ChipCmd. */ |
413 | enum chip_cmd_bits { | |
414 | CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08, | |
415 | CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40, | |
416 | Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04, | |
417 | Cmd1NoTxPoll=0x08, Cmd1Reset=0x80, | |
418 | }; | |
419 | ||
f7b5d1b9 JG |
420 | struct rhine_stats { |
421 | u64 packets; | |
422 | u64 bytes; | |
423 | struct u64_stats_sync syncp; | |
424 | }; | |
425 | ||
1da177e4 | 426 | struct rhine_private { |
38f49e88 RL |
427 | /* Bit mask for configured VLAN ids */ |
428 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; | |
429 | ||
1da177e4 LT |
430 | /* Descriptor rings */ |
431 | struct rx_desc *rx_ring; | |
432 | struct tx_desc *tx_ring; | |
433 | dma_addr_t rx_ring_dma; | |
434 | dma_addr_t tx_ring_dma; | |
435 | ||
436 | /* The addresses of receive-in-place skbuffs. */ | |
437 | struct sk_buff *rx_skbuff[RX_RING_SIZE]; | |
438 | dma_addr_t rx_skbuff_dma[RX_RING_SIZE]; | |
439 | ||
440 | /* The saved address of a sent-in-place packet/buffer, for later free(). */ | |
441 | struct sk_buff *tx_skbuff[TX_RING_SIZE]; | |
442 | dma_addr_t tx_skbuff_dma[TX_RING_SIZE]; | |
443 | ||
4be5de25 | 444 | /* Tx bounce buffers (Rhine-I only) */ |
1da177e4 LT |
445 | unsigned char *tx_buf[TX_RING_SIZE]; |
446 | unsigned char *tx_bufs; | |
447 | dma_addr_t tx_bufs_dma; | |
448 | ||
449 | struct pci_dev *pdev; | |
450 | long pioaddr; | |
bea3348e SH |
451 | struct net_device *dev; |
452 | struct napi_struct napi; | |
1da177e4 | 453 | spinlock_t lock; |
7ab87ff4 FR |
454 | struct mutex task_lock; |
455 | bool task_enable; | |
456 | struct work_struct slow_event_task; | |
c0d7a021 | 457 | struct work_struct reset_task; |
1da177e4 | 458 | |
fc3e0f8a FR |
459 | u32 msg_enable; |
460 | ||
1da177e4 LT |
461 | /* Frequently used values: keep some adjacent for cache effect. */ |
462 | u32 quirks; | |
463 | struct rx_desc *rx_head_desc; | |
464 | unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ | |
465 | unsigned int cur_tx, dirty_tx; | |
466 | unsigned int rx_buf_sz; /* Based on MTU+slack. */ | |
f7b5d1b9 JG |
467 | struct rhine_stats rx_stats; |
468 | struct rhine_stats tx_stats; | |
1da177e4 LT |
469 | u8 wolopts; |
470 | ||
471 | u8 tx_thresh, rx_thresh; | |
472 | ||
473 | struct mii_if_info mii_if; | |
474 | void __iomem *base; | |
475 | }; | |
476 | ||
38f49e88 RL |
477 | #define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0) |
478 | #define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0) | |
479 | #define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0) | |
480 | ||
481 | #define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x)) | |
482 | #define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x)) | |
483 | #define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x)) | |
484 | ||
485 | #define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0) | |
486 | #define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0) | |
487 | #define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0) | |
488 | ||
489 | #define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0) | |
490 | #define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0) | |
491 | #define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0) | |
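/*
 * Editorial note (illustrative): these helpers are simple read-modify-write
 * wrappers around the ioreadN()/iowriteN() accessors. For example,
 *
 *	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
 *
 * as used later in rhine_init_cam_filter(), rewrites the TxConfig register
 * with TCR_PQEN OR-ed into its current value.
 */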
492 | ||
493 | ||
1da177e4 LT |
494 | static int mdio_read(struct net_device *dev, int phy_id, int location); |
495 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value); | |
496 | static int rhine_open(struct net_device *dev); | |
c0d7a021 | 497 | static void rhine_reset_task(struct work_struct *work); |
7ab87ff4 | 498 | static void rhine_slow_event_task(struct work_struct *work); |
1da177e4 | 499 | static void rhine_tx_timeout(struct net_device *dev); |
61357325 SH |
500 | static netdev_tx_t rhine_start_tx(struct sk_buff *skb, |
501 | struct net_device *dev); | |
7d12e780 | 502 | static irqreturn_t rhine_interrupt(int irq, void *dev_instance); |
1da177e4 | 503 | static void rhine_tx(struct net_device *dev); |
633949a1 | 504 | static int rhine_rx(struct net_device *dev, int limit); |
1da177e4 | 505 | static void rhine_set_rx_mode(struct net_device *dev); |
f7b5d1b9 JG |
506 | static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev, |
507 | struct rtnl_link_stats64 *stats); | |
1da177e4 | 508 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
7282d491 | 509 | static const struct ethtool_ops netdev_ethtool_ops; |
1da177e4 | 510 | static int rhine_close(struct net_device *dev); |
80d5c368 PM |
511 | static int rhine_vlan_rx_add_vid(struct net_device *dev, |
512 | __be16 proto, u16 vid); | |
513 | static int rhine_vlan_rx_kill_vid(struct net_device *dev, | |
514 | __be16 proto, u16 vid); | |
7ab87ff4 | 515 | static void rhine_restart_tx(struct net_device *dev); |
1da177e4 | 516 | |
3f8c91a7 | 517 | static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low) |
a384a33b FR |
518 | { |
519 | void __iomem *ioaddr = rp->base; | |
520 | int i; | |
521 | ||
522 | for (i = 0; i < 1024; i++) { | |
3f8c91a7 AM |
523 | bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask); |
524 | ||
525 | if (low ^ has_mask_bits) | |
a384a33b FR |
526 | break; |
527 | udelay(10); | |
528 | } | |
529 | if (i > 64) { | |
fc3e0f8a | 530 | netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle " |
3f8c91a7 | 531 | "count: %04d\n", low ? "low" : "high", reg, mask, i); |
a384a33b FR |
532 | } |
533 | } | |
534 | ||
535 | static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask) | |
536 | { | |
3f8c91a7 | 537 | rhine_wait_bit(rp, reg, mask, false); |
a384a33b FR |
538 | } |
539 | ||
540 | static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask) | |
541 | { | |
3f8c91a7 | 542 | rhine_wait_bit(rp, reg, mask, true); |
a384a33b | 543 | } |
1da177e4 | 544 | |
a20a28bc | 545 | static u32 rhine_get_events(struct rhine_private *rp) |
1da177e4 | 546 | { |
1da177e4 LT |
547 | void __iomem *ioaddr = rp->base; |
548 | u32 intr_status; | |
549 | ||
550 | intr_status = ioread16(ioaddr + IntrStatus); | |
551 | /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */ | |
552 | if (rp->quirks & rqStatusWBRace) | |
553 | intr_status |= ioread8(ioaddr + IntrStatus2) << 16; | |
554 | return intr_status; | |
555 | } | |
556 | ||
a20a28bc FR |
557 | static void rhine_ack_events(struct rhine_private *rp, u32 mask) |
558 | { | |
559 | void __iomem *ioaddr = rp->base; | |
560 | ||
561 | if (rp->quirks & rqStatusWBRace) | |
562 | iowrite8(mask >> 16, ioaddr + IntrStatus2); | |
563 | iowrite16(mask, ioaddr + IntrStatus); | |
7ab87ff4 | 564 | mmiowb(); |
a20a28bc FR |
565 | } |
566 | ||
1da177e4 LT |
567 | /* |
568 | * Get power related registers into sane state. | |
569 | * Notify user about past WOL event. | |
570 | */ | |
571 | static void rhine_power_init(struct net_device *dev) | |
572 | { | |
573 | struct rhine_private *rp = netdev_priv(dev); | |
574 | void __iomem *ioaddr = rp->base; | |
575 | u16 wolstat; | |
576 | ||
577 | if (rp->quirks & rqWOL) { | |
578 | /* Make sure chip is in power state D0 */ | |
579 | iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW); | |
580 | ||
581 | /* Disable "force PME-enable" */ | |
582 | iowrite8(0x80, ioaddr + WOLcgClr); | |
583 | ||
584 | /* Clear power-event config bits (WOL) */ | |
585 | iowrite8(0xFF, ioaddr + WOLcrClr); | |
586 | /* More recent cards can manage two additional patterns */ | |
587 | if (rp->quirks & rq6patterns) | |
588 | iowrite8(0x03, ioaddr + WOLcrClr1); | |
589 | ||
590 | /* Save power-event status bits */ | |
591 | wolstat = ioread8(ioaddr + PwrcsrSet); | |
592 | if (rp->quirks & rq6patterns) | |
593 | wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8; | |
594 | ||
595 | /* Clear power-event status bits */ | |
596 | iowrite8(0xFF, ioaddr + PwrcsrClr); | |
597 | if (rp->quirks & rq6patterns) | |
598 | iowrite8(0x03, ioaddr + PwrcsrClr1); | |
599 | ||
600 | if (wolstat) { | |
601 | char *reason; | |
602 | switch (wolstat) { | |
603 | case WOLmagic: | |
604 | reason = "Magic packet"; | |
605 | break; | |
606 | case WOLlnkon: | |
607 | reason = "Link went up"; | |
608 | break; | |
609 | case WOLlnkoff: | |
610 | reason = "Link went down"; | |
611 | break; | |
612 | case WOLucast: | |
613 | reason = "Unicast packet"; | |
614 | break; | |
615 | case WOLbmcast: | |
616 | reason = "Multicast/broadcast packet"; | |
617 | break; | |
618 | default: | |
619 | reason = "Unknown"; | |
620 | } | |
df4511fe JP |
621 | netdev_info(dev, "Woke system up. Reason: %s\n", |
622 | reason); | |
1da177e4 LT |
623 | } |
624 | } | |
625 | } | |
626 | ||
627 | static void rhine_chip_reset(struct net_device *dev) | |
628 | { | |
629 | struct rhine_private *rp = netdev_priv(dev); | |
630 | void __iomem *ioaddr = rp->base; | |
fc3e0f8a | 631 | u8 cmd1; |
1da177e4 LT |
632 | |
633 | iowrite8(Cmd1Reset, ioaddr + ChipCmd1); | |
634 | IOSYNC; | |
635 | ||
636 | if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) { | |
df4511fe | 637 | netdev_info(dev, "Reset not complete yet. Trying harder.\n"); |
1da177e4 LT |
638 | |
639 | /* Force reset */ | |
640 | if (rp->quirks & rqForceReset) | |
641 | iowrite8(0x40, ioaddr + MiscCmd); | |
642 | ||
643 | /* Reset can take somewhat longer (rare) */ | |
a384a33b | 644 | rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset); |
1da177e4 LT |
645 | } |
646 | ||
fc3e0f8a FR |
647 | cmd1 = ioread8(ioaddr + ChipCmd1); |
648 | netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ? | |
649 | "failed" : "succeeded"); | |
1da177e4 LT |
650 | } |
651 | ||
652 | #ifdef USE_MMIO | |
653 | static void enable_mmio(long pioaddr, u32 quirks) | |
654 | { | |
655 | int n; | |
656 | if (quirks & rqRhineI) { | |
657 | /* More recent docs say that this bit is reserved ... */ | |
658 | n = inb(pioaddr + ConfigA) | 0x20; | |
659 | outb(n, pioaddr + ConfigA); | |
660 | } else { | |
661 | n = inb(pioaddr + ConfigD) | 0x80; | |
662 | outb(n, pioaddr + ConfigD); | |
663 | } | |
664 | } | |
665 | #endif | |
666 | ||
667 | /* | |
668 | * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM | |
669 | * (plus 0x6C for Rhine-I/II) | |
670 | */ | |
76e239e1 | 671 | static void rhine_reload_eeprom(long pioaddr, struct net_device *dev) |
1da177e4 LT |
672 | { |
673 | struct rhine_private *rp = netdev_priv(dev); | |
674 | void __iomem *ioaddr = rp->base; | |
a384a33b | 675 | int i; |
1da177e4 LT |
676 | |
677 | outb(0x20, pioaddr + MACRegEEcsr); | |
a384a33b FR |
678 | for (i = 0; i < 1024; i++) { |
679 | if (!(inb(pioaddr + MACRegEEcsr) & 0x20)) | |
680 | break; | |
681 | } | |
682 | if (i > 512) | |
683 | pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__); | |
1da177e4 LT |
684 | |
685 | #ifdef USE_MMIO | |
686 | /* | |
687 | * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable | |
688 | * MMIO. If reloading EEPROM was done first this could be avoided, but | |
689 | * it is not known if that still works with the "win98-reboot" problem. | |
690 | */ | |
691 | enable_mmio(pioaddr, rp->quirks); | |
692 | #endif | |
693 | ||
694 | /* Turn off EEPROM-controlled wake-up (magic packet) */ | |
695 | if (rp->quirks & rqWOL) | |
696 | iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA); | |
697 | ||
698 | } | |
699 | ||
700 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
701 | static void rhine_poll(struct net_device *dev) | |
702 | { | |
05d334ec FR |
703 | struct rhine_private *rp = netdev_priv(dev); |
704 | const int irq = rp->pdev->irq; | |
705 | ||
706 | disable_irq(irq); | |
707 | rhine_interrupt(irq, dev); | |
708 | enable_irq(irq); | |
1da177e4 LT |
709 | } |
710 | #endif | |
711 | ||
269f3114 FR |
712 | static void rhine_kick_tx_threshold(struct rhine_private *rp) |
713 | { | |
714 | if (rp->tx_thresh < 0xe0) { | |
715 | void __iomem *ioaddr = rp->base; | |
716 | ||
717 | rp->tx_thresh += 0x20; | |
718 | BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig); | |
719 | } | |
720 | } | |
721 | ||
7ab87ff4 FR |
722 | static void rhine_tx_err(struct rhine_private *rp, u32 status) |
723 | { | |
724 | struct net_device *dev = rp->dev; | |
725 | ||
726 | if (status & IntrTxAborted) { | |
fc3e0f8a FR |
727 | netif_info(rp, tx_err, dev, |
728 | "Abort %08x, frame dropped\n", status); | |
7ab87ff4 FR |
729 | } |
730 | ||
731 | if (status & IntrTxUnderrun) { | |
732 | rhine_kick_tx_threshold(rp); | |
fc3e0f8a FR |
733 | netif_info(rp, tx_err, dev, "Transmitter underrun, " |
734 | "Tx threshold now %02x\n", rp->tx_thresh); | |
7ab87ff4 FR |
735 | } |
736 | ||
fc3e0f8a FR |
737 | if (status & IntrTxDescRace) |
738 | netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n"); | |
7ab87ff4 FR |
739 | |
740 | if ((status & IntrTxError) && | |
741 | (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) { | |
742 | rhine_kick_tx_threshold(rp); | |
fc3e0f8a FR |
743 | netif_info(rp, tx_err, dev, "Unspecified error. " |
744 | "Tx threshold now %02x\n", rp->tx_thresh); | |
7ab87ff4 FR |
745 | } |
746 | ||
747 | rhine_restart_tx(dev); | |
748 | } | |
749 | ||
750 | static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp) | |
751 | { | |
752 | void __iomem *ioaddr = rp->base; | |
753 | struct net_device_stats *stats = &rp->dev->stats; | |
754 | ||
755 | stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs); | |
756 | stats->rx_missed_errors += ioread16(ioaddr + RxMissed); | |
757 | ||
758 | /* | |
759 | * Clears the "tally counters" for CRC errors and missed frames(?). | |
760 | * It has been reported that some chips need a write of 0 to clear | |
761 | * these, for others the counters are set to 1 when written to and | |
762 | * instead cleared when read. So we clear them both ways ... | |
763 | */ | |
764 | iowrite32(0, ioaddr + RxMissed); | |
765 | ioread16(ioaddr + RxCRCErrs); | |
766 | ioread16(ioaddr + RxMissed); | |
767 | } | |
768 | ||
769 | #define RHINE_EVENT_NAPI_RX (IntrRxDone | \ | |
770 | IntrRxErr | \ | |
771 | IntrRxEmpty | \ | |
772 | IntrRxOverflow | \ | |
773 | IntrRxDropped | \ | |
774 | IntrRxNoBuf | \ | |
775 | IntrRxWakeUp) | |
776 | ||
777 | #define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \ | |
778 | IntrTxAborted | \ | |
779 | IntrTxUnderrun | \ | |
780 | IntrTxDescRace) | |
781 | #define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR) | |
782 | ||
783 | #define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \ | |
784 | RHINE_EVENT_NAPI_TX | \ | |
785 | IntrStatsMax) | |
786 | #define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange) | |
787 | #define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW) | |
788 | ||
bea3348e | 789 | static int rhine_napipoll(struct napi_struct *napi, int budget) |
633949a1 | 790 | { |
bea3348e SH |
791 | struct rhine_private *rp = container_of(napi, struct rhine_private, napi); |
792 | struct net_device *dev = rp->dev; | |
633949a1 | 793 | void __iomem *ioaddr = rp->base; |
7ab87ff4 FR |
794 | u16 enable_mask = RHINE_EVENT & 0xffff; |
795 | int work_done = 0; | |
796 | u32 status; | |
797 | ||
798 | status = rhine_get_events(rp); | |
799 | rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW); | |
800 | ||
801 | if (status & RHINE_EVENT_NAPI_RX) | |
802 | work_done += rhine_rx(dev, budget); | |
803 | ||
804 | if (status & RHINE_EVENT_NAPI_TX) { | |
805 | if (status & RHINE_EVENT_NAPI_TX_ERR) { | |
7ab87ff4 | 806 | /* Avoid scavenging before Tx engine turned off */ |
a384a33b | 807 | rhine_wait_bit_low(rp, ChipCmd, CmdTxOn); |
fc3e0f8a FR |
808 | if (ioread8(ioaddr + ChipCmd) & CmdTxOn) |
809 | netif_warn(rp, tx_err, dev, "Tx still on\n"); | |
7ab87ff4 | 810 | } |
fc3e0f8a | 811 | |
7ab87ff4 FR |
812 | rhine_tx(dev); |
813 | ||
814 | if (status & RHINE_EVENT_NAPI_TX_ERR) | |
815 | rhine_tx_err(rp, status); | |
816 | } | |
817 | ||
818 | if (status & IntrStatsMax) { | |
819 | spin_lock(&rp->lock); | |
820 | rhine_update_rx_crc_and_missed_errord(rp); | |
821 | spin_unlock(&rp->lock); | |
822 | } | |
633949a1 | 823 | |
7ab87ff4 FR |
824 | if (status & RHINE_EVENT_SLOW) { |
825 | enable_mask &= ~RHINE_EVENT_SLOW; | |
826 | schedule_work(&rp->slow_event_task); | |
827 | } | |
633949a1 | 828 | |
bea3348e | 829 | if (work_done < budget) { |
288379f0 | 830 | napi_complete(napi); |
7ab87ff4 FR |
831 | iowrite16(enable_mask, ioaddr + IntrEnable); |
832 | mmiowb(); | |
633949a1 | 833 | } |
bea3348e | 834 | return work_done; |
633949a1 | 835 | } |
633949a1 | 836 | |
76e239e1 | 837 | static void rhine_hw_init(struct net_device *dev, long pioaddr) |
1da177e4 LT |
838 | { |
839 | struct rhine_private *rp = netdev_priv(dev); | |
840 | ||
841 | /* Reset the chip to erase previous misconfiguration. */ | |
842 | rhine_chip_reset(dev); | |
843 | ||
844 | /* Rhine-I needs extra time to recuperate before EEPROM reload */ | |
845 | if (rp->quirks & rqRhineI) | |
846 | msleep(5); | |
847 | ||
848 | /* Reload EEPROM controlled bytes cleared by soft reset */ | |
849 | rhine_reload_eeprom(pioaddr, dev); | |
850 | } | |
851 | ||
5d1d07d8 SH |
852 | static const struct net_device_ops rhine_netdev_ops = { |
853 | .ndo_open = rhine_open, | |
854 | .ndo_stop = rhine_close, | |
855 | .ndo_start_xmit = rhine_start_tx, | |
f7b5d1b9 | 856 | .ndo_get_stats64 = rhine_get_stats64, |
afc4b13d | 857 | .ndo_set_rx_mode = rhine_set_rx_mode, |
635ecaa7 | 858 | .ndo_change_mtu = eth_change_mtu, |
5d1d07d8 | 859 | .ndo_validate_addr = eth_validate_addr, |
fe96aaa1 | 860 | .ndo_set_mac_address = eth_mac_addr, |
5d1d07d8 SH |
861 | .ndo_do_ioctl = netdev_ioctl, |
862 | .ndo_tx_timeout = rhine_tx_timeout, | |
38f49e88 RL |
863 | .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid, |
864 | .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid, | |
5d1d07d8 SH |
865 | #ifdef CONFIG_NET_POLL_CONTROLLER |
866 | .ndo_poll_controller = rhine_poll, | |
867 | #endif | |
868 | }; | |
869 | ||
1dd06ae8 | 870 | static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
1da177e4 LT |
871 | { |
872 | struct net_device *dev; | |
873 | struct rhine_private *rp; | |
874 | int i, rc; | |
1da177e4 LT |
875 | u32 quirks; |
876 | long pioaddr; | |
877 | long memaddr; | |
878 | void __iomem *ioaddr; | |
879 | int io_size, phy_id; | |
880 | const char *name; | |
881 | #ifdef USE_MMIO | |
882 | int bar = 1; | |
883 | #else | |
884 | int bar = 0; | |
885 | #endif | |
886 | ||
887 | /* when built into the kernel, we only print version if device is found */ | |
888 | #ifndef MODULE | |
df4511fe | 889 | pr_info_once("%s\n", version); |
1da177e4 LT |
890 | #endif |
891 | ||
1da177e4 LT |
892 | io_size = 256; |
893 | phy_id = 0; | |
894 | quirks = 0; | |
895 | name = "Rhine"; | |
44c10138 | 896 | if (pdev->revision < VTunknown0) { |
1da177e4 LT |
897 | quirks = rqRhineI; |
898 | io_size = 128; | |
899 | } | |
44c10138 | 900 | else if (pdev->revision >= VT6102) { |
1da177e4 | 901 | quirks = rqWOL | rqForceReset; |
44c10138 | 902 | if (pdev->revision < VT6105) { |
1da177e4 LT |
903 | name = "Rhine II"; |
904 | quirks |= rqStatusWBRace; /* Rhine-II exclusive */ | |
905 | } | |
906 | else { | |
907 | phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */ | |
44c10138 | 908 | if (pdev->revision >= VT6105_B0) |
1da177e4 | 909 | quirks |= rq6patterns; |
44c10138 | 910 | if (pdev->revision < VT6105M) |
1da177e4 LT |
911 | name = "Rhine III"; |
912 | else | |
913 | name = "Rhine III (Management Adapter)"; | |
914 | } | |
915 | } | |
916 | ||
917 | rc = pci_enable_device(pdev); | |
918 | if (rc) | |
919 | goto err_out; | |
920 | ||
921 | /* this should always be supported */ | |
284901a9 | 922 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
1da177e4 | 923 | if (rc) { |
df4511fe JP |
924 | dev_err(&pdev->dev, |
925 | "32-bit PCI DMA addresses not supported by the card!?\n"); | |
ae996154 | 926 | goto err_out_pci_disable; |
1da177e4 LT |
927 | } |
928 | ||
929 | /* sanity check */ | |
930 | if ((pci_resource_len(pdev, 0) < io_size) || | |
931 | (pci_resource_len(pdev, 1) < io_size)) { | |
932 | rc = -EIO; | |
df4511fe | 933 | dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n"); |
ae996154 | 934 | goto err_out_pci_disable; |
1da177e4 LT |
935 | } |
936 | ||
937 | pioaddr = pci_resource_start(pdev, 0); | |
938 | memaddr = pci_resource_start(pdev, 1); | |
939 | ||
940 | pci_set_master(pdev); | |
941 | ||
942 | dev = alloc_etherdev(sizeof(struct rhine_private)); | |
943 | if (!dev) { | |
944 | rc = -ENOMEM; | |
ae996154 | 945 | goto err_out_pci_disable; |
1da177e4 | 946 | } |
1da177e4 LT |
947 | SET_NETDEV_DEV(dev, &pdev->dev); |
948 | ||
949 | rp = netdev_priv(dev); | |
bea3348e | 950 | rp->dev = dev; |
1da177e4 LT |
951 | rp->quirks = quirks; |
952 | rp->pioaddr = pioaddr; | |
953 | rp->pdev = pdev; | |
fc3e0f8a | 954 | rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT); |
1da177e4 LT |
955 | |
956 | rc = pci_request_regions(pdev, DRV_NAME); | |
957 | if (rc) | |
958 | goto err_out_free_netdev; | |
959 | ||
960 | ioaddr = pci_iomap(pdev, bar, io_size); | |
961 | if (!ioaddr) { | |
962 | rc = -EIO; | |
df4511fe JP |
963 | dev_err(&pdev->dev, |
964 | "ioremap failed for device %s, region 0x%X @ 0x%lX\n", | |
965 | pci_name(pdev), io_size, memaddr); | |
1da177e4 LT |
966 | goto err_out_free_res; |
967 | } | |
968 | ||
969 | #ifdef USE_MMIO | |
970 | enable_mmio(pioaddr, quirks); | |
971 | ||
972 | /* Check that selected MMIO registers match the PIO ones */ | |
973 | i = 0; | |
974 | while (mmio_verify_registers[i]) { | |
975 | int reg = mmio_verify_registers[i++]; | |
976 | unsigned char a = inb(pioaddr+reg); | |
977 | unsigned char b = readb(ioaddr+reg); | |
978 | if (a != b) { | |
979 | rc = -EIO; | |
df4511fe JP |
980 | dev_err(&pdev->dev, |
981 | "MMIO do not match PIO [%02x] (%02x != %02x)\n", | |
982 | reg, a, b); | |
1da177e4 LT |
983 | goto err_out_unmap; |
984 | } | |
985 | } | |
986 | #endif /* USE_MMIO */ | |
987 | ||
1da177e4 LT |
988 | rp->base = ioaddr; |
989 | ||
827da44c JS |
990 | u64_stats_init(&rp->tx_stats.syncp); |
991 | u64_stats_init(&rp->rx_stats.syncp); | |
992 | ||
1da177e4 LT |
993 | /* Get chip registers into a sane state */ |
994 | rhine_power_init(dev); | |
995 | rhine_hw_init(dev, pioaddr); | |
996 | ||
997 | for (i = 0; i < 6; i++) | |
998 | dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i); | |
999 | ||
482e3feb JP |
1000 | if (!is_valid_ether_addr(dev->dev_addr)) { |
1001 | /* Report it and use a random ethernet address instead */ | |
1002 | netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr); | |
f2cedb63 | 1003 | eth_hw_addr_random(dev); |
482e3feb JP |
1004 | netdev_info(dev, "Using random MAC address: %pM\n", |
1005 | dev->dev_addr); | |
1da177e4 LT |
1006 | } |
1007 | ||
1008 | /* For Rhine-I/II, phy_id is loaded from EEPROM */ | |
1009 | if (!phy_id) | |
1010 | phy_id = ioread8(ioaddr + 0x6C); | |
1011 | ||
1da177e4 | 1012 | spin_lock_init(&rp->lock); |
7ab87ff4 | 1013 | mutex_init(&rp->task_lock); |
c0d7a021 | 1014 | INIT_WORK(&rp->reset_task, rhine_reset_task); |
7ab87ff4 | 1015 | INIT_WORK(&rp->slow_event_task, rhine_slow_event_task); |
c0d7a021 | 1016 | |
1da177e4 LT |
1017 | rp->mii_if.dev = dev; |
1018 | rp->mii_if.mdio_read = mdio_read; | |
1019 | rp->mii_if.mdio_write = mdio_write; | |
1020 | rp->mii_if.phy_id_mask = 0x1f; | |
1021 | rp->mii_if.reg_num_mask = 0x1f; | |
1022 | ||
1023 | /* The chip-specific entries in the device structure. */ | |
5d1d07d8 SH |
1024 | dev->netdev_ops = &rhine_netdev_ops; |
1025 | dev->ethtool_ops = &netdev_ethtool_ops; |
1da177e4 | 1026 | dev->watchdog_timeo = TX_TIMEOUT; |
5d1d07d8 | 1027 | |
bea3348e | 1028 | netif_napi_add(dev, &rp->napi, rhine_napipoll, 64); |
32b0f53e | 1029 | |
1da177e4 LT |
1030 | if (rp->quirks & rqRhineI) |
1031 | dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; | |
1032 | ||
38f49e88 | 1033 | if (pdev->revision >= VT6105M) |
f646968f PM |
1034 | dev->features |= NETIF_F_HW_VLAN_CTAG_TX | |
1035 | NETIF_F_HW_VLAN_CTAG_RX | | |
1036 | NETIF_F_HW_VLAN_CTAG_FILTER; | |
38f49e88 | 1037 | |
1da177e4 LT |
1038 | /* dev->name not defined before register_netdev()! */ |
1039 | rc = register_netdev(dev); | |
1040 | if (rc) | |
1041 | goto err_out_unmap; | |
1042 | ||
df4511fe JP |
1043 | netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n", |
1044 | name, | |
1da177e4 | 1045 | #ifdef USE_MMIO |
df4511fe | 1046 | memaddr, |
1da177e4 | 1047 | #else |
df4511fe | 1048 | (long)ioaddr, |
1da177e4 | 1049 | #endif |
df4511fe | 1050 | dev->dev_addr, pdev->irq); |
1da177e4 LT |
1051 | |
1052 | pci_set_drvdata(pdev, dev); | |
1053 | ||
1054 | { | |
1055 | u16 mii_cmd; | |
1056 | int mii_status = mdio_read(dev, phy_id, 1); | |
1057 | mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE; | |
1058 | mdio_write(dev, phy_id, MII_BMCR, mii_cmd); | |
1059 | if (mii_status != 0xffff && mii_status != 0x0000) { | |
1060 | rp->mii_if.advertising = mdio_read(dev, phy_id, 4); | |
df4511fe JP |
1061 | netdev_info(dev, |
1062 | "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n", | |
1063 | phy_id, | |
1064 | mii_status, rp->mii_if.advertising, | |
1065 | mdio_read(dev, phy_id, 5)); | |
1da177e4 LT |
1066 | |
1067 | /* set IFF_RUNNING */ | |
1068 | if (mii_status & BMSR_LSTATUS) | |
1069 | netif_carrier_on(dev); | |
1070 | else | |
1071 | netif_carrier_off(dev); | |
1072 | ||
1073 | } | |
1074 | } | |
1075 | rp->mii_if.phy_id = phy_id; | |
fc3e0f8a FR |
1076 | if (avoid_D3) |
1077 | netif_info(rp, probe, dev, "No D3 power state at shutdown\n"); | |
1da177e4 LT |
1078 | |
1079 | return 0; | |
1080 | ||
1081 | err_out_unmap: | |
1082 | pci_iounmap(pdev, ioaddr); | |
1083 | err_out_free_res: | |
1084 | pci_release_regions(pdev); | |
1085 | err_out_free_netdev: | |
1086 | free_netdev(dev); | |
ae996154 RL |
1087 | err_out_pci_disable: |
1088 | pci_disable_device(pdev); | |
1da177e4 LT |
1089 | err_out: |
1090 | return rc; | |
1091 | } | |
1092 | ||
1093 | static int alloc_ring(struct net_device* dev) | |
1094 | { | |
1095 | struct rhine_private *rp = netdev_priv(dev); | |
1096 | void *ring; | |
1097 | dma_addr_t ring_dma; | |
1098 | ||
1099 | ring = pci_alloc_consistent(rp->pdev, | |
1100 | RX_RING_SIZE * sizeof(struct rx_desc) + | |
1101 | TX_RING_SIZE * sizeof(struct tx_desc), | |
1102 | &ring_dma); | |
1103 | if (!ring) { | |
df4511fe | 1104 | netdev_err(dev, "Could not allocate DMA memory\n"); |
1da177e4 LT |
1105 | return -ENOMEM; |
1106 | } | |
1107 | if (rp->quirks & rqRhineI) { | |
1108 | rp->tx_bufs = pci_alloc_consistent(rp->pdev, | |
1109 | PKT_BUF_SZ * TX_RING_SIZE, | |
1110 | &rp->tx_bufs_dma); | |
1111 | if (rp->tx_bufs == NULL) { | |
1112 | pci_free_consistent(rp->pdev, | |
1113 | RX_RING_SIZE * sizeof(struct rx_desc) + | |
1114 | TX_RING_SIZE * sizeof(struct tx_desc), | |
1115 | ring, ring_dma); | |
1116 | return -ENOMEM; | |
1117 | } | |
1118 | } | |
1119 | ||
1120 | rp->rx_ring = ring; | |
1121 | rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc); | |
1122 | rp->rx_ring_dma = ring_dma; | |
1123 | rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc); | |
1124 | ||
1125 | return 0; | |
1126 | } | |
1127 | ||
1128 | static void free_ring(struct net_device* dev) | |
1129 | { | |
1130 | struct rhine_private *rp = netdev_priv(dev); | |
1131 | ||
1132 | pci_free_consistent(rp->pdev, | |
1133 | RX_RING_SIZE * sizeof(struct rx_desc) + | |
1134 | TX_RING_SIZE * sizeof(struct tx_desc), | |
1135 | rp->rx_ring, rp->rx_ring_dma); | |
1136 | rp->tx_ring = NULL; | |
1137 | ||
1138 | if (rp->tx_bufs) | |
1139 | pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE, | |
1140 | rp->tx_bufs, rp->tx_bufs_dma); | |
1141 | ||
1142 | rp->tx_bufs = NULL; | |
1143 | ||
1144 | } | |
1145 | ||
1146 | static void alloc_rbufs(struct net_device *dev) | |
1147 | { | |
1148 | struct rhine_private *rp = netdev_priv(dev); | |
1149 | dma_addr_t next; | |
1150 | int i; | |
1151 | ||
1152 | rp->dirty_rx = rp->cur_rx = 0; | |
1153 | ||
1154 | rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); | |
1155 | rp->rx_head_desc = &rp->rx_ring[0]; | |
1156 | next = rp->rx_ring_dma; | |
1157 | ||
1158 | /* Init the ring entries */ | |
1159 | for (i = 0; i < RX_RING_SIZE; i++) { | |
1160 | rp->rx_ring[i].rx_status = 0; | |
1161 | rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz); | |
1162 | next += sizeof(struct rx_desc); | |
1163 | rp->rx_ring[i].next_desc = cpu_to_le32(next); | |
1164 | rp->rx_skbuff[i] = NULL; | |
1165 | } | |
1166 | /* Mark the last entry as wrapping the ring. */ | |
1167 | rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma); | |
1168 | ||
1169 | /* Fill in the Rx buffers. Handle allocation failure gracefully. */ | |
1170 | for (i = 0; i < RX_RING_SIZE; i++) { | |
b26b555a | 1171 | struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz); |
1da177e4 LT |
1172 | rp->rx_skbuff[i] = skb; |
1173 | if (skb == NULL) | |
1174 | break; | |
1da177e4 LT |
1175 | |
1176 | rp->rx_skbuff_dma[i] = | |
689be439 | 1177 | pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz, |
1da177e4 | 1178 | PCI_DMA_FROMDEVICE); |
9b4fe5fb NH |
1179 | if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[i])) { |
1180 | rp->rx_skbuff_dma[i] = 0; | |
1181 | dev_kfree_skb(skb); | |
1182 | break; | |
1183 | } | |
1da177e4 LT |
1184 | rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]); |
1185 | rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn); | |
1186 | } | |
1187 | rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); | |
1188 | } | |
1189 | ||
1190 | static void free_rbufs(struct net_device* dev) | |
1191 | { | |
1192 | struct rhine_private *rp = netdev_priv(dev); | |
1193 | int i; | |
1194 | ||
1195 | /* Free all the skbuffs in the Rx queue. */ | |
1196 | for (i = 0; i < RX_RING_SIZE; i++) { | |
1197 | rp->rx_ring[i].rx_status = 0; | |
1198 | rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ | |
1199 | if (rp->rx_skbuff[i]) { | |
1200 | pci_unmap_single(rp->pdev, | |
1201 | rp->rx_skbuff_dma[i], | |
1202 | rp->rx_buf_sz, PCI_DMA_FROMDEVICE); | |
1203 | dev_kfree_skb(rp->rx_skbuff[i]); | |
1204 | } | |
1205 | rp->rx_skbuff[i] = NULL; | |
1206 | } | |
1207 | } | |
1208 | ||
1209 | static void alloc_tbufs(struct net_device* dev) | |
1210 | { | |
1211 | struct rhine_private *rp = netdev_priv(dev); | |
1212 | dma_addr_t next; | |
1213 | int i; | |
1214 | ||
1215 | rp->dirty_tx = rp->cur_tx = 0; | |
1216 | next = rp->tx_ring_dma; | |
1217 | for (i = 0; i < TX_RING_SIZE; i++) { | |
1218 | rp->tx_skbuff[i] = NULL; | |
1219 | rp->tx_ring[i].tx_status = 0; | |
1220 | rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); | |
1221 | next += sizeof(struct tx_desc); | |
1222 | rp->tx_ring[i].next_desc = cpu_to_le32(next); | |
4be5de25 RL |
1223 | if (rp->quirks & rqRhineI) |
1224 | rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ]; | |
1da177e4 LT |
1225 | } |
1226 | rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma); | |
1227 | ||
1228 | } | |
1229 | ||
1230 | static void free_tbufs(struct net_device* dev) | |
1231 | { | |
1232 | struct rhine_private *rp = netdev_priv(dev); | |
1233 | int i; | |
1234 | ||
1235 | for (i = 0; i < TX_RING_SIZE; i++) { | |
1236 | rp->tx_ring[i].tx_status = 0; | |
1237 | rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); | |
1238 | rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ | |
1239 | if (rp->tx_skbuff[i]) { | |
1240 | if (rp->tx_skbuff_dma[i]) { | |
1241 | pci_unmap_single(rp->pdev, | |
1242 | rp->tx_skbuff_dma[i], | |
1243 | rp->tx_skbuff[i]->len, | |
1244 | PCI_DMA_TODEVICE); | |
1245 | } | |
1246 | dev_kfree_skb(rp->tx_skbuff[i]); | |
1247 | } | |
1248 | rp->tx_skbuff[i] = NULL; | |
1249 | rp->tx_buf[i] = NULL; | |
1250 | } | |
1251 | } | |
1252 | ||
1253 | static void rhine_check_media(struct net_device *dev, unsigned int init_media) | |
1254 | { | |
1255 | struct rhine_private *rp = netdev_priv(dev); | |
1256 | void __iomem *ioaddr = rp->base; | |
1257 | ||
fc3e0f8a | 1258 | mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media); |
1da177e4 LT |
1259 | |
1260 | if (rp->mii_if.full_duplex) | |
1261 | iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex, | |
1262 | ioaddr + ChipCmd1); | |
1263 | else | |
1264 | iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex, | |
1265 | ioaddr + ChipCmd1); | |
fc3e0f8a FR |
1266 | |
1267 | netif_info(rp, link, dev, "force_media %d, carrier %d\n", | |
1268 | rp->mii_if.force_media, netif_carrier_ok(dev)); | |
00b428c2 RL |
1269 | } |
1270 | ||
1271 | /* Called after status of force_media possibly changed */ | |
0761be4f | 1272 | static void rhine_set_carrier(struct mii_if_info *mii) |
00b428c2 | 1273 | { |
fc3e0f8a FR |
1274 | struct net_device *dev = mii->dev; |
1275 | struct rhine_private *rp = netdev_priv(dev); | |
1276 | ||
00b428c2 RL |
1277 | if (mii->force_media) { |
1278 | /* autoneg is off: Link is always assumed to be up */ | |
fc3e0f8a FR |
1279 | if (!netif_carrier_ok(dev)) |
1280 | netif_carrier_on(dev); | |
1281 | } else /* Let MII library update carrier status */ |
1282 | rhine_check_media(dev, 0); | |
1283 | ||
1284 | netif_info(rp, link, dev, "force_media %d, carrier %d\n", | |
1285 | mii->force_media, netif_carrier_ok(dev)); | |
1da177e4 LT |
1286 | } |
1287 | ||
38f49e88 RL |
1288 | /** |
1289 | * rhine_set_cam - set CAM multicast filters | |
1290 | * @ioaddr: register block of this Rhine | |
1291 | * @idx: multicast CAM index [0..MCAM_SIZE-1] | |
1292 | * @addr: multicast address (6 bytes) | |
1293 | * | |
1294 | * Load addresses into multicast filters. | |
1295 | */ | |
1296 | static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr) | |
1297 | { | |
1298 | int i; | |
1299 | ||
1300 | iowrite8(CAMC_CAMEN, ioaddr + CamCon); | |
1301 | wmb(); | |
1302 | ||
1303 | /* Paranoid -- idx out of range should never happen */ | |
1304 | idx &= (MCAM_SIZE - 1); | |
1305 | ||
1306 | iowrite8((u8) idx, ioaddr + CamAddr); | |
1307 | ||
1308 | for (i = 0; i < 6; i++, addr++) | |
1309 | iowrite8(*addr, ioaddr + MulticastFilter0 + i); | |
1310 | udelay(10); | |
1311 | wmb(); | |
1312 | ||
1313 | iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon); | |
1314 | udelay(10); | |
1315 | ||
1316 | iowrite8(0, ioaddr + CamCon); | |
1317 | } | |
1318 | ||
1319 | /** | |
1320 | * rhine_set_vlan_cam - set CAM VLAN filters | |
1321 | * @ioaddr: register block of this Rhine | |
1322 | * @idx: VLAN CAM index [0..VCAM_SIZE-1] | |
1323 | * @addr: VLAN ID (2 bytes) | |
1324 | * | |
1325 | * Load addresses into VLAN filters. | |
1326 | */ | |
1327 | static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr) | |
1328 | { | |
1329 | iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon); | |
1330 | wmb(); | |
1331 | ||
1332 | /* Paranoid -- idx out of range should never happen */ | |
1333 | idx &= (VCAM_SIZE - 1); | |
1334 | ||
1335 | iowrite8((u8) idx, ioaddr + CamAddr); | |
1336 | ||
1337 | iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6); | |
1338 | udelay(10); | |
1339 | wmb(); | |
1340 | ||
1341 | iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon); | |
1342 | udelay(10); | |
1343 | ||
1344 | iowrite8(0, ioaddr + CamCon); | |
1345 | } | |
1346 | ||
1347 | /** | |
1348 | * rhine_set_cam_mask - set multicast CAM mask | |
1349 | * @ioaddr: register block of this Rhine | |
1350 | * @mask: multicast CAM mask | |
1351 | * | |
1352 | * Mask sets multicast filters active/inactive. | |
1353 | */ | |
1354 | static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask) | |
1355 | { | |
1356 | iowrite8(CAMC_CAMEN, ioaddr + CamCon); | |
1357 | wmb(); | |
1358 | ||
1359 | /* write mask */ | |
1360 | iowrite32(mask, ioaddr + CamMask); | |
1361 | ||
1362 | /* disable CAMEN */ | |
1363 | iowrite8(0, ioaddr + CamCon); | |
1364 | } | |
1365 | ||
1366 | /** | |
1367 | * rhine_set_vlan_cam_mask - set VLAN CAM mask | |
1368 | * @ioaddr: register block of this Rhine | |
1369 | * @mask: VLAN CAM mask | |
1370 | * | |
1371 | * Mask sets VLAN filters active/inactive. | |
1372 | */ | |
1373 | static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask) | |
1374 | { | |
1375 | iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon); | |
1376 | wmb(); | |
1377 | ||
1378 | /* write mask */ | |
1379 | iowrite32(mask, ioaddr + CamMask); | |
1380 | ||
1381 | /* disable CAMEN */ | |
1382 | iowrite8(0, ioaddr + CamCon); | |
1383 | } | |
1384 | ||
1385 | /** | |
1386 | * rhine_init_cam_filter - initialize CAM filters | |
1387 | * @dev: network device | |
1388 | * | |
1389 | * Initialize (disable) hardware VLAN and multicast support on this | |
1390 | * Rhine. | |
1391 | */ | |
1392 | static void rhine_init_cam_filter(struct net_device *dev) | |
1393 | { | |
1394 | struct rhine_private *rp = netdev_priv(dev); | |
1395 | void __iomem *ioaddr = rp->base; | |
1396 | ||
1397 | /* Disable all CAMs */ | |
1398 | rhine_set_vlan_cam_mask(ioaddr, 0); | |
1399 | rhine_set_cam_mask(ioaddr, 0); | |
1400 | ||
1401 | /* disable hardware VLAN support */ | |
1402 | BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig); | |
1403 | BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1); | |
1404 | } | |
1405 | ||
1406 | /** | |
1407 | * rhine_update_vcam - update VLAN CAM filters | |
1408 | * @dev: network device |
1409 | * | |
1410 | * Update VLAN CAM filters to match configuration change. | |
1411 | */ | |
1412 | static void rhine_update_vcam(struct net_device *dev) | |
1413 | { | |
1414 | struct rhine_private *rp = netdev_priv(dev); | |
1415 | void __iomem *ioaddr = rp->base; | |
1416 | u16 vid; | |
1417 | u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */ | |
1418 | unsigned int i = 0; | |
1419 | ||
1420 | for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) { | |
1421 | rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid); | |
1422 | vCAMmask |= 1 << i; | |
1423 | if (++i >= VCAM_SIZE) | |
1424 | break; | |
1425 | } | |
1426 | rhine_set_vlan_cam_mask(ioaddr, vCAMmask); | |
1427 | } | |
1428 | ||
80d5c368 | 1429 | static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) |
38f49e88 RL |
1430 | { |
1431 | struct rhine_private *rp = netdev_priv(dev); | |
1432 | ||
7ab87ff4 | 1433 | spin_lock_bh(&rp->lock); |
38f49e88 RL |
1434 | set_bit(vid, rp->active_vlans); |
1435 | rhine_update_vcam(dev); | |
7ab87ff4 | 1436 | spin_unlock_bh(&rp->lock); |
8e586137 | 1437 | return 0; |
38f49e88 RL |
1438 | } |
1439 | ||
80d5c368 | 1440 | static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) |
38f49e88 RL |
1441 | { |
1442 | struct rhine_private *rp = netdev_priv(dev); | |
1443 | ||
7ab87ff4 | 1444 | spin_lock_bh(&rp->lock); |
38f49e88 RL |
1445 | clear_bit(vid, rp->active_vlans); |
1446 | rhine_update_vcam(dev); | |
7ab87ff4 | 1447 | spin_unlock_bh(&rp->lock); |
8e586137 | 1448 | return 0; |
38f49e88 RL |
1449 | } |
1450 | ||
1da177e4 LT |
1451 | static void init_registers(struct net_device *dev) |
1452 | { | |
1453 | struct rhine_private *rp = netdev_priv(dev); | |
1454 | void __iomem *ioaddr = rp->base; | |
1455 | int i; | |
1456 | ||
1457 | for (i = 0; i < 6; i++) | |
1458 | iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i); | |
1459 | ||
1460 | /* Initialize other registers. */ | |
1461 | iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */ | |
1462 | /* Configure initial FIFO thresholds. */ | |
1463 | iowrite8(0x20, ioaddr + TxConfig); | |
1464 | rp->tx_thresh = 0x20; | |
1465 | rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */ | |
1466 | ||
1467 | iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr); | |
1468 | iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr); | |
1469 | ||
1470 | rhine_set_rx_mode(dev); | |
1471 | ||
38f49e88 RL |
1472 | if (rp->pdev->revision >= VT6105M) |
1473 | rhine_init_cam_filter(dev); | |
1474 | ||
bea3348e | 1475 | napi_enable(&rp->napi); |
ab197668 | 1476 | |
7ab87ff4 | 1477 | iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable); |
1da177e4 LT |
1478 | |
1479 | iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8), | |
1480 | ioaddr + ChipCmd); | |
1481 | rhine_check_media(dev, 1); | |
1482 | } | |
1483 | ||
1484 | /* Enable MII link status auto-polling (required for IntrLinkChange) */ | |
a384a33b | 1485 | static void rhine_enable_linkmon(struct rhine_private *rp) |
1da177e4 | 1486 | { |
a384a33b FR |
1487 | void __iomem *ioaddr = rp->base; |
1488 | ||
1da177e4 LT |
1489 | iowrite8(0, ioaddr + MIICmd); |
1490 | iowrite8(MII_BMSR, ioaddr + MIIRegAddr); | |
1491 | iowrite8(0x80, ioaddr + MIICmd); | |
1492 | ||
a384a33b | 1493 | rhine_wait_bit_high(rp, MIIRegAddr, 0x20); |
1da177e4 LT |
1494 | |
1495 | iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr); | |
1496 | } | |
1497 | ||
1498 | /* Disable MII link status auto-polling (required for MDIO access) */ | |
a384a33b | 1499 | static void rhine_disable_linkmon(struct rhine_private *rp) |
1da177e4 | 1500 | { |
a384a33b FR |
1501 | void __iomem *ioaddr = rp->base; |
1502 | ||
1da177e4 LT |
1503 | iowrite8(0, ioaddr + MIICmd); |
1504 | ||
a384a33b | 1505 | if (rp->quirks & rqRhineI) { |
1da177e4 LT |
1506 | iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR |
1507 | ||
38bb6b28 JL |
1508 | /* Can be called from ISR. Evil. */ |
1509 | mdelay(1); | |
1da177e4 LT |
1510 | |
1511 | /* 0x80 must be set immediately before turning it off */ | |
1512 | iowrite8(0x80, ioaddr + MIICmd); | |
1513 | ||
a384a33b | 1514 | rhine_wait_bit_high(rp, MIIRegAddr, 0x20); |
1da177e4 LT |
1515 | |
1516 | /* Heh. Now clear 0x80 again. */ | |
1517 | iowrite8(0, ioaddr + MIICmd); | |
1518 | } | |
1519 | else | |
a384a33b | 1520 | rhine_wait_bit_high(rp, MIIRegAddr, 0x80); |
1da177e4 LT |
1521 | } |
1522 | ||
1523 | /* Read and write over the MII Management Data I/O (MDIO) interface. */ | |
1524 | ||
1525 | static int mdio_read(struct net_device *dev, int phy_id, int regnum) | |
1526 | { | |
1527 | struct rhine_private *rp = netdev_priv(dev); | |
1528 | void __iomem *ioaddr = rp->base; | |
1529 | int result; | |
1530 | ||
a384a33b | 1531 | rhine_disable_linkmon(rp); |
1da177e4 LT |
1532 | |
1533 | /* rhine_disable_linkmon already cleared MIICmd */ | |
1534 | iowrite8(phy_id, ioaddr + MIIPhyAddr); | |
1535 | iowrite8(regnum, ioaddr + MIIRegAddr); | |
1536 | iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */ | |
a384a33b | 1537 | rhine_wait_bit_low(rp, MIICmd, 0x40); |
1da177e4 LT |
1538 | result = ioread16(ioaddr + MIIData); |
1539 | ||
a384a33b | 1540 | rhine_enable_linkmon(rp); |
1da177e4 LT |
1541 | return result; |
1542 | } | |
1543 | ||
1544 | static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value) | |
1545 | { | |
1546 | struct rhine_private *rp = netdev_priv(dev); | |
1547 | void __iomem *ioaddr = rp->base; | |
1548 | ||
a384a33b | 1549 | rhine_disable_linkmon(rp); |
1da177e4 LT |
1550 | |
1551 | /* rhine_disable_linkmon already cleared MIICmd */ | |
1552 | iowrite8(phy_id, ioaddr + MIIPhyAddr); | |
1553 | iowrite8(regnum, ioaddr + MIIRegAddr); | |
1554 | iowrite16(value, ioaddr + MIIData); | |
1555 | iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */ | |
a384a33b | 1556 | rhine_wait_bit_low(rp, MIICmd, 0x20); |
1da177e4 | 1557 | |
a384a33b | 1558 | rhine_enable_linkmon(rp); |
1da177e4 LT |
1559 | } |
1560 | ||
7ab87ff4 FR |
1561 | static void rhine_task_disable(struct rhine_private *rp) |
1562 | { | |
1563 | mutex_lock(&rp->task_lock); | |
1564 | rp->task_enable = false; | |
1565 | mutex_unlock(&rp->task_lock); | |
1566 | ||
1567 | cancel_work_sync(&rp->slow_event_task); | |
1568 | cancel_work_sync(&rp->reset_task); | |
1569 | } | |
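/*
 * Ordering note for rhine_task_disable(): rp->task_enable is cleared under
 * task_lock first, so a reset_task or slow_event_task that runs afterwards
 * bails out as soon as it takes the mutex; the cancel_work_sync() calls then
 * wait for any instance that is already executing.
 */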
1570 | ||
1571 | static void rhine_task_enable(struct rhine_private *rp) | |
1572 | { | |
1573 | mutex_lock(&rp->task_lock); | |
1574 | rp->task_enable = true; | |
1575 | mutex_unlock(&rp->task_lock); | |
1576 | } | |
1577 | ||
1da177e4 LT |
1578 | static int rhine_open(struct net_device *dev) |
1579 | { | |
1580 | struct rhine_private *rp = netdev_priv(dev); | |
1581 | void __iomem *ioaddr = rp->base; | |
1582 | int rc; | |
1583 | ||
76781382 | 1584 | rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name, |
1da177e4 LT |
1585 | dev); |
1586 | if (rc) | |
1587 | return rc; | |
1588 | ||
fc3e0f8a | 1589 | netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq); |
1da177e4 LT |
1590 | |
1591 | rc = alloc_ring(dev); | |
1592 | if (rc) { | |
1593 | free_irq(rp->pdev->irq, dev); | |
1594 | return rc; | |
1595 | } | |
1596 | alloc_rbufs(dev); | |
1597 | alloc_tbufs(dev); | |
1598 | rhine_chip_reset(dev); | |
7ab87ff4 | 1599 | rhine_task_enable(rp); |
1da177e4 | 1600 | init_registers(dev); |
fc3e0f8a FR |
1601 | |
1602 | netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n", | |
1603 | __func__, ioread16(ioaddr + ChipCmd), | |
1604 | mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); | |
1da177e4 LT |
1605 | |
1606 | netif_start_queue(dev); | |
1607 | ||
1608 | return 0; | |
1609 | } | |
1610 | ||
c0d7a021 | 1611 | static void rhine_reset_task(struct work_struct *work) |
1da177e4 | 1612 | { |
c0d7a021 JP |
1613 | struct rhine_private *rp = container_of(work, struct rhine_private, |
1614 | reset_task); | |
1615 | struct net_device *dev = rp->dev; | |
1da177e4 | 1616 | |
7ab87ff4 | 1617 | mutex_lock(&rp->task_lock); |
1da177e4 | 1618 | |
7ab87ff4 FR |
1619 | if (!rp->task_enable) |
1620 | goto out_unlock; | |
bea3348e | 1621 | |
7ab87ff4 | 1622 | napi_disable(&rp->napi); |
a926592f | 1623 | netif_tx_disable(dev); |
c0d7a021 | 1624 | spin_lock_bh(&rp->lock); |
1da177e4 LT |
1625 | |
1626 | /* clear all descriptors */ | |
1627 | free_tbufs(dev); | |
1628 | free_rbufs(dev); | |
1629 | alloc_tbufs(dev); | |
1630 | alloc_rbufs(dev); | |
1631 | ||
1632 | /* Reinitialize the hardware. */ | |
1633 | rhine_chip_reset(dev); | |
1634 | init_registers(dev); | |
1635 | ||
c0d7a021 | 1636 | spin_unlock_bh(&rp->lock); |
1da177e4 | 1637 | |
1ae5dc34 | 1638 | dev->trans_start = jiffies; /* prevent tx timeout */ |
553e2335 | 1639 | dev->stats.tx_errors++; |
1da177e4 | 1640 | netif_wake_queue(dev); |
7ab87ff4 FR |
1641 | |
1642 | out_unlock: | |
1643 | mutex_unlock(&rp->task_lock); | |
1da177e4 LT |
1644 | } |
1645 | ||
c0d7a021 JP |
1646 | static void rhine_tx_timeout(struct net_device *dev) |
1647 | { | |
1648 | struct rhine_private *rp = netdev_priv(dev); | |
1649 | void __iomem *ioaddr = rp->base; | |
1650 | ||
df4511fe JP |
1651 | netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n", |
1652 | ioread16(ioaddr + IntrStatus), | |
1653 | mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); | |
c0d7a021 JP |
1654 | |
1655 | schedule_work(&rp->reset_task); | |
1656 | } | |
1657 | ||
61357325 SH |
1658 | static netdev_tx_t rhine_start_tx(struct sk_buff *skb, |
1659 | struct net_device *dev) | |
1da177e4 LT |
1660 | { |
1661 | struct rhine_private *rp = netdev_priv(dev); | |
1662 | void __iomem *ioaddr = rp->base; | |
1663 | unsigned entry; | |
1664 | ||
1665 | /* Caution: the write order is important here, set the field | |
1666 | with the "ownership" bits last. */ | |
1667 | ||
1668 | /* Calculate the next Tx descriptor entry. */ | |
1669 | entry = rp->cur_tx % TX_RING_SIZE; | |
1670 | ||
5b057c6b | 1671 | if (skb_padto(skb, ETH_ZLEN)) |
6ed10654 | 1672 | return NETDEV_TX_OK; |
1da177e4 LT |
1673 | |
1674 | rp->tx_skbuff[entry] = skb; | |
1675 | ||
1676 | if ((rp->quirks & rqRhineI) && | |
84fa7933 | 1677 | (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) { |
1da177e4 LT |
1678 | /* Must use alignment buffer. */ |
1679 | if (skb->len > PKT_BUF_SZ) { | |
1680 | /* packet too long, drop it */ | |
1681 | dev_kfree_skb(skb); | |
1682 | rp->tx_skbuff[entry] = NULL; | |
553e2335 | 1683 | dev->stats.tx_dropped++; |
6ed10654 | 1684 | return NETDEV_TX_OK; |
1da177e4 | 1685 | } |
3e0d167a CB |
1686 | |
1687 | /* Padding is not copied and so must be redone. */ | |
1da177e4 | 1688 | skb_copy_and_csum_dev(skb, rp->tx_buf[entry]); |
3e0d167a CB |
1689 | if (skb->len < ETH_ZLEN) |
1690 | memset(rp->tx_buf[entry] + skb->len, 0, | |
1691 | ETH_ZLEN - skb->len); | |
1da177e4 LT |
1692 | rp->tx_skbuff_dma[entry] = 0; |
1693 | rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma + | |
1694 | (rp->tx_buf[entry] - | |
1695 | rp->tx_bufs)); | |
1696 | } else { | |
1697 | rp->tx_skbuff_dma[entry] = | |
1698 | pci_map_single(rp->pdev, skb->data, skb->len, | |
1699 | PCI_DMA_TODEVICE); | |
9b4fe5fb NH |
1700 | if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) { |
1701 | dev_kfree_skb(skb); | |
1702 | rp->tx_skbuff_dma[entry] = 0; | |
1703 | dev->stats.tx_dropped++; | |
1704 | return NETDEV_TX_OK; | |
1705 | } | |
1da177e4 LT |
1706 | rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]); |
1707 | } | |
1708 | ||
1709 | rp->tx_ring[entry].desc_length = | |
1710 | cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); | |
1711 | ||
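/*
 * Worked example of the VID/PCP rewrite below: for a tag with PCP 5, DEI 0,
 * VID 5 (vlan_tx_tag_get() == 0xa005), masking with VLAN_VID_MASK keeps
 * 0x0005 and (0xa005 & VLAN_PRIO_MASK) >> 1 gives 0x5000, so the descriptor
 * gets 0x5005: the DEI bit is dropped and the priority field moves down next
 * to the 12-bit VID.
 */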
38f49e88 | 1712 | if (unlikely(vlan_tx_tag_present(skb))) { |
207070f5 RL |
1713 | u16 vid_pcp = vlan_tx_tag_get(skb); |
1714 | ||
1715 | /* drop CFI/DEI bit, register needs VID and PCP */ | |
1716 | vid_pcp = (vid_pcp & VLAN_VID_MASK) | | |
1717 | ((vid_pcp & VLAN_PRIO_MASK) >> 1); | |
1718 | rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16); | |
38f49e88 RL |
1719 | /* request tagging */ |
1720 | rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); | |
1721 | } | |
1722 | else | |
1723 | rp->tx_ring[entry].tx_status = 0; | |
1724 | ||
1da177e4 | 1725 | /* Make sure all descriptor writes above are visible before setting DescOwn. */ |
1da177e4 | 1726 | wmb(); |
38f49e88 | 1727 | rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); |
1da177e4 LT |
1728 | wmb(); |
1729 | ||
1730 | rp->cur_tx++; | |
1731 | ||
1732 | /* Non-x86 Todo: explicitly flush cache lines here. */ | |
1733 | ||
38f49e88 RL |
1734 | if (vlan_tx_tag_present(skb)) |
1735 | /* Tx queues are bits 7-0 (first Tx queue: bit 7) */ | |
1736 | BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake); | |
1737 | ||
1da177e4 LT |
1738 | /* Wake the potentially-idle transmit channel */ |
1739 | iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, | |
1740 | ioaddr + ChipCmd1); | |
1741 | IOSYNC; | |
1742 | ||
1743 | if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN) | |
1744 | netif_stop_queue(dev); | |
1745 | ||
fc3e0f8a FR |
1746 | netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n", |
1747 | rp->cur_tx - 1, entry); | |
1748 | ||
6ed10654 | 1749 | return NETDEV_TX_OK; |
1da177e4 LT |
1750 | } |
1751 | ||
7ab87ff4 FR |
1752 | static void rhine_irq_disable(struct rhine_private *rp) |
1753 | { | |
1754 | iowrite16(0x0000, rp->base + IntrEnable); | |
1755 | mmiowb(); | |
1756 | } | |
1757 | ||
1da177e4 LT |
1758 | /* The interrupt handler does all of the Rx thread work and cleans up |
1759 | after the Tx thread. */ | |
7d12e780 | 1760 | static irqreturn_t rhine_interrupt(int irq, void *dev_instance) |
1da177e4 LT |
1761 | { |
1762 | struct net_device *dev = dev_instance; | |
1763 | struct rhine_private *rp = netdev_priv(dev); | |
7ab87ff4 | 1764 | u32 status; |
1da177e4 LT |
1765 | int handled = 0; |
1766 | ||
7ab87ff4 | 1767 | status = rhine_get_events(rp); |
1da177e4 | 1768 | |
fc3e0f8a | 1769 | netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status); |
633949a1 | 1770 | |
7ab87ff4 FR |
1771 | if (status & RHINE_EVENT) { |
1772 | handled = 1; | |
1da177e4 | 1773 | |
7ab87ff4 FR |
1774 | rhine_irq_disable(rp); |
1775 | napi_schedule(&rp->napi); | |
1776 | } | |
1da177e4 | 1777 | |
7ab87ff4 | 1778 | if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) { |
fc3e0f8a FR |
1779 | netif_err(rp, intr, dev, "Something Wicked happened! %08x\n", |
1780 | status); | |
1da177e4 LT |
1781 | } |
1782 | ||
1da177e4 LT |
1783 | return IRQ_RETVAL(handled); |
1784 | } | |
1785 | ||
1786 | /* This routine is logically part of the interrupt handler, but isolated | |
1787 | for clarity. */ | |
1788 | static void rhine_tx(struct net_device *dev) | |
1789 | { | |
1790 | struct rhine_private *rp = netdev_priv(dev); | |
1791 | int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE; | |
1792 | ||
1da177e4 LT |
1793 | /* find and cleanup dirty tx descriptors */ |
1794 | while (rp->dirty_tx != rp->cur_tx) { | |
1795 | txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status); | |
fc3e0f8a FR |
1796 | netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n", |
1797 | entry, txstatus); | |
1da177e4 LT |
1798 | if (txstatus & DescOwn) |
1799 | break; | |
1800 | if (txstatus & 0x8000) { | |
fc3e0f8a FR |
1801 | netif_dbg(rp, tx_done, dev, |
1802 | "Transmit error, Tx status %08x\n", txstatus); | |
553e2335 ED |
1803 | dev->stats.tx_errors++; |
1804 | if (txstatus & 0x0400) | |
1805 | dev->stats.tx_carrier_errors++; | |
1806 | if (txstatus & 0x0200) | |
1807 | dev->stats.tx_window_errors++; | |
1808 | if (txstatus & 0x0100) | |
1809 | dev->stats.tx_aborted_errors++; | |
1810 | if (txstatus & 0x0080) | |
1811 | dev->stats.tx_heartbeat_errors++; | |
1da177e4 LT |
1812 | if (((rp->quirks & rqRhineI) && txstatus & 0x0002) || |
1813 | (txstatus & 0x0800) || (txstatus & 0x1000)) { | |
553e2335 | 1814 | dev->stats.tx_fifo_errors++; |
1da177e4 LT |
1815 | rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); |
1816 | break; /* Keep the skb - we try again */ | |
1817 | } | |
1818 | /* Transmitter restarted in 'abnormal' handler. */ | |
1819 | } else { | |
1820 | if (rp->quirks & rqRhineI) | |
553e2335 | 1821 | dev->stats.collisions += (txstatus >> 3) & 0x0F; |
1da177e4 | 1822 | else |
553e2335 | 1823 | dev->stats.collisions += txstatus & 0x0F; |
fc3e0f8a FR |
1824 | netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n", |
1825 | (txstatus >> 3) & 0xF, txstatus & 0xF); | |
f7b5d1b9 JG |
1826 | |
1827 | u64_stats_update_begin(&rp->tx_stats.syncp); | |
1828 | rp->tx_stats.bytes += rp->tx_skbuff[entry]->len; | |
1829 | rp->tx_stats.packets++; | |
1830 | u64_stats_update_end(&rp->tx_stats.syncp); | |
1da177e4 LT |
1831 | } |
1832 | /* Free the original skb. */ | |
1833 | if (rp->tx_skbuff_dma[entry]) { | |
1834 | pci_unmap_single(rp->pdev, | |
1835 | rp->tx_skbuff_dma[entry], | |
1836 | rp->tx_skbuff[entry]->len, | |
1837 | PCI_DMA_TODEVICE); | |
1838 | } | |
559bcac3 | 1839 | dev_kfree_skb(rp->tx_skbuff[entry]); |
1da177e4 LT |
1840 | rp->tx_skbuff[entry] = NULL; |
1841 | entry = (++rp->dirty_tx) % TX_RING_SIZE; | |
1842 | } | |
1843 | if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4) | |
1844 | netif_wake_queue(dev); | |
1da177e4 LT |
1845 | } |
1846 | ||
38f49e88 RL |
1847 | /** |
1848 | * rhine_get_vlan_tci - extract TCI from Rx data buffer | |
1849 | * @skb: pointer to sk_buff | |
1850 | * @data_size: used data area of the buffer including CRC | |
1851 | * | |
1852 | * If hardware VLAN tag extraction is enabled and the chip indicates an 802.1Q | |
1853 | * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte | |
1854 | * aligned following the CRC. | |
1855 | */ | |
1856 | static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size) | |
1857 | { | |
1858 | u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2; | |
4562b2fe | 1859 | return be16_to_cpup((__be16 *)trailer); |
38f49e88 RL |
1860 | } |
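/*
 * Buffer layout example for rhine_get_vlan_tci(): for a received frame with
 * data_size = 64 (60 bytes of frame data plus 4 bytes CRC), the 802.1Q header
 * starts at the next 4-byte boundary, i.e. TPID at offsets 64-65 and TCI at
 * offsets 66-67; the "+ 2" above skips the TPID and be16_to_cpup() returns
 * the TCI in host order.
 */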
1861 | ||
633949a1 RL |
1862 | /* Process up to limit frames from receive ring */ |
1863 | static int rhine_rx(struct net_device *dev, int limit) | |
1da177e4 LT |
1864 | { |
1865 | struct rhine_private *rp = netdev_priv(dev); | |
633949a1 | 1866 | int count; |
1da177e4 | 1867 | int entry = rp->cur_rx % RX_RING_SIZE; |
1da177e4 | 1868 | |
fc3e0f8a FR |
1869 | netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__, |
1870 | entry, le32_to_cpu(rp->rx_head_desc->rx_status)); | |
1da177e4 LT |
1871 | |
1872 | /* If EOP is set on the next entry, it's a new packet. Send it up. */ | |
633949a1 | 1873 | for (count = 0; count < limit; ++count) { |
1da177e4 LT |
1874 | struct rx_desc *desc = rp->rx_head_desc; |
1875 | u32 desc_status = le32_to_cpu(desc->rx_status); | |
38f49e88 | 1876 | u32 desc_length = le32_to_cpu(desc->desc_length); |
1da177e4 LT |
1877 | int data_size = desc_status >> 16; |
1878 | ||
633949a1 RL |
1879 | if (desc_status & DescOwn) |
1880 | break; | |
1881 | ||
fc3e0f8a FR |
1882 | netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__, |
1883 | desc_status); | |
633949a1 | 1884 | |
1da177e4 LT |
1885 | if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { |
1886 | if ((desc_status & RxWholePkt) != RxWholePkt) { | |
df4511fe JP |
1887 | netdev_warn(dev, |
1888 | "Oversized Ethernet frame spanned multiple buffers, " | |
1889 | "entry %#x length %d status %08x!\n", | |
1890 | entry, data_size, | |
1891 | desc_status); | |
1892 | netdev_warn(dev, | |
1893 | "Oversized Ethernet frame %p vs %p\n", | |
1894 | rp->rx_head_desc, | |
1895 | &rp->rx_ring[entry]); | |
553e2335 | 1896 | dev->stats.rx_length_errors++; |
1da177e4 LT |
1897 | } else if (desc_status & RxErr) { |
1898 | /* There was an error. */ | |
fc3e0f8a FR |
1899 | netif_dbg(rp, rx_err, dev, |
1900 | "%s() Rx error %08x\n", __func__, | |
1901 | desc_status); | |
553e2335 ED |
1902 | dev->stats.rx_errors++; |
1903 | if (desc_status & 0x0030) | |
1904 | dev->stats.rx_length_errors++; | |
1905 | if (desc_status & 0x0048) | |
1906 | dev->stats.rx_fifo_errors++; | |
1907 | if (desc_status & 0x0004) | |
1908 | dev->stats.rx_frame_errors++; | |
1da177e4 LT |
1909 | if (desc_status & 0x0002) { |
1910 | /* this can also be updated outside the interrupt handler */ | |
1911 | spin_lock(&rp->lock); | |
553e2335 | 1912 | dev->stats.rx_crc_errors++; |
1da177e4 LT |
1913 | spin_unlock(&rp->lock); |
1914 | } | |
1915 | } | |
1916 | } else { | |
89d71a66 | 1917 | struct sk_buff *skb = NULL; |
1da177e4 LT |
1918 | /* Length should omit the CRC */ |
1919 | int pkt_len = data_size - 4; | |
38f49e88 | 1920 | u16 vlan_tci = 0; |
1da177e4 LT |
1921 | |
1922 | /* Check if the packet is long enough to accept without | |
1923 | copying to a minimally-sized skbuff. */ | |
89d71a66 ED |
1924 | if (pkt_len < rx_copybreak) |
1925 | skb = netdev_alloc_skb_ip_align(dev, pkt_len); | |
1926 | if (skb) { | |
1da177e4 LT |
1927 | pci_dma_sync_single_for_cpu(rp->pdev, |
1928 | rp->rx_skbuff_dma[entry], | |
1929 | rp->rx_buf_sz, | |
1930 | PCI_DMA_FROMDEVICE); | |
1931 | ||
8c7b7faa | 1932 | skb_copy_to_linear_data(skb, |
689be439 | 1933 | rp->rx_skbuff[entry]->data, |
8c7b7faa | 1934 | pkt_len); |
1da177e4 LT |
1935 | skb_put(skb, pkt_len); |
1936 | pci_dma_sync_single_for_device(rp->pdev, | |
1937 | rp->rx_skbuff_dma[entry], | |
1938 | rp->rx_buf_sz, | |
1939 | PCI_DMA_FROMDEVICE); | |
1940 | } else { | |
1941 | skb = rp->rx_skbuff[entry]; | |
1942 | if (skb == NULL) { | |
df4511fe | 1943 | netdev_err(dev, "Inconsistent Rx descriptor chain\n"); |
1da177e4 LT |
1944 | break; |
1945 | } | |
1946 | rp->rx_skbuff[entry] = NULL; | |
1947 | skb_put(skb, pkt_len); | |
1948 | pci_unmap_single(rp->pdev, | |
1949 | rp->rx_skbuff_dma[entry], | |
1950 | rp->rx_buf_sz, | |
1951 | PCI_DMA_FROMDEVICE); | |
1952 | } | |
38f49e88 RL |
1953 | |
1954 | if (unlikely(desc_length & DescTag)) | |
1955 | vlan_tci = rhine_get_vlan_tci(skb, data_size); | |
1956 | ||
1da177e4 | 1957 | skb->protocol = eth_type_trans(skb, dev); |
38f49e88 RL |
1958 | |
1959 | if (unlikely(desc_length & DescTag)) | |
86a9bad3 | 1960 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); |
633949a1 | 1961 | netif_receive_skb(skb); |
f7b5d1b9 JG |
1962 | |
1963 | u64_stats_update_begin(&rp->rx_stats.syncp); | |
1964 | rp->rx_stats.bytes += pkt_len; | |
1965 | rp->rx_stats.packets++; | |
1966 | u64_stats_update_end(&rp->rx_stats.syncp); | |
1da177e4 LT |
1967 | } |
1968 | entry = (++rp->cur_rx) % RX_RING_SIZE; | |
1969 | rp->rx_head_desc = &rp->rx_ring[entry]; | |
1970 | } | |
1971 | ||
1972 | /* Refill the Rx ring buffers. */ | |
1973 | for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) { | |
1974 | struct sk_buff *skb; | |
1975 | entry = rp->dirty_rx % RX_RING_SIZE; | |
1976 | if (rp->rx_skbuff[entry] == NULL) { | |
b26b555a | 1977 | skb = netdev_alloc_skb(dev, rp->rx_buf_sz); |
1da177e4 LT |
1978 | rp->rx_skbuff[entry] = skb; |
1979 | if (skb == NULL) | |
1980 | break; /* Better luck next round. */ | |
1da177e4 | 1981 | rp->rx_skbuff_dma[entry] = |
689be439 | 1982 | pci_map_single(rp->pdev, skb->data, |
1da177e4 LT |
1983 | rp->rx_buf_sz, |
1984 | PCI_DMA_FROMDEVICE); | |
9b4fe5fb NH |
1985 | if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[entry])) { |
1986 | dev_kfree_skb(skb); | |
1987 | rp->rx_skbuff_dma[entry] = 0; | |
1988 | break; | |
1989 | } | |
1da177e4 LT |
1990 | rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]); |
1991 | } | |
1992 | rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); | |
1993 | } | |
633949a1 RL |
1994 | |
1995 | return count; | |
1da177e4 LT |
1996 | } |
1997 | ||
1da177e4 LT |
1998 | static void rhine_restart_tx(struct net_device *dev) { |
1999 | struct rhine_private *rp = netdev_priv(dev); | |
2000 | void __iomem *ioaddr = rp->base; | |
2001 | int entry = rp->dirty_tx % TX_RING_SIZE; | |
2002 | u32 intr_status; | |
2003 | ||
2004 | /* | |
25985edc | 2005 | * If new errors occurred, we need to sort them out before doing Tx. |
1da177e4 LT |
2006 | * In that case the ISR will be back here RSN anyway. |
2007 | */ | |
a20a28bc | 2008 | intr_status = rhine_get_events(rp); |
1da177e4 LT |
2009 | |
2010 | if ((intr_status & IntrTxErrSummary) == 0) { | |
2011 | ||
2012 | /* We know better than the chip where it should continue. */ | |
2013 | iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc), | |
2014 | ioaddr + TxRingPtr); | |
2015 | ||
2016 | iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn, | |
2017 | ioaddr + ChipCmd); | |
38f49e88 RL |
2018 | |
2019 | if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000)) | |
2020 | /* Tx queues are bits 7-0 (first Tx queue: bit 7) */ | |
2021 | BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake); | |
2022 | ||
1da177e4 LT |
2023 | iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, |
2024 | ioaddr + ChipCmd1); | |
2025 | IOSYNC; | |
2026 | } | |
2027 | else { | |
2028 | /* This should never happen */ | |
fc3e0f8a FR |
2029 | netif_warn(rp, tx_err, dev, "another error occurred %08x\n", |
2030 | intr_status); | |
1da177e4 LT |
2031 | } |
2032 | ||
2033 | } | |
2034 | ||
7ab87ff4 | 2035 | static void rhine_slow_event_task(struct work_struct *work) |
1da177e4 | 2036 | { |
7ab87ff4 FR |
2037 | struct rhine_private *rp = |
2038 | container_of(work, struct rhine_private, slow_event_task); | |
2039 | struct net_device *dev = rp->dev; | |
2040 | u32 intr_status; | |
1da177e4 | 2041 | |
7ab87ff4 FR |
2042 | mutex_lock(&rp->task_lock); |
2043 | ||
2044 | if (!rp->task_enable) | |
2045 | goto out_unlock; | |
2046 | ||
2047 | intr_status = rhine_get_events(rp); | |
2048 | rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW); | |
1da177e4 LT |
2049 | |
2050 | if (intr_status & IntrLinkChange) | |
38bb6b28 | 2051 | rhine_check_media(dev, 0); |
1da177e4 | 2052 | |
fc3e0f8a FR |
2053 | if (intr_status & IntrPCIErr) |
2054 | netif_warn(rp, hw, dev, "PCI error\n"); | |
2055 | ||
559bcac3 | 2056 | iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable); |
1da177e4 | 2057 | |
7ab87ff4 FR |
2058 | out_unlock: |
2059 | mutex_unlock(&rp->task_lock); | |
1da177e4 LT |
2060 | } |
2061 | ||
f7b5d1b9 JG |
2062 | static struct rtnl_link_stats64 * |
2063 | rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) | |
1da177e4 LT |
2064 | { |
2065 | struct rhine_private *rp = netdev_priv(dev); | |
f7b5d1b9 | 2066 | unsigned int start; |
1da177e4 | 2067 | |
7ab87ff4 FR |
2068 | spin_lock_bh(&rp->lock); |
2069 | rhine_update_rx_crc_and_missed_errord(rp); | |
2070 | spin_unlock_bh(&rp->lock); | |
1da177e4 | 2071 | |
f7b5d1b9 JG |
2072 | netdev_stats_to_stats64(stats, &dev->stats); |
2073 | ||
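/*
 * The fetch/retry loops below reread the packet/byte counters if a concurrent
 * writer (rhine_rx()/rhine_tx() under the per-ring syncp) updated them
 * mid-read, so the 64-bit values stay consistent on 32-bit machines as well.
 */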
2074 | do { | |
2075 | start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp); | |
2076 | stats->rx_packets = rp->rx_stats.packets; | |
2077 | stats->rx_bytes = rp->rx_stats.bytes; | |
2078 | } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start)); | |
2079 | ||
2080 | do { | |
2081 | start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp); | |
2082 | stats->tx_packets = rp->tx_stats.packets; | |
2083 | stats->tx_bytes = rp->tx_stats.bytes; | |
2084 | } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start)); | |
2085 | ||
2086 | return stats; | |
1da177e4 LT |
2087 | } |
2088 | ||
2089 | static void rhine_set_rx_mode(struct net_device *dev) | |
2090 | { | |
2091 | struct rhine_private *rp = netdev_priv(dev); | |
2092 | void __iomem *ioaddr = rp->base; | |
2093 | u32 mc_filter[2]; /* Multicast hash filter */ | |
38f49e88 RL |
2094 | u8 rx_mode = 0x0C; /* Note: 0x02=accept runt, 0x01=accept errs */ |
2095 | struct netdev_hw_addr *ha; | |
1da177e4 LT |
2096 | |
2097 | if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ | |
1da177e4 LT |
2098 | rx_mode = 0x1C; |
2099 | iowrite32(0xffffffff, ioaddr + MulticastFilter0); | |
2100 | iowrite32(0xffffffff, ioaddr + MulticastFilter1); | |
4cd24eaf | 2101 | } else if ((netdev_mc_count(dev) > multicast_filter_limit) || |
8e95a202 | 2102 | (dev->flags & IFF_ALLMULTI)) { |
1da177e4 LT |
2103 | /* Too many to match, or accept all multicasts. */ |
2104 | iowrite32(0xffffffff, ioaddr + MulticastFilter0); | |
2105 | iowrite32(0xffffffff, ioaddr + MulticastFilter1); | |
38f49e88 RL |
2106 | } else if (rp->pdev->revision >= VT6105M) { |
2107 | int i = 0; | |
2108 | u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */ | |
2109 | netdev_for_each_mc_addr(ha, dev) { | |
2110 | if (i == MCAM_SIZE) | |
2111 | break; | |
2112 | rhine_set_cam(ioaddr, i, ha->addr); | |
2113 | mCAMmask |= 1 << i; | |
2114 | i++; | |
2115 | } | |
2116 | rhine_set_cam_mask(ioaddr, mCAMmask); | |
1da177e4 | 2117 | } else { |
1da177e4 | 2118 | memset(mc_filter, 0, sizeof(mc_filter)); |
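/*
 * Hash filtering for older chips: ether_crc() >> 26 keeps the six most
 * significant CRC bits, giving a bit number 0-63; bit_nr >> 5 selects
 * MulticastFilter0 or MulticastFilter1 and bit_nr & 31 the bit within that
 * 32-bit register.
 */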
22bedad3 JP |
2119 | netdev_for_each_mc_addr(ha, dev) { |
2120 | int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; | |
1da177e4 LT |
2121 | |
2122 | mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); | |
2123 | } | |
2124 | iowrite32(mc_filter[0], ioaddr + MulticastFilter0); | |
2125 | iowrite32(mc_filter[1], ioaddr + MulticastFilter1); | |
1da177e4 | 2126 | } |
38f49e88 RL |
2127 | /* enable/disable VLAN receive filtering */ |
2128 | if (rp->pdev->revision >= VT6105M) { | |
2129 | if (dev->flags & IFF_PROMISC) | |
2130 | BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1); | |
2131 | else | |
2132 | BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1); | |
2133 | } | |
2134 | BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig); | |
1da177e4 LT |
2135 | } |
2136 | ||
2137 | static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | |
2138 | { | |
2139 | struct rhine_private *rp = netdev_priv(dev); | |
2140 | ||
23020ab3 RJ |
2141 | strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); |
2142 | strlcpy(info->version, DRV_VERSION, sizeof(info->version)); | |
2143 | strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info)); | |
1da177e4 LT |
2144 | } |
2145 | ||
2146 | static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
2147 | { | |
2148 | struct rhine_private *rp = netdev_priv(dev); | |
2149 | int rc; | |
2150 | ||
7ab87ff4 | 2151 | mutex_lock(&rp->task_lock); |
1da177e4 | 2152 | rc = mii_ethtool_gset(&rp->mii_if, cmd); |
7ab87ff4 | 2153 | mutex_unlock(&rp->task_lock); |
1da177e4 LT |
2154 | |
2155 | return rc; | |
2156 | } | |
2157 | ||
2158 | static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
2159 | { | |
2160 | struct rhine_private *rp = netdev_priv(dev); | |
2161 | int rc; | |
2162 | ||
7ab87ff4 | 2163 | mutex_lock(&rp->task_lock); |
1da177e4 | 2164 | rc = mii_ethtool_sset(&rp->mii_if, cmd); |
00b428c2 | 2165 | rhine_set_carrier(&rp->mii_if); |
7ab87ff4 | 2166 | mutex_unlock(&rp->task_lock); |
1da177e4 LT |
2167 | |
2168 | return rc; | |
2169 | } | |
2170 | ||
2171 | static int netdev_nway_reset(struct net_device *dev) | |
2172 | { | |
2173 | struct rhine_private *rp = netdev_priv(dev); | |
2174 | ||
2175 | return mii_nway_restart(&rp->mii_if); | |
2176 | } | |
2177 | ||
2178 | static u32 netdev_get_link(struct net_device *dev) | |
2179 | { | |
2180 | struct rhine_private *rp = netdev_priv(dev); | |
2181 | ||
2182 | return mii_link_ok(&rp->mii_if); | |
2183 | } | |
2184 | ||
2185 | static u32 netdev_get_msglevel(struct net_device *dev) | |
2186 | { | |
fc3e0f8a FR |
2187 | struct rhine_private *rp = netdev_priv(dev); |
2188 | ||
2189 | return rp->msg_enable; | |
1da177e4 LT |
2190 | } |
2191 | ||
2192 | static void netdev_set_msglevel(struct net_device *dev, u32 value) | |
2193 | { | |
fc3e0f8a FR |
2194 | struct rhine_private *rp = netdev_priv(dev); |
2195 | ||
2196 | rp->msg_enable = value; | |
1da177e4 LT |
2197 | } |
2198 | ||
2199 | static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |
2200 | { | |
2201 | struct rhine_private *rp = netdev_priv(dev); | |
2202 | ||
2203 | if (!(rp->quirks & rqWOL)) | |
2204 | return; | |
2205 | ||
2206 | spin_lock_irq(&rp->lock); | |
2207 | wol->supported = WAKE_PHY | WAKE_MAGIC | | |
2208 | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */ | |
2209 | wol->wolopts = rp->wolopts; | |
2210 | spin_unlock_irq(&rp->lock); | |
2211 | } | |
2212 | ||
2213 | static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |
2214 | { | |
2215 | struct rhine_private *rp = netdev_priv(dev); | |
2216 | u32 support = WAKE_PHY | WAKE_MAGIC | | |
2217 | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */ | |
2218 | ||
2219 | if (!(rp->quirks & rqWOL)) | |
2220 | return -EINVAL; | |
2221 | ||
2222 | if (wol->wolopts & ~support) | |
2223 | return -EINVAL; | |
2224 | ||
2225 | spin_lock_irq(&rp->lock); | |
2226 | rp->wolopts = wol->wolopts; | |
2227 | spin_unlock_irq(&rp->lock); | |
2228 | ||
2229 | return 0; | |
2230 | } | |
2231 | ||
7282d491 | 2232 | static const struct ethtool_ops netdev_ethtool_ops = { |
1da177e4 LT |
2233 | .get_drvinfo = netdev_get_drvinfo, |
2234 | .get_settings = netdev_get_settings, | |
2235 | .set_settings = netdev_set_settings, | |
2236 | .nway_reset = netdev_nway_reset, | |
2237 | .get_link = netdev_get_link, | |
2238 | .get_msglevel = netdev_get_msglevel, | |
2239 | .set_msglevel = netdev_set_msglevel, | |
2240 | .get_wol = rhine_get_wol, | |
2241 | .set_wol = rhine_set_wol, | |
1da177e4 LT |
2242 | }; |
2243 | ||
2244 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
2245 | { | |
2246 | struct rhine_private *rp = netdev_priv(dev); | |
2247 | int rc; | |
2248 | ||
2249 | if (!netif_running(dev)) | |
2250 | return -EINVAL; | |
2251 | ||
7ab87ff4 | 2252 | mutex_lock(&rp->task_lock); |
1da177e4 | 2253 | rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL); |
00b428c2 | 2254 | rhine_set_carrier(&rp->mii_if); |
7ab87ff4 | 2255 | mutex_unlock(&rp->task_lock); |
1da177e4 LT |
2256 | |
2257 | return rc; | |
2258 | } | |
2259 | ||
2260 | static int rhine_close(struct net_device *dev) | |
2261 | { | |
2262 | struct rhine_private *rp = netdev_priv(dev); | |
2263 | void __iomem *ioaddr = rp->base; | |
2264 | ||
7ab87ff4 | 2265 | rhine_task_disable(rp); |
bea3348e | 2266 | napi_disable(&rp->napi); |
c0d7a021 JP |
2267 | netif_stop_queue(dev); |
2268 | ||
fc3e0f8a FR |
2269 | netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n", |
2270 | ioread16(ioaddr + ChipCmd)); | |
1da177e4 LT |
2271 | |
2272 | /* Switch to loopback mode to avoid hardware races. */ | |
2273 | iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig); | |
2274 | ||
7ab87ff4 | 2275 | rhine_irq_disable(rp); |
1da177e4 LT |
2276 | |
2277 | /* Stop the chip's Tx and Rx processes. */ | |
2278 | iowrite16(CmdStop, ioaddr + ChipCmd); | |
2279 | ||
1da177e4 LT |
2280 | free_irq(rp->pdev->irq, dev); |
2281 | free_rbufs(dev); | |
2282 | free_tbufs(dev); | |
2283 | free_ring(dev); | |
2284 | ||
2285 | return 0; | |
2286 | } | |
2287 | ||
2288 | ||
76e239e1 | 2289 | static void rhine_remove_one(struct pci_dev *pdev) |
1da177e4 LT |
2290 | { |
2291 | struct net_device *dev = pci_get_drvdata(pdev); | |
2292 | struct rhine_private *rp = netdev_priv(dev); | |
2293 | ||
2294 | unregister_netdev(dev); | |
2295 | ||
2296 | pci_iounmap(pdev, rp->base); | |
2297 | pci_release_regions(pdev); | |
2298 | ||
2299 | free_netdev(dev); | |
2300 | pci_disable_device(pdev); | |
1da177e4 LT |
2301 | } |
2302 | ||
d18c3db5 | 2303 | static void rhine_shutdown (struct pci_dev *pdev) |
1da177e4 | 2304 | { |
1da177e4 LT |
2305 | struct net_device *dev = pci_get_drvdata(pdev); |
2306 | struct rhine_private *rp = netdev_priv(dev); | |
2307 | void __iomem *ioaddr = rp->base; | |
2308 | ||
2309 | if (!(rp->quirks & rqWOL)) | |
2310 | return; /* Nothing to do for non-WOL adapters */ | |
2311 | ||
2312 | rhine_power_init(dev); | |
2313 | ||
2314 | /* Make sure we use pattern 0, 1 and not 4, 5 */ | |
2315 | if (rp->quirks & rq6patterns) | |
f11cf25e | 2316 | iowrite8(0x04, ioaddr + WOLcgClr); |
1da177e4 | 2317 | |
7ab87ff4 FR |
2318 | spin_lock(&rp->lock); |
2319 | ||
1da177e4 LT |
2320 | if (rp->wolopts & WAKE_MAGIC) { |
2321 | iowrite8(WOLmagic, ioaddr + WOLcrSet); | |
2322 | /* | |
2323 | * Turn EEPROM-controlled wake-up back on -- some hardware may | |
2324 | * not cooperate otherwise. | |
2325 | */ | |
2326 | iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA); | |
2327 | } | |
2328 | ||
2329 | if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST)) | |
2330 | iowrite8(WOLbmcast, ioaddr + WOLcgSet); | |
2331 | ||
2332 | if (rp->wolopts & WAKE_PHY) | |
2333 | iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet); | |
2334 | ||
2335 | if (rp->wolopts & WAKE_UCAST) | |
2336 | iowrite8(WOLucast, ioaddr + WOLcrSet); | |
2337 | ||
2338 | if (rp->wolopts) { | |
2339 | /* Enable legacy WOL (for old motherboards) */ | |
2340 | iowrite8(0x01, ioaddr + PwcfgSet); | |
2341 | iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW); | |
2342 | } | |
2343 | ||
7ab87ff4 FR |
2344 | spin_unlock(&rp->lock); |
2345 | ||
e92b9b3b | 2346 | if (system_state == SYSTEM_POWER_OFF && !avoid_D3) { |
b933b4d9 | 2347 | iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); |
1da177e4 | 2348 | |
e92b9b3b FR |
2349 | pci_wake_from_d3(pdev, true); |
2350 | pci_set_power_state(pdev, PCI_D3hot); | |
2351 | } | |
1da177e4 LT |
2352 | } |
2353 | ||
e92b9b3b FR |
2354 | #ifdef CONFIG_PM_SLEEP |
2355 | static int rhine_suspend(struct device *device) | |
1da177e4 | 2356 | { |
e92b9b3b | 2357 | struct pci_dev *pdev = to_pci_dev(device); |
1da177e4 LT |
2358 | struct net_device *dev = pci_get_drvdata(pdev); |
2359 | struct rhine_private *rp = netdev_priv(dev); | |
1da177e4 LT |
2360 | |
2361 | if (!netif_running(dev)) | |
2362 | return 0; | |
2363 | ||
7ab87ff4 FR |
2364 | rhine_task_disable(rp); |
2365 | rhine_irq_disable(rp); | |
bea3348e | 2366 | napi_disable(&rp->napi); |
32b0f53e | 2367 | |
1da177e4 | 2368 | netif_device_detach(dev); |
1da177e4 | 2369 | |
d18c3db5 | 2370 | rhine_shutdown(pdev); |
1da177e4 | 2371 | |
1da177e4 LT |
2372 | return 0; |
2373 | } | |
2374 | ||
e92b9b3b | 2375 | static int rhine_resume(struct device *device) |
1da177e4 | 2376 | { |
e92b9b3b | 2377 | struct pci_dev *pdev = to_pci_dev(device); |
1da177e4 LT |
2378 | struct net_device *dev = pci_get_drvdata(pdev); |
2379 | struct rhine_private *rp = netdev_priv(dev); | |
1da177e4 LT |
2380 | |
2381 | if (!netif_running(dev)) | |
2382 | return 0; | |
2383 | ||
1da177e4 LT |
2384 | #ifdef USE_MMIO |
2385 | enable_mmio(rp->pioaddr, rp->quirks); | |
2386 | #endif | |
2387 | rhine_power_init(dev); | |
2388 | free_tbufs(dev); | |
2389 | free_rbufs(dev); | |
2390 | alloc_tbufs(dev); | |
2391 | alloc_rbufs(dev); | |
7ab87ff4 FR |
2392 | rhine_task_enable(rp); |
2393 | spin_lock_bh(&rp->lock); | |
1da177e4 | 2394 | init_registers(dev); |
7ab87ff4 | 2395 | spin_unlock_bh(&rp->lock); |
1da177e4 LT |
2396 | |
2397 | netif_device_attach(dev); | |
2398 | ||
2399 | return 0; | |
2400 | } | |
e92b9b3b FR |
2401 | |
2402 | static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume); | |
2403 | #define RHINE_PM_OPS (&rhine_pm_ops) | |
2404 | ||
2405 | #else | |
2406 | ||
2407 | #define RHINE_PM_OPS NULL | |
2408 | ||
2409 | #endif /* !CONFIG_PM_SLEEP */ | |
1da177e4 LT |
2410 | |
2411 | static struct pci_driver rhine_driver = { | |
2412 | .name = DRV_NAME, | |
2413 | .id_table = rhine_pci_tbl, | |
2414 | .probe = rhine_init_one, | |
76e239e1 | 2415 | .remove = rhine_remove_one, |
e92b9b3b FR |
2416 | .shutdown = rhine_shutdown, |
2417 | .driver.pm = RHINE_PM_OPS, | |
1da177e4 LT |
2418 | }; |
2419 | ||
77273eaa | 2420 | static struct dmi_system_id rhine_dmi_table[] __initdata = { |
e84df485 RL |
2421 | { |
2422 | .ident = "EPIA-M", | |
2423 | .matches = { | |
2424 | DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."), | |
2425 | DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"), | |
2426 | }, | |
2427 | }, | |
2428 | { | |
2429 | .ident = "KV7", | |
2430 | .matches = { | |
2431 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"), | |
2432 | DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"), | |
2433 | }, | |
2434 | }, | |
2435 | { NULL } | |
2436 | }; | |
1da177e4 LT |
2437 | |
2438 | static int __init rhine_init(void) | |
2439 | { | |
2440 | /* When built as a module, this is printed whether or not devices are found in probe. */ | |
2441 | #ifdef MODULE | |
df4511fe | 2442 | pr_info("%s\n", version); |
1da177e4 | 2443 | #endif |
e84df485 RL |
2444 | if (dmi_check_system(rhine_dmi_table)) { |
2445 | /* these BIOSes fail at PXE boot if chip is in D3 */ | |
eb939922 | 2446 | avoid_D3 = true; |
df4511fe | 2447 | pr_warn("Broken BIOS detected, avoid_D3 enabled\n"); |
e84df485 RL |
2448 | } |
2449 | else if (avoid_D3) | |
df4511fe | 2450 | pr_info("avoid_D3 set\n"); |
e84df485 | 2451 | |
29917620 | 2452 | return pci_register_driver(&rhine_driver); |
1da177e4 LT |
2453 | } |
2454 | ||
2455 | ||
2456 | static void __exit rhine_cleanup(void) | |
2457 | { | |
2458 | pci_unregister_driver(&rhine_driver); | |
2459 | } | |
2460 | ||
2461 | ||
2462 | module_init(rhine_init); | |
2463 | module_exit(rhine_cleanup); |