1/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
2/*
3 Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
4
5 Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
6 Written/copyright 1994-2001 by Donald Becker. [tulip.c]
7
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL.
14
15 See the file COPYING in this distribution for more information.
16
17 TODO, in rough priority order:
18 * Support forcing media type with a module parameter,
19 like dl2k.c/sundance.c
20 * Constants (module parms?) for Rx work limit
21 * Complete reset on PciErr
22 * Jumbo frames / dev->change_mtu
23 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25 * Implement Tx software interrupt mitigation via
26 Tx descriptor bit
27
28 */
29
30#define DRV_NAME "de2104x"
31#define DRV_VERSION "0.7"
32#define DRV_RELDATE "Mar 17, 2004"
33
34#include <linux/module.h>
35#include <linux/kernel.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/init.h>
39#include <linux/pci.h>
40#include <linux/delay.h>
41#include <linux/ethtool.h>
42#include <linux/compiler.h>
43#include <linux/rtnetlink.h>
44#include <linux/crc32.h>
45#include <linux/slab.h>
46
47#include <asm/io.h>
48#include <asm/irq.h>
49#include <asm/uaccess.h>
50#include <asm/unaligned.h>
51
52/* These identify the driver base version and may not be removed. */
53static char version[] =
54KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
55
56MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
57MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
58MODULE_LICENSE("GPL");
59MODULE_VERSION(DRV_VERSION);
60
61static int debug = -1;
62module_param (debug, int, 0);
63MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
64
65/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
66#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
67 defined(CONFIG_SPARC) || defined(__ia64__) || \
68 defined(__sh__) || defined(__mips__)
69static int rx_copybreak = 1518;
70#else
71static int rx_copybreak = 100;
72#endif
73module_param (rx_copybreak, int, 0);
74MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
75
76#define PFX DRV_NAME ": "
77
78#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
79 NETIF_MSG_PROBE | \
80 NETIF_MSG_LINK | \
81 NETIF_MSG_IFDOWN | \
82 NETIF_MSG_IFUP | \
83 NETIF_MSG_RX_ERR | \
84 NETIF_MSG_TX_ERR)
85
86/* Descriptor skip length in 32 bit longwords. */
87#ifndef CONFIG_DE2104X_DSL
88#define DSL 0
89#else
90#define DSL CONFIG_DE2104X_DSL
91#endif
92
93#define DE_RX_RING_SIZE 64
94#define DE_TX_RING_SIZE 64
95#define DE_RING_BYTES \
96 ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
97 (sizeof(struct de_desc) * DE_TX_RING_SIZE))
98#define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
99#define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
100#define TX_BUFFS_AVAIL(CP) \
101 (((CP)->tx_tail <= (CP)->tx_head) ? \
102 (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
103 (CP)->tx_tail - (CP)->tx_head - 1)
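/* Note: one descriptor slot is deliberately kept unused (the "- 1" above),
   so tx_head == tx_tail always means "ring empty" and never "ring full". */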
104
105#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
106#define RX_OFFSET 2
107
108#define DE_SETUP_SKB ((struct sk_buff *) 1)
109#define DE_DUMMY_SKB ((struct sk_buff *) 2)
110#define DE_SETUP_FRAME_WORDS 96
111#define DE_EEPROM_WORDS 256
112#define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
113#define DE_MAX_MEDIA 5
114
115#define DE_MEDIA_TP_AUTO 0
116#define DE_MEDIA_BNC 1
117#define DE_MEDIA_AUI 2
118#define DE_MEDIA_TP 3
119#define DE_MEDIA_TP_FD 4
120#define DE_MEDIA_INVALID DE_MAX_MEDIA
121#define DE_MEDIA_FIRST 0
122#define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
123#define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
124
125#define DE_TIMER_LINK (60 * HZ)
126#define DE_TIMER_NO_LINK (5 * HZ)
127
128#define DE_NUM_REGS 16
129#define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
130#define DE_REGS_VER 1
131
132/* Time in jiffies before concluding the transmitter is hung. */
133#define TX_TIMEOUT (6*HZ)
134
135/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
136 to support a pre-NWay full-duplex signaling mechanism using short frames.
137 No one knows what it should be, but if left at its default value some
138 10base2(!) packets trigger a full-duplex-request interrupt. */
139#define FULL_DUPLEX_MAGIC 0x6969
140
141enum {
142 /* NIC registers */
143 BusMode = 0x00,
144 TxPoll = 0x08,
145 RxPoll = 0x10,
146 RxRingAddr = 0x18,
147 TxRingAddr = 0x20,
148 MacStatus = 0x28,
149 MacMode = 0x30,
150 IntrMask = 0x38,
151 RxMissed = 0x40,
152 ROMCmd = 0x48,
153 CSR11 = 0x58,
154 SIAStatus = 0x60,
155 CSR13 = 0x68,
156 CSR14 = 0x70,
157 CSR15 = 0x78,
158 PCIPM = 0x40,
159
160 /* BusMode bits */
161 CmdReset = (1 << 0),
162 CacheAlign16 = 0x00008000,
163 BurstLen4 = 0x00000400,
 164 DescSkipLen = (DSL << 2),
165
166 /* Rx/TxPoll bits */
167 NormalTxPoll = (1 << 0),
168 NormalRxPoll = (1 << 0),
169
170 /* Tx/Rx descriptor status bits */
171 DescOwn = (1 << 31),
172 RxError = (1 << 15),
173 RxErrLong = (1 << 7),
174 RxErrCRC = (1 << 1),
175 RxErrFIFO = (1 << 0),
176 RxErrRunt = (1 << 11),
177 RxErrFrame = (1 << 14),
178 RingEnd = (1 << 25),
179 FirstFrag = (1 << 29),
180 LastFrag = (1 << 30),
181 TxError = (1 << 15),
182 TxFIFOUnder = (1 << 1),
183 TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
184 TxMaxCol = (1 << 8),
185 TxOWC = (1 << 9),
186 TxJabber = (1 << 14),
187 SetupFrame = (1 << 27),
188 TxSwInt = (1 << 31),
189
190 /* MacStatus bits */
191 IntrOK = (1 << 16),
192 IntrErr = (1 << 15),
193 RxIntr = (1 << 6),
194 RxEmpty = (1 << 7),
195 TxIntr = (1 << 0),
196 TxEmpty = (1 << 2),
197 PciErr = (1 << 13),
198 TxState = (1 << 22) | (1 << 21) | (1 << 20),
199 RxState = (1 << 19) | (1 << 18) | (1 << 17),
200 LinkFail = (1 << 12),
201 LinkPass = (1 << 4),
202 RxStopped = (1 << 8),
203 TxStopped = (1 << 1),
204
205 /* MacMode bits */
206 TxEnable = (1 << 13),
207 RxEnable = (1 << 1),
208 RxTx = TxEnable | RxEnable,
209 FullDuplex = (1 << 9),
210 AcceptAllMulticast = (1 << 7),
211 AcceptAllPhys = (1 << 6),
212 BOCnt = (1 << 5),
213 MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
214 RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
215
216 /* ROMCmd bits */
217 EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */
218 EE_CS = 0x01, /* EEPROM chip select. */
219 EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
220 EE_WRITE_0 = 0x01,
221 EE_WRITE_1 = 0x05,
222 EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */
223 EE_ENB = (0x4800 | EE_CS),
224
 225 /* The EEPROM commands include the always-set leading bit. */
226 EE_READ_CMD = 6,
227
228 /* RxMissed bits */
229 RxMissedOver = (1 << 16),
230 RxMissedMask = 0xffff,
231
232 /* SROM-related bits */
233 SROMC0InfoLeaf = 27,
234 MediaBlockMask = 0x3f,
235 MediaCustomCSRs = (1 << 6),
 236
237 /* PCIPM bits */
238 PM_Sleep = (1 << 31),
239 PM_Snooze = (1 << 30),
240 PM_Mask = PM_Sleep | PM_Snooze,
 241
242 /* SIAStatus bits */
243 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
244 NWayRestart = (1 << 12),
245 NonselPortActive = (1 << 9),
 246 SelPortActive = (1 << 8),
247 LinkFailStatus = (1 << 2),
248 NetCxnErr = (1 << 1),
249};
250
251static const u32 de_intr_mask =
252 IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
253 LinkPass | LinkFail | PciErr;
254
255/*
256 * Set the programmable burst length to 4 longwords for all:
 257 * DMA errors result without these values. Cache-align at 16 longwords.
258 */
 259static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
260
261struct de_srom_media_block {
262 u8 opts;
263 u16 csr13;
264 u16 csr14;
265 u16 csr15;
 266} __packed;
267
268struct de_srom_info_leaf {
269 u16 default_media;
270 u8 n_blocks;
271 u8 unused;
 272} __packed;
273
274struct de_desc {
275 __le32 opts1;
276 __le32 opts2;
277 __le32 addr1;
278 __le32 addr2;
279#if DSL
280 __le32 skip[DSL];
281#endif
282};
283
284struct media_info {
285 u16 type; /* DE_MEDIA_xxx */
286 u16 csr13;
287 u16 csr14;
288 u16 csr15;
289};
290
291struct ring_info {
292 struct sk_buff *skb;
293 dma_addr_t mapping;
294};
295
296struct de_private {
297 unsigned tx_head;
298 unsigned tx_tail;
299 unsigned rx_tail;
300
301 void __iomem *regs;
302 struct net_device *dev;
303 spinlock_t lock;
304
305 struct de_desc *rx_ring;
306 struct de_desc *tx_ring;
307 struct ring_info tx_skb[DE_TX_RING_SIZE];
308 struct ring_info rx_skb[DE_RX_RING_SIZE];
309 unsigned rx_buf_sz;
310 dma_addr_t ring_dma;
311
312 u32 msg_enable;
313
314 struct net_device_stats net_stats;
315
316 struct pci_dev *pdev;
317
318 u16 setup_frame[DE_SETUP_FRAME_WORDS];
319
320 u32 media_type;
321 u32 media_supported;
322 u32 media_advertise;
323 struct media_info media[DE_MAX_MEDIA];
324 struct timer_list media_timer;
325
326 u8 *ee_data;
327 unsigned board_idx;
328 unsigned de21040 : 1;
329 unsigned media_lock : 1;
330};
331
332
333static void de_set_rx_mode (struct net_device *dev);
334static void de_tx (struct de_private *de);
335static void de_clean_rings (struct de_private *de);
336static void de_media_interrupt (struct de_private *de, u32 status);
337static void de21040_media_timer (unsigned long data);
338static void de21041_media_timer (unsigned long data);
339static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
340
341
 342static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
343 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
344 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
345 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
346 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
347 { },
348};
349MODULE_DEVICE_TABLE(pci, de_pci_tbl);
350
351static const char * const media_name[DE_MAX_MEDIA] = {
352 "10baseT auto",
353 "BNC",
354 "AUI",
355 "10baseT-HD",
356 "10baseT-FD"
357};
358
359/* 21040 transceiver register settings:
360 * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
361static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
362static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
363static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
364
365/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
366static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
 367static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
 368/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
 369static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
370static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
371
372
373#define dr32(reg) ioread32(de->regs + (reg))
374#define dw32(reg, val) iowrite32((val), de->regs + (reg))
375
376
377static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
378 u32 status, u32 len)
379{
380 if (netif_msg_rx_err (de))
381 printk (KERN_DEBUG
382 "%s: rx err, slot %d status 0x%x len %d\n",
383 de->dev->name, rx_tail, status, len);
384
385 if ((status & 0x38000300) != 0x0300) {
 386 /* Ignore earlier buffers. */
387 if ((status & 0xffff) != 0x7fff) {
388 if (netif_msg_rx_err(de))
389 dev_warn(&de->dev->dev,
390 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
391 status);
392 de->net_stats.rx_length_errors++;
393 }
394 } else if (status & RxError) {
395 /* There was a fatal error. */
396 de->net_stats.rx_errors++; /* end of a packet.*/
397 if (status & 0x0890) de->net_stats.rx_length_errors++;
398 if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
399 if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
400 }
401}
402
403static void de_rx (struct de_private *de)
404{
405 unsigned rx_tail = de->rx_tail;
406 unsigned rx_work = DE_RX_RING_SIZE;
407 unsigned drop = 0;
408 int rc;
409
 410 while (--rx_work) {
411 u32 status, len;
412 dma_addr_t mapping;
413 struct sk_buff *skb, *copy_skb;
414 unsigned copying_skb, buflen;
415
416 skb = de->rx_skb[rx_tail].skb;
 417 BUG_ON(!skb);
418 rmb();
419 status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
420 if (status & DescOwn)
421 break;
422
423 len = ((status >> 16) & 0x7ff) - 4;
424 mapping = de->rx_skb[rx_tail].mapping;
425
426 if (unlikely(drop)) {
427 de->net_stats.rx_dropped++;
428 goto rx_next;
429 }
430
431 if (unlikely((status & 0x38008300) != 0x0300)) {
432 de_rx_err_acct(de, rx_tail, status, len);
433 goto rx_next;
434 }
435
436 copying_skb = (len <= rx_copybreak);
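 /* Copybreak: frames no longer than rx_copybreak are copied into a small,
    freshly allocated skb so the full-sized ring buffer can be handed back
    to the chip; larger frames are passed up directly and the ring slot is
    refilled with the new allocation instead. */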
437
438 if (unlikely(netif_msg_rx_status(de)))
439 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
440 de->dev->name, rx_tail, status, len,
441 copying_skb);
442
443 buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
444 copy_skb = dev_alloc_skb (buflen);
445 if (unlikely(!copy_skb)) {
446 de->net_stats.rx_dropped++;
447 drop = 1;
448 rx_work = 100;
449 goto rx_next;
450 }
451
452 if (!copying_skb) {
453 pci_unmap_single(de->pdev, mapping,
454 buflen, PCI_DMA_FROMDEVICE);
455 skb_put(skb, len);
456
457 mapping =
458 de->rx_skb[rx_tail].mapping =
 459 pci_map_single(de->pdev, copy_skb->data,
460 buflen, PCI_DMA_FROMDEVICE);
461 de->rx_skb[rx_tail].skb = copy_skb;
462 } else {
463 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
464 skb_reserve(copy_skb, RX_OFFSET);
465 skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
466 len);
467 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
468
469 /* We'll reuse the original ring buffer. */
470 skb = copy_skb;
471 }
472
473 skb->protocol = eth_type_trans (skb, de->dev);
474
475 de->net_stats.rx_packets++;
476 de->net_stats.rx_bytes += skb->len;
477 rc = netif_rx (skb);
478 if (rc == NET_RX_DROP)
479 drop = 1;
480
481rx_next:
482 if (rx_tail == (DE_RX_RING_SIZE - 1))
483 de->rx_ring[rx_tail].opts2 =
484 cpu_to_le32(RingEnd | de->rx_buf_sz);
485 else
486 de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
487 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
488 wmb();
489 de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
490 rx_tail = NEXT_RX(rx_tail);
491 }
492
493 if (!rx_work)
 494 dev_warn(&de->dev->dev, "rx work limit reached\n");
495
496 de->rx_tail = rx_tail;
497}
498
 499static irqreturn_t de_interrupt (int irq, void *dev_instance)
500{
501 struct net_device *dev = dev_instance;
 502 struct de_private *de = netdev_priv(dev);
503 u32 status;
504
505 status = dr32(MacStatus);
506 if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
507 return IRQ_NONE;
508
509 if (netif_msg_intr(de))
510 printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
511 dev->name, status, dr32(MacMode),
512 de->rx_tail, de->tx_head, de->tx_tail);
513
514 dw32(MacStatus, status);
515
516 if (status & (RxIntr | RxEmpty)) {
517 de_rx(de);
518 if (status & RxEmpty)
519 dw32(RxPoll, NormalRxPoll);
520 }
521
522 spin_lock(&de->lock);
523
524 if (status & (TxIntr | TxEmpty))
525 de_tx(de);
526
527 if (status & (LinkPass | LinkFail))
528 de_media_interrupt(de, status);
529
530 spin_unlock(&de->lock);
531
532 if (status & PciErr) {
533 u16 pci_status;
534
535 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
536 pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
537 dev_err(&de->dev->dev,
538 "PCI bus error, status=%08x, PCI status=%04x\n",
539 status, pci_status);
540 }
541
542 return IRQ_HANDLED;
543}
544
545static void de_tx (struct de_private *de)
546{
547 unsigned tx_head = de->tx_head;
548 unsigned tx_tail = de->tx_tail;
549
550 while (tx_tail != tx_head) {
551 struct sk_buff *skb;
552 u32 status;
553
554 rmb();
555 status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
556 if (status & DescOwn)
557 break;
558
559 skb = de->tx_skb[tx_tail].skb;
 560 BUG_ON(!skb);
561 if (unlikely(skb == DE_DUMMY_SKB))
562 goto next;
563
564 if (unlikely(skb == DE_SETUP_SKB)) {
565 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
566 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
567 goto next;
568 }
569
570 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
571 skb->len, PCI_DMA_TODEVICE);
572
573 if (status & LastFrag) {
574 if (status & TxError) {
575 if (netif_msg_tx_err(de))
576 printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
577 de->dev->name, status);
578 de->net_stats.tx_errors++;
579 if (status & TxOWC)
580 de->net_stats.tx_window_errors++;
581 if (status & TxMaxCol)
582 de->net_stats.tx_aborted_errors++;
583 if (status & TxLinkFail)
584 de->net_stats.tx_carrier_errors++;
585 if (status & TxFIFOUnder)
586 de->net_stats.tx_fifo_errors++;
587 } else {
588 de->net_stats.tx_packets++;
589 de->net_stats.tx_bytes += skb->len;
590 if (netif_msg_tx_done(de))
591 printk(KERN_DEBUG "%s: tx done, slot %d\n",
592 de->dev->name, tx_tail);
593 }
594 dev_kfree_skb_irq(skb);
595 }
596
597next:
598 de->tx_skb[tx_tail].skb = NULL;
599
600 tx_tail = NEXT_TX(tx_tail);
601 }
602
603 de->tx_tail = tx_tail;
604
605 if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
606 netif_wake_queue(de->dev);
607}
608
609static netdev_tx_t de_start_xmit (struct sk_buff *skb,
610 struct net_device *dev)
 611{
 612 struct de_private *de = netdev_priv(dev);
613 unsigned int entry, tx_free;
614 u32 mapping, len, flags = FirstFrag | LastFrag;
615 struct de_desc *txd;
616
617 spin_lock_irq(&de->lock);
618
619 tx_free = TX_BUFFS_AVAIL(de);
620 if (tx_free == 0) {
621 netif_stop_queue(dev);
622 spin_unlock_irq(&de->lock);
 623 return NETDEV_TX_BUSY;
624 }
625 tx_free--;
626
627 entry = de->tx_head;
628
629 txd = &de->tx_ring[entry];
630
631 len = skb->len;
632 mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
633 if (entry == (DE_TX_RING_SIZE - 1))
634 flags |= RingEnd;
635 if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
636 flags |= TxSwInt;
637 flags |= len;
638 txd->opts2 = cpu_to_le32(flags);
639 txd->addr1 = cpu_to_le32(mapping);
640
641 de->tx_skb[entry].skb = skb;
642 de->tx_skb[entry].mapping = mapping;
643 wmb();
644
645 txd->opts1 = cpu_to_le32(DescOwn);
646 wmb();
647
648 de->tx_head = NEXT_TX(entry);
649 if (netif_msg_tx_queued(de))
650 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
651 dev->name, entry, skb->len);
652
653 if (tx_free == 0)
654 netif_stop_queue(dev);
655
656 spin_unlock_irq(&de->lock);
657
658 /* Trigger an immediate transmit demand. */
659 dw32(TxPoll, NormalTxPoll);
 660
 661 return NETDEV_TX_OK;
662}
663
664/* Set or clear the multicast filter for this adaptor.
665 Note that we only use exclusion around actually queueing the
666 new frame, not around filling de->setup_frame. This is non-deterministic
667 when re-entered but still correct. */
668
669#undef set_bit_le
670#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
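/* set_bit_le() sets bit i of a byte array in little-endian bit order; it is
   used below to build the 512-entry (9-bit CRC index) multicast hash table
   that is loaded into the chip via a setup frame. */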
671
672static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
673{
 674 struct de_private *de = netdev_priv(dev);
 675 u16 hash_table[32];
 676 struct netdev_hw_addr *ha;
677 int i;
678 u16 *eaddrs;
679
680 memset(hash_table, 0, sizeof(hash_table));
681 set_bit_le(255, hash_table); /* Broadcast entry */
682 /* This should work on big-endian machines as well. */
683 netdev_for_each_mc_addr(ha, dev) {
684 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
685
686 set_bit_le(index, hash_table);
 687 }
 688
689 for (i = 0; i < 32; i++) {
690 *setup_frm++ = hash_table[i];
691 *setup_frm++ = hash_table[i];
 692 }
 693 setup_frm = &de->setup_frame[13*6];
694
695 /* Fill the final entry with our physical address. */
696 eaddrs = (u16 *)dev->dev_addr;
697 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
698 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
699 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
700}
701
702static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
703{
 704 struct de_private *de = netdev_priv(dev);
 705 struct netdev_hw_addr *ha;
706 u16 *eaddrs;
707
708 /* We have <= 14 addresses so we can use the wonderful
709 16 address perfect filtering of the Tulip. */
710 netdev_for_each_mc_addr(ha, dev) {
711 eaddrs = (u16 *) ha->addr;
712 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
713 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
714 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
715 }
716 /* Fill the unused entries with the broadcast address. */
 717 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
718 setup_frm = &de->setup_frame[15*6];
719
720 /* Fill the final entry with our physical address. */
721 eaddrs = (u16 *)dev->dev_addr;
722 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
723 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
724 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
725}
726
727
728static void __de_set_rx_mode (struct net_device *dev)
729{
 730 struct de_private *de = netdev_priv(dev);
731 u32 macmode;
732 unsigned int entry;
733 u32 mapping;
734 struct de_desc *txd;
735 struct de_desc *dummy_txd = NULL;
736
737 macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
738
739 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
740 macmode |= AcceptAllMulticast | AcceptAllPhys;
741 goto out;
742 }
743
 744 if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
745 /* Too many to filter well -- accept all multicasts. */
746 macmode |= AcceptAllMulticast;
747 goto out;
748 }
749
750 /* Note that only the low-address shortword of setup_frame is valid!
751 The values are doubled for big-endian architectures. */
 752 if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
753 build_setup_frame_hash (de->setup_frame, dev);
754 else
755 build_setup_frame_perfect (de->setup_frame, dev);
756
757 /*
758 * Now add this frame to the Tx list.
759 */
760
761 entry = de->tx_head;
762
 763 /* Avoid a chip erratum by prefixing a dummy entry. */
764 if (entry != 0) {
765 de->tx_skb[entry].skb = DE_DUMMY_SKB;
766
767 dummy_txd = &de->tx_ring[entry];
768 dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
769 cpu_to_le32(RingEnd) : 0;
770 dummy_txd->addr1 = 0;
771
772 /* Must set DescOwned later to avoid race with chip */
773
774 entry = NEXT_TX(entry);
775 }
776
777 de->tx_skb[entry].skb = DE_SETUP_SKB;
778 de->tx_skb[entry].mapping = mapping =
779 pci_map_single (de->pdev, de->setup_frame,
780 sizeof (de->setup_frame), PCI_DMA_TODEVICE);
781
782 /* Put the setup frame on the Tx list. */
783 txd = &de->tx_ring[entry];
784 if (entry == (DE_TX_RING_SIZE - 1))
785 txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
786 else
787 txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
788 txd->addr1 = cpu_to_le32(mapping);
789 wmb();
790
791 txd->opts1 = cpu_to_le32(DescOwn);
792 wmb();
793
794 if (dummy_txd) {
795 dummy_txd->opts1 = cpu_to_le32(DescOwn);
796 wmb();
797 }
798
799 de->tx_head = NEXT_TX(entry);
800
801 if (TX_BUFFS_AVAIL(de) == 0)
802 netif_stop_queue(dev);
803
804 /* Trigger an immediate transmit demand. */
805 dw32(TxPoll, NormalTxPoll);
806
807out:
808 if (macmode != dr32(MacMode))
809 dw32(MacMode, macmode);
810}
811
812static void de_set_rx_mode (struct net_device *dev)
813{
814 unsigned long flags;
 815 struct de_private *de = netdev_priv(dev);
816
817 spin_lock_irqsave (&de->lock, flags);
818 __de_set_rx_mode(dev);
819 spin_unlock_irqrestore (&de->lock, flags);
820}
821
822static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
823{
824 if (unlikely(rx_missed & RxMissedOver))
825 de->net_stats.rx_missed_errors += RxMissedMask;
826 else
827 de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
828}
829
830static void __de_get_stats(struct de_private *de)
831{
832 u32 tmp = dr32(RxMissed); /* self-clearing */
833
834 de_rx_missed(de, tmp);
835}
836
837static struct net_device_stats *de_get_stats(struct net_device *dev)
838{
 839 struct de_private *de = netdev_priv(dev);
840
 841 /* The chip only needs to report frames it silently dropped. */
842 spin_lock_irq(&de->lock);
843 if (netif_running(dev) && netif_device_present(dev))
844 __de_get_stats(de);
845 spin_unlock_irq(&de->lock);
846
847 return &de->net_stats;
848}
849
850static inline int de_is_running (struct de_private *de)
851{
852 return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
853}
854
855static void de_stop_rxtx (struct de_private *de)
856{
857 u32 macmode;
 858 unsigned int i = 1300/100;
859
860 macmode = dr32(MacMode);
861 if (macmode & RxTx) {
862 dw32(MacMode, macmode & ~RxTx);
863 dr32(MacMode);
864 }
865
866 /* wait until in-flight frame completes.
867 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
868 * Typically expect this loop to end in < 50 us on 100BT.
869 */
870 while (--i) {
871 if (!de_is_running(de))
872 return;
 873 udelay(100);
 874 }
 875
 876 dev_warn(&de->dev->dev, "timeout expired stopping DMA\n");
877}
878
879static inline void de_start_rxtx (struct de_private *de)
880{
881 u32 macmode;
882
883 macmode = dr32(MacMode);
884 if ((macmode & RxTx) != RxTx) {
885 dw32(MacMode, macmode | RxTx);
886 dr32(MacMode);
887 }
888}
889
890static void de_stop_hw (struct de_private *de)
891{
892
893 udelay(5);
894 dw32(IntrMask, 0);
895
896 de_stop_rxtx(de);
897
898 dw32(MacStatus, dr32(MacStatus));
899
900 udelay(10);
901
902 de->rx_tail = 0;
903 de->tx_head = de->tx_tail = 0;
904}
905
906static void de_link_up(struct de_private *de)
907{
908 if (!netif_carrier_ok(de->dev)) {
909 netif_carrier_on(de->dev);
910 if (netif_msg_link(de))
911 dev_info(&de->dev->dev, "link up, media %s\n",
912 media_name[de->media_type]);
913 }
914}
915
916static void de_link_down(struct de_private *de)
917{
918 if (netif_carrier_ok(de->dev)) {
919 netif_carrier_off(de->dev);
920 if (netif_msg_link(de))
 921 dev_info(&de->dev->dev, "link down\n");
922 }
923}
924
925static void de_set_media (struct de_private *de)
926{
927 unsigned media = de->media_type;
928 u32 macmode = dr32(MacMode);
929
 930 if (de_is_running(de))
931 dev_warn(&de->dev->dev,
932 "chip is running while changing media!\n");
933
934 if (de->de21040)
935 dw32(CSR11, FULL_DUPLEX_MAGIC);
936 dw32(CSR13, 0); /* Reset phy */
937 dw32(CSR14, de->media[media].csr14);
938 dw32(CSR15, de->media[media].csr15);
939 dw32(CSR13, de->media[media].csr13);
940
941 /* must delay 10ms before writing to other registers,
942 * especially CSR6
943 */
944 mdelay(10);
945
946 if (media == DE_MEDIA_TP_FD)
947 macmode |= FullDuplex;
948 else
949 macmode &= ~FullDuplex;
 950
 951 if (netif_msg_link(de))
 952 dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
 953 if (netif_msg_hw(de)) {
954 dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
955 dr32(MacMode), dr32(SIAStatus),
956 dr32(CSR13), dr32(CSR14), dr32(CSR15));
957
958 dev_info(&de->dev->dev,
959 "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
960 macmode, de->media[media].csr13,
961 de->media[media].csr14, de->media[media].csr15);
962 }
963 if (macmode != dr32(MacMode))
964 dw32(MacMode, macmode);
965}
966
 967static void de_next_media (struct de_private *de, const u32 *media,
968 unsigned int n_media)
969{
970 unsigned int i;
971
972 for (i = 0; i < n_media; i++) {
973 if (de_ok_to_advertise(de, media[i])) {
974 de->media_type = media[i];
975 return;
976 }
977 }
978}
979
980static void de21040_media_timer (unsigned long data)
981{
982 struct de_private *de = (struct de_private *) data;
983 struct net_device *dev = de->dev;
984 u32 status = dr32(SIAStatus);
985 unsigned int carrier;
986 unsigned long flags;
 987
 988 carrier = (status & NetCxnErr) ? 0 : 1;
 989
990 if (carrier) {
991 if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
992 goto no_link_yet;
993
994 de->media_timer.expires = jiffies + DE_TIMER_LINK;
995 add_timer(&de->media_timer);
996 if (!netif_carrier_ok(dev))
997 de_link_up(de);
998 else
999 if (netif_msg_timer(de))
1000 dev_info(&dev->dev, "%s link ok, status %x\n",
1001 media_name[de->media_type], status);
1002 return;
1003 }
1004
 1005 de_link_down(de);
1006
1007 if (de->media_lock)
1008 return;
1009
1010 if (de->media_type == DE_MEDIA_AUI) {
 1011 static const u32 next_state = DE_MEDIA_TP;
1012 de_next_media(de, &next_state, 1);
1013 } else {
 1014 static const u32 next_state = DE_MEDIA_AUI;
1015 de_next_media(de, &next_state, 1);
1016 }
1017
1018 spin_lock_irqsave(&de->lock, flags);
1019 de_stop_rxtx(de);
1020 spin_unlock_irqrestore(&de->lock, flags);
1021 de_set_media(de);
1022 de_start_rxtx(de);
1023
1024no_link_yet:
1025 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1026 add_timer(&de->media_timer);
1027
1028 if (netif_msg_timer(de))
1029 dev_info(&dev->dev, "no link, trying media %s, status %x\n",
1030 media_name[de->media_type], status);
1031}
1032
1033static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1034{
1035 switch (new_media) {
1036 case DE_MEDIA_TP_AUTO:
1037 if (!(de->media_advertise & ADVERTISED_Autoneg))
1038 return 0;
1039 if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1040 return 0;
1041 break;
1042 case DE_MEDIA_BNC:
1043 if (!(de->media_advertise & ADVERTISED_BNC))
1044 return 0;
1045 break;
1046 case DE_MEDIA_AUI:
1047 if (!(de->media_advertise & ADVERTISED_AUI))
1048 return 0;
1049 break;
1050 case DE_MEDIA_TP:
1051 if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1052 return 0;
1053 break;
1054 case DE_MEDIA_TP_FD:
1055 if (!(de->media_advertise & ADVERTISED_10baseT_Full))
1056 return 0;
1057 break;
1058 }
 1059
1060 return 1;
1061}
1062
1063static void de21041_media_timer (unsigned long data)
1064{
1065 struct de_private *de = (struct de_private *) data;
1066 struct net_device *dev = de->dev;
1067 u32 status = dr32(SIAStatus);
1068 unsigned int carrier;
1069 unsigned long flags;
 1070
1071 /* clear port active bits */
1072 dw32(SIAStatus, NonselPortActive | SelPortActive);
1073
 1074 carrier = (status & NetCxnErr) ? 0 : 1;
 1075
1076 if (carrier) {
1077 if ((de->media_type == DE_MEDIA_TP_AUTO ||
1078 de->media_type == DE_MEDIA_TP ||
1079 de->media_type == DE_MEDIA_TP_FD) &&
1080 (status & LinkFailStatus))
1081 goto no_link_yet;
1082
1083 de->media_timer.expires = jiffies + DE_TIMER_LINK;
1084 add_timer(&de->media_timer);
1085 if (!netif_carrier_ok(dev))
1086 de_link_up(de);
1087 else
1088 if (netif_msg_timer(de))
1089 dev_info(&dev->dev,
1090 "%s link ok, mode %x status %x\n",
1091 media_name[de->media_type],
1092 dr32(MacMode), status);
1093 return;
1094 }
1095
 1096 de_link_down(de);
1097
1098 /* if media type locked, don't switch media */
1099 if (de->media_lock)
1100 goto set_media;
1101
1102 /* if activity detected, use that as hint for new media type */
1103 if (status & NonselPortActive) {
1104 unsigned int have_media = 1;
1105
1106 /* if AUI/BNC selected, then activity is on TP port */
1107 if (de->media_type == DE_MEDIA_AUI ||
1108 de->media_type == DE_MEDIA_BNC) {
1109 if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
1110 de->media_type = DE_MEDIA_TP_AUTO;
1111 else
1112 have_media = 0;
1113 }
1114
1115 /* TP selected. If there is only TP and BNC, then it's BNC */
1116 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
1117 de_ok_to_advertise(de, DE_MEDIA_BNC))
1118 de->media_type = DE_MEDIA_BNC;
1119
1120 /* TP selected. If there is only TP and AUI, then it's AUI */
1121 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
1122 de_ok_to_advertise(de, DE_MEDIA_AUI))
1123 de->media_type = DE_MEDIA_AUI;
1124
1125 /* otherwise, ignore the hint */
1126 else
1127 have_media = 0;
1128
1129 if (have_media)
1130 goto set_media;
1131 }
1132
1133 /*
1134 * Absent or ambiguous activity hint, move to next advertised
1135 * media state. If de->media_type is left unchanged, this
1136 * simply resets the PHY and reloads the current media settings.
1137 */
1138 if (de->media_type == DE_MEDIA_AUI) {
1139 static const u32 next_states[] = {
1140 DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
1141 };
1142 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1143 } else if (de->media_type == DE_MEDIA_BNC) {
1144 static const u32 next_states[] = {
1145 DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
1146 };
1147 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1148 } else {
1149 static const u32 next_states[] = {
1150 DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
1151 };
1152 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1153 }
 1154
1155set_media:
1156 spin_lock_irqsave(&de->lock, flags);
1157 de_stop_rxtx(de);
1158 spin_unlock_irqrestore(&de->lock, flags);
1159 de_set_media(de);
1160 de_start_rxtx(de);
1161
1162no_link_yet:
1163 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1164 add_timer(&de->media_timer);
1165
1166 if (netif_msg_timer(de))
1167 dev_info(&dev->dev, "no link, trying media %s, status %x\n",
1168 media_name[de->media_type], status);
1169}
1170
1171static void de_media_interrupt (struct de_private *de, u32 status)
1172{
1173 if (status & LinkPass) {
1174 /* Ignore if current media is AUI or BNC and we can't use TP */
1175 if ((de->media_type == DE_MEDIA_AUI ||
1176 de->media_type == DE_MEDIA_BNC) &&
1177 (de->media_lock ||
1178 !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
1179 return;
1180 /* If current media is not TP, change it to TP */
1181 if ((de->media_type == DE_MEDIA_AUI ||
1182 de->media_type == DE_MEDIA_BNC)) {
1183 de->media_type = DE_MEDIA_TP_AUTO;
1184 de_stop_rxtx(de);
1185 de_set_media(de);
1186 de_start_rxtx(de);
1187 }
1188 de_link_up(de);
1189 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1190 return;
1191 }
 1192
 1193 BUG_ON(!(status & LinkFail));
1194 /* Mark the link as down only if current media is TP */
1195 if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
1196 de->media_type != DE_MEDIA_BNC) {
1197 de_link_down(de);
1198 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1199 }
1200}
1201
1202static int de_reset_mac (struct de_private *de)
1203{
1204 u32 status, tmp;
1205
1206 /*
1207 * Reset MAC. de4x5.c and tulip.c examined for "advice"
1208 * in this area.
1209 */
1210
1211 if (dr32(BusMode) == 0xffffffff)
1212 return -EBUSY;
1213
1214 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
1215 dw32 (BusMode, CmdReset);
1216 mdelay (1);
1217
1218 dw32 (BusMode, de_bus_mode);
1219 mdelay (1);
1220
1221 for (tmp = 0; tmp < 5; tmp++) {
1222 dr32 (BusMode);
1223 mdelay (1);
1224 }
1225
1226 mdelay (1);
1227
1228 status = dr32(MacStatus);
1229 if (status & (RxState | TxState))
1230 return -EBUSY;
1231 if (status == 0xffffffff)
1232 return -ENODEV;
1233 return 0;
1234}
1235
1236static void de_adapter_wake (struct de_private *de)
1237{
1238 u32 pmctl;
1239
1240 if (de->de21040)
1241 return;
1242
1243 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1244 if (pmctl & PM_Mask) {
1245 pmctl &= ~PM_Mask;
1246 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1247
1248 /* de4x5.c delays, so we do too */
1249 msleep(10);
1250 }
1251}
1252
1253static void de_adapter_sleep (struct de_private *de)
1254{
1255 u32 pmctl;
1256
1257 if (de->de21040)
1258 return;
1259
 1260 dw32(CSR13, 0); /* Reset phy */
1261 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1262 pmctl |= PM_Sleep;
1263 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1264}
1265
1266static int de_init_hw (struct de_private *de)
1267{
1268 struct net_device *dev = de->dev;
1269 u32 macmode;
1270 int rc;
1271
1272 de_adapter_wake(de);
 1273
1274 macmode = dr32(MacMode) & ~MacModeClear;
1275
1276 rc = de_reset_mac(de);
1277 if (rc)
1278 return rc;
1279
1280 de_set_media(de); /* reset phy */
1281
1282 dw32(RxRingAddr, de->ring_dma);
1283 dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
1284
1285 dw32(MacMode, RxTx | macmode);
1286
1287 dr32(RxMissed); /* self-clearing */
1288
1289 dw32(IntrMask, de_intr_mask);
1290
1291 de_set_rx_mode(dev);
1292
1293 return 0;
1294}
1295
1296static int de_refill_rx (struct de_private *de)
1297{
1298 unsigned i;
1299
1300 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1301 struct sk_buff *skb;
1302
1303 skb = dev_alloc_skb(de->rx_buf_sz);
1304 if (!skb)
1305 goto err_out;
1306
1307 skb->dev = de->dev;
1308
1309 de->rx_skb[i].mapping = pci_map_single(de->pdev,
 1310 skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1311 de->rx_skb[i].skb = skb;
1312
1313 de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1314 if (i == (DE_RX_RING_SIZE - 1))
1315 de->rx_ring[i].opts2 =
1316 cpu_to_le32(RingEnd | de->rx_buf_sz);
1317 else
1318 de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1319 de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1320 de->rx_ring[i].addr2 = 0;
1321 }
1322
1323 return 0;
1324
1325err_out:
1326 de_clean_rings(de);
1327 return -ENOMEM;
1328}
1329
1330static int de_init_rings (struct de_private *de)
1331{
1332 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1333 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1334
1335 de->rx_tail = 0;
1336 de->tx_head = de->tx_tail = 0;
1337
1338 return de_refill_rx (de);
1339}
1340
1341static int de_alloc_rings (struct de_private *de)
1342{
1343 de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1344 if (!de->rx_ring)
1345 return -ENOMEM;
1346 de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1347 return de_init_rings(de);
1348}
1349
1350static void de_clean_rings (struct de_private *de)
1351{
1352 unsigned i;
1353
1354 memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
1355 de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1356 wmb();
1357 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1358 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1359 wmb();
1360
1361 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1362 if (de->rx_skb[i].skb) {
1363 pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
1364 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1365 dev_kfree_skb(de->rx_skb[i].skb);
1366 }
1367 }
1368
1369 for (i = 0; i < DE_TX_RING_SIZE; i++) {
1370 struct sk_buff *skb = de->tx_skb[i].skb;
1371 if ((skb) && (skb != DE_DUMMY_SKB)) {
1372 if (skb != DE_SETUP_SKB) {
1373 de->net_stats.tx_dropped++;
1374 pci_unmap_single(de->pdev,
1375 de->tx_skb[i].mapping,
1376 skb->len, PCI_DMA_TODEVICE);
 1377 dev_kfree_skb(skb);
1378 } else {
1379 pci_unmap_single(de->pdev,
1380 de->tx_skb[i].mapping,
1381 sizeof(de->setup_frame),
1382 PCI_DMA_TODEVICE);
1383 }
1384 }
1385 }
1386
1387 memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
1388 memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
1389}
1390
1391static void de_free_rings (struct de_private *de)
1392{
1393 de_clean_rings(de);
1394 pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
1395 de->rx_ring = NULL;
1396 de->tx_ring = NULL;
1397}
1398
1399static int de_open (struct net_device *dev)
1400{
 1401 struct de_private *de = netdev_priv(dev);
 1402 int rc;
1403
1404 if (netif_msg_ifup(de))
1405 printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
1406
1407 de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1408
1409 rc = de_alloc_rings(de);
1410 if (rc) {
 1411 dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc);
1412 return rc;
1413 }
1414
 1415 dw32(IntrMask, 0);
 1416
 1417 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
 1418 if (rc) {
1419 dev_err(&dev->dev, "IRQ %d request failure, err=%d\n",
1420 dev->irq, rc);
1421 goto err_out_free;
1422 }
1423
1424 rc = de_init_hw(de);
1425 if (rc) {
 1426 dev_err(&dev->dev, "h/w init failure, err=%d\n", rc);
 1427 goto err_out_free_irq;
1428 }
1429
1430 netif_start_queue(dev);
1431 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1432
1433 return 0;
1434
1435err_out_free_irq:
1436 free_irq(dev->irq, dev);
1437err_out_free:
1438 de_free_rings(de);
1439 return rc;
1440}
1441
1442static int de_close (struct net_device *dev)
1443{
 1444 struct de_private *de = netdev_priv(dev);
1445 unsigned long flags;
1446
1447 if (netif_msg_ifdown(de))
1448 printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1449
1450 del_timer_sync(&de->media_timer);
1451
1452 spin_lock_irqsave(&de->lock, flags);
1453 de_stop_hw(de);
1454 netif_stop_queue(dev);
1455 netif_carrier_off(dev);
1456 spin_unlock_irqrestore(&de->lock, flags);
 1457
1458 free_irq(dev->irq, dev);
1459
1460 de_free_rings(de);
1461 de_adapter_sleep(de);
1462 return 0;
1463}
1464
1465static void de_tx_timeout (struct net_device *dev)
1466{
 1467 struct de_private *de = netdev_priv(dev);
1468
1469 printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1470 dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
1471 de->rx_tail, de->tx_head, de->tx_tail);
1472
1473 del_timer_sync(&de->media_timer);
1474
1475 disable_irq(dev->irq);
1476 spin_lock_irq(&de->lock);
1477
1478 de_stop_hw(de);
1479 netif_stop_queue(dev);
1480 netif_carrier_off(dev);
1481
1482 spin_unlock_irq(&de->lock);
1483 enable_irq(dev->irq);
 1484
1485 /* Update the error counts. */
1486 __de_get_stats(de);
1487
1488 synchronize_irq(dev->irq);
1489 de_clean_rings(de);
1490
1491 de_init_rings(de);
1492
 1493 de_init_hw(de);
 1494
1495 netif_wake_queue(dev);
1496}
1497
1498static void __de_get_regs(struct de_private *de, u8 *buf)
1499{
1500 int i;
1501 u32 *rbuf = (u32 *)buf;
 1502
1503 /* read all CSRs */
1504 for (i = 0; i < DE_NUM_REGS; i++)
1505 rbuf[i] = dr32(i * 8);
1506
1507 /* handle self-clearing RxMissed counter, CSR8 */
1508 de_rx_missed(de, rbuf[8]);
1509}
1510
1511static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1512{
1513 ecmd->supported = de->media_supported;
1514 ecmd->transceiver = XCVR_INTERNAL;
1515 ecmd->phy_address = 0;
1516 ecmd->advertising = de->media_advertise;
 1517
1518 switch (de->media_type) {
1519 case DE_MEDIA_AUI:
1520 ecmd->port = PORT_AUI;
1521 ecmd->speed = 5;
1522 break;
1523 case DE_MEDIA_BNC:
1524 ecmd->port = PORT_BNC;
1525 ecmd->speed = 2;
1526 break;
1527 default:
1528 ecmd->port = PORT_TP;
1529 ecmd->speed = SPEED_10;
1530 break;
1531 }
 1532
1533 if (dr32(MacMode) & FullDuplex)
1534 ecmd->duplex = DUPLEX_FULL;
1535 else
1536 ecmd->duplex = DUPLEX_HALF;
1537
1538 if (de->media_lock)
1539 ecmd->autoneg = AUTONEG_DISABLE;
1540 else
1541 ecmd->autoneg = AUTONEG_ENABLE;
1542
1543 /* ignore maxtxpkt, maxrxpkt for now */
1544
1545 return 0;
1546}
1547
1548static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1549{
1550 u32 new_media;
1551 unsigned int media_lock;
1552
1553 if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
1554 return -EINVAL;
1555 if (de->de21040 && ecmd->speed == 2)
1556 return -EINVAL;
1557 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1558 return -EINVAL;
1559 if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
1560 return -EINVAL;
1561 if (de->de21040 && ecmd->port == PORT_BNC)
1562 return -EINVAL;
1563 if (ecmd->transceiver != XCVR_INTERNAL)
1564 return -EINVAL;
1565 if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
1566 return -EINVAL;
1567 if (ecmd->advertising & ~de->media_supported)
1568 return -EINVAL;
1569 if (ecmd->autoneg == AUTONEG_ENABLE &&
1570 (!(ecmd->advertising & ADVERTISED_Autoneg)))
1571 return -EINVAL;
 1572
1573 switch (ecmd->port) {
1574 case PORT_AUI:
1575 new_media = DE_MEDIA_AUI;
1576 if (!(ecmd->advertising & ADVERTISED_AUI))
1577 return -EINVAL;
1578 break;
1579 case PORT_BNC:
1580 new_media = DE_MEDIA_BNC;
1581 if (!(ecmd->advertising & ADVERTISED_BNC))
1582 return -EINVAL;
1583 break;
1584 default:
1585 if (ecmd->autoneg == AUTONEG_ENABLE)
1586 new_media = DE_MEDIA_TP_AUTO;
1587 else if (ecmd->duplex == DUPLEX_FULL)
1588 new_media = DE_MEDIA_TP_FD;
1589 else
1590 new_media = DE_MEDIA_TP;
1591 if (!(ecmd->advertising & ADVERTISED_TP))
1592 return -EINVAL;
1593 if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
1594 return -EINVAL;
1595 break;
1596 }
 1597
 1598 media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
 1599
1600 if ((new_media == de->media_type) &&
1601 (media_lock == de->media_lock) &&
1602 (ecmd->advertising == de->media_advertise))
1603 return 0; /* nothing to change */
 1604
 1605 de_link_down(de);
 1606 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
 1607 de_stop_rxtx(de);
 1608
1609 de->media_type = new_media;
1610 de->media_lock = media_lock;
1611 de->media_advertise = ecmd->advertising;
1612 de_set_media(de);
1613 if (netif_running(de->dev))
1614 de_start_rxtx(de);
 1615
1616 return 0;
1617}
1618
1619static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1620{
 1621 struct de_private *de = netdev_priv(dev);
1622
1623 strcpy (info->driver, DRV_NAME);
1624 strcpy (info->version, DRV_VERSION);
1625 strcpy (info->bus_info, pci_name(de->pdev));
1626 info->eedump_len = DE_EEPROM_SIZE;
1627}
1628
1629static int de_get_regs_len(struct net_device *dev)
1630{
1631 return DE_REGS_SIZE;
1632}
1633
1634static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1635{
 1636 struct de_private *de = netdev_priv(dev);
1637 int rc;
1638
1639 spin_lock_irq(&de->lock);
1640 rc = __de_get_settings(de, ecmd);
1641 spin_unlock_irq(&de->lock);
1642
1643 return rc;
1644}
1645
1646static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1647{
 1648 struct de_private *de = netdev_priv(dev);
1649 int rc;
1650
1651 spin_lock_irq(&de->lock);
1652 rc = __de_set_settings(de, ecmd);
1653 spin_unlock_irq(&de->lock);
1654
1655 return rc;
1656}
1657
1658static u32 de_get_msglevel(struct net_device *dev)
1659{
 1660 struct de_private *de = netdev_priv(dev);
1661
1662 return de->msg_enable;
1663}
1664
1665static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1666{
 1667 struct de_private *de = netdev_priv(dev);
1668
1669 de->msg_enable = msglvl;
1670}
1671
1672static int de_get_eeprom(struct net_device *dev,
1673 struct ethtool_eeprom *eeprom, u8 *data)
1674{
 1675 struct de_private *de = netdev_priv(dev);
1676
1677 if (!de->ee_data)
1678 return -EOPNOTSUPP;
1679 if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1680 (eeprom->len != DE_EEPROM_SIZE))
1681 return -EINVAL;
1682 memcpy(data, de->ee_data, eeprom->len);
1683
1684 return 0;
1685}
1686
1687static int de_nway_reset(struct net_device *dev)
1688{
 1689 struct de_private *de = netdev_priv(dev);
1690 u32 status;
1691
1692 if (de->media_type != DE_MEDIA_TP_AUTO)
1693 return -EINVAL;
1694 if (netif_carrier_ok(de->dev))
1695 de_link_down(de);
1696
1697 status = dr32(SIAStatus);
1698 dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1699 if (netif_msg_link(de))
1700 dev_info(&de->dev->dev, "link nway restart, status %x,%x\n",
1701 status, dr32(SIAStatus));
1702 return 0;
1703}
1704
1705static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1706 void *data)
1707{
 1708 struct de_private *de = netdev_priv(dev);
1709
1710 regs->version = (DE_REGS_VER << 2) | de->de21040;
1711
1712 spin_lock_irq(&de->lock);
1713 __de_get_regs(de, data);
1714 spin_unlock_irq(&de->lock);
1715}
1716
 1717static const struct ethtool_ops de_ethtool_ops = {
 1718 .get_link = ethtool_op_get_link,
1719 .get_drvinfo = de_get_drvinfo,
1720 .get_regs_len = de_get_regs_len,
1721 .get_settings = de_get_settings,
1722 .set_settings = de_set_settings,
1723 .get_msglevel = de_get_msglevel,
1724 .set_msglevel = de_set_msglevel,
1725 .get_eeprom = de_get_eeprom,
1726 .nway_reset = de_nway_reset,
1727 .get_regs = de_get_regs,
1728};
1729
 1730static void __devinit de21040_get_mac_address (struct de_private *de)
1731{
1732 unsigned i;
1733
1734 dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
 1735 udelay(5);
1736
1737 for (i = 0; i < 6; i++) {
1738 int value, boguscnt = 100000;
 1739 do {
 1740 value = dr32(ROMCmd);
 1741 rmb();
 1742 } while (value < 0 && --boguscnt > 0);
1743 de->dev->dev_addr[i] = value;
1744 udelay(1);
1745 if (boguscnt <= 0)
 1746 pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i);
1747 }
1748}
1749
 1750static void __devinit de21040_get_media_info(struct de_private *de)
1751{
1752 unsigned int i;
1753
1754 de->media_type = DE_MEDIA_TP;
1755 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1756 SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1757 de->media_advertise = de->media_supported;
1758
1759 for (i = 0; i < DE_MAX_MEDIA; i++) {
1760 switch (i) {
1761 case DE_MEDIA_AUI:
1762 case DE_MEDIA_TP:
1763 case DE_MEDIA_TP_FD:
1764 de->media[i].type = i;
1765 de->media[i].csr13 = t21040_csr13[i];
1766 de->media[i].csr14 = t21040_csr14[i];
1767 de->media[i].csr15 = t21040_csr15[i];
1768 break;
1769 default:
1770 de->media[i].type = DE_MEDIA_INVALID;
1771 break;
1772 }
1773 }
1774}
1775
1776/* Note: this routine returns extra data bits for size detection. */
 1777static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
1778{
1779 int i;
1780 unsigned retval = 0;
1781 void __iomem *ee_addr = regs + ROMCmd;
1782 int read_cmd = location | (EE_READ_CMD << addr_len);
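 /* The serial EEPROM expects the start bit, the read opcode and then the
    word address, MSB first; shifting EE_READ_CMD left by addr_len places
    the opcode (with its always-set leading bit) just above the address
    bits in read_cmd. */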
1783
1784 writel(EE_ENB & ~EE_CS, ee_addr);
1785 writel(EE_ENB, ee_addr);
1786
1787 /* Shift the read command bits out. */
1788 for (i = 4 + addr_len; i >= 0; i--) {
1789 short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1790 writel(EE_ENB | dataval, ee_addr);
1791 readl(ee_addr);
1792 writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1793 readl(ee_addr);
1794 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1795 }
1796 writel(EE_ENB, ee_addr);
1797 readl(ee_addr);
1798
1799 for (i = 16; i > 0; i--) {
1800 writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
1801 readl(ee_addr);
1802 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1803 writel(EE_ENB, ee_addr);
1804 readl(ee_addr);
1805 }
1806
1807 /* Terminate the EEPROM access. */
1808 writel(EE_ENB & ~EE_CS, ee_addr);
1809 return retval;
1810}
1811
 1812static void __devinit de21041_get_srom_info (struct de_private *de)
1813{
1814 unsigned i, sa_offset = 0, ofs;
1815 u8 ee_data[DE_EEPROM_SIZE + 6] = {};
1816 unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
1817 struct de_srom_info_leaf *il;
1818 void *bufp;
1819
1820 /* download entire eeprom */
1821 for (i = 0; i < DE_EEPROM_WORDS; i++)
1822 ((__le16 *)ee_data)[i] =
1823 cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));
1824
1825 /* DEC now has a specification but early board makers
1826 just put the address in the first EEPROM locations. */
1827 /* This does memcmp(eedata, eedata+16, 8) */
1828
1829#ifndef CONFIG_MIPS_COBALT
1830
1831 for (i = 0; i < 8; i ++)
1832 if (ee_data[i] != ee_data[16+i])
1833 sa_offset = 20;
1834
1835#endif
1836
1837 /* store MAC address */
1838 for (i = 0; i < 6; i ++)
1839 de->dev->dev_addr[i] = ee_data[i + sa_offset];
1840
1841 /* get offset of controller 0 info leaf. ignore 2nd byte. */
1842 ofs = ee_data[SROMC0InfoLeaf];
1843 if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
1844 goto bad_srom;
1845
1846 /* get pointer to info leaf */
1847 il = (struct de_srom_info_leaf *) &ee_data[ofs];
1848
1849 /* paranoia checks */
1850 if (il->n_blocks == 0)
1851 goto bad_srom;
1852 if ((sizeof(ee_data) - ofs) <
1853 (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
1854 goto bad_srom;
1855
1856 /* get default media type */
 1857 switch (get_unaligned(&il->default_media)) {
1858 case 0x0001: de->media_type = DE_MEDIA_BNC; break;
1859 case 0x0002: de->media_type = DE_MEDIA_AUI; break;
1860 case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
1861 default: de->media_type = DE_MEDIA_TP_AUTO; break;
1862 }
 1863
 1864 if (netif_msg_probe(de))
1865 pr_info("de%d: SROM leaf offset %u, default media %s\n",
1866 de->board_idx, ofs, media_name[de->media_type]);
1867
1868 /* init SIA register values to defaults */
1869 for (i = 0; i < DE_MAX_MEDIA; i++) {
1870 de->media[i].type = DE_MEDIA_INVALID;
1871 de->media[i].csr13 = 0xffff;
1872 de->media[i].csr14 = 0xffff;
1873 de->media[i].csr15 = 0xffff;
1874 }
1875
1876 /* parse media blocks to see what medias are supported,
1877 * and if any custom CSR values are provided
1878 */
1879 bufp = ((void *)il) + sizeof(*il);
1880 for (i = 0; i < il->n_blocks; i++) {
1881 struct de_srom_media_block *ib = bufp;
1882 unsigned idx;
1883
1884 /* index based on media type in media block */
1885 switch(ib->opts & MediaBlockMask) {
1886 case 0: /* 10baseT */
1887 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
1888 | SUPPORTED_Autoneg;
1889 idx = DE_MEDIA_TP;
1890 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1891 break;
1892 case 1: /* BNC */
1893 de->media_supported |= SUPPORTED_BNC;
1894 idx = DE_MEDIA_BNC;
1895 break;
1896 case 2: /* AUI */
1897 de->media_supported |= SUPPORTED_AUI;
1898 idx = DE_MEDIA_AUI;
1899 break;
1900 case 4: /* 10baseT-FD */
1901 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
1902 | SUPPORTED_Autoneg;
1903 idx = DE_MEDIA_TP_FD;
1904 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1905 break;
1906 default:
1907 goto bad_srom;
1908 }
1909
1910 de->media[idx].type = idx;
1911
1912 if (netif_msg_probe(de))
1913 pr_info("de%d: media block #%u: %s",
1914 de->board_idx, i,
1915 media_name[de->media[idx].type]);
1916
1917 bufp += sizeof (ib->opts);
1918
1919 if (ib->opts & MediaCustomCSRs) {
1920 de->media[idx].csr13 = get_unaligned(&ib->csr13);
1921 de->media[idx].csr14 = get_unaligned(&ib->csr14);
1922 de->media[idx].csr15 = get_unaligned(&ib->csr15);
1923 bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
1924 sizeof(ib->csr15);
1925
1926 if (netif_msg_probe(de))
1927 pr_cont(" (%x,%x,%x)\n",
1928 de->media[idx].csr13,
1929 de->media[idx].csr14,
1930 de->media[idx].csr15);
f3b197ac 1931
1da177e4 1932 } else if (netif_msg_probe(de))
f639dc7d 1933 pr_cont("\n");
1da177e4
LT
1934
1935 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1936 break;
1937 }
1938
1939 de->media_advertise = de->media_supported;
1940
1941fill_defaults:
1942 /* fill in defaults for cases where custom CSRs are not used */
1943 for (i = 0; i < DE_MAX_MEDIA; i++) {
1944 if (de->media[i].csr13 == 0xffff)
1945 de->media[i].csr13 = t21041_csr13[i];
e0f9c4f3
OZ
1946 if (de->media[i].csr14 == 0xffff) {
1947 /* autonegotiation is broken at least on some chip
1948 revisions - rev. 0x21 works, 0x11 does not */
1949 if (de->pdev->revision < 0x20)
1950 de->media[i].csr14 = t21041_csr14_brk[i];
1951 else
1952 de->media[i].csr14 = t21041_csr14[i];
1953 }
1da177e4
LT
1954 if (de->media[i].csr15 == 0xffff)
1955 de->media[i].csr15 = t21041_csr15[i];
1956 }
1957
c3a9392e 1958 de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);
1da177e4
LT
1959
1960 return;
1961
1962bad_srom:
1963 /* for error cases, it's ok to assume we support all these */
1964 for (i = 0; i < DE_MAX_MEDIA; i++)
1965 de->media[i].type = i;
1966 de->media_supported =
1967 SUPPORTED_10baseT_Half |
1968 SUPPORTED_10baseT_Full |
1969 SUPPORTED_Autoneg |
1970 SUPPORTED_TP |
1971 SUPPORTED_AUI |
1972 SUPPORTED_BNC;
1973 goto fill_defaults;
1974}
1975
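/*
 * A minimal, illustrative sketch, not part of the original driver: the parse
 * above reads the controller-0 info leaf offset from ee_data[SROMC0InfoLeaf],
 * bounds-checks it, then advances through il->n_blocks media blocks.  Each
 * block costs one opts byte, plus three 16-bit CSRs (six more bytes) only
 * when the custom-CSR bit is set.  srom_walk_blocks() and SROM_CUSTOM_CSRS
 * below are hypothetical stand-ins for that pointer arithmetic, written as
 * plain C for illustration only.
 */
#include <stdint.h>
#include <stddef.h>

#define SROM_CUSTOM_CSRS 0x40	/* assumed value of the MediaCustomCSRs bit */

static size_t srom_walk_blocks(const uint8_t *blocks, unsigned int n_blocks)
{
	const uint8_t *p = blocks;
	unsigned int i;

	for (i = 0; i < n_blocks; i++) {
		uint8_t opts = *p;

		p += 1;				/* opts byte is always present */
		if (opts & SROM_CUSTOM_CSRS)
			p += 3 * sizeof(uint16_t);	/* csr13, csr14, csr15 */
	}
	return (size_t)(p - blocks);		/* bytes consumed by the blocks */
}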
90d8743d
SH
1976static const struct net_device_ops de_netdev_ops = {
1977 .ndo_open = de_open,
1978 .ndo_stop = de_close,
1979 .ndo_set_multicast_list = de_set_rx_mode,
1980 .ndo_start_xmit = de_start_xmit,
1981 .ndo_get_stats = de_get_stats,
1982 .ndo_tx_timeout = de_tx_timeout,
1983 .ndo_change_mtu = eth_change_mtu,
1984 .ndo_set_mac_address = eth_mac_addr,
1985 .ndo_validate_addr = eth_validate_addr,
1986};
1987
4a1d2d81 1988static int __devinit de_init_one (struct pci_dev *pdev,
1da177e4
LT
1989 const struct pci_device_id *ent)
1990{
1991 struct net_device *dev;
1992 struct de_private *de;
1993 int rc;
1994 void __iomem *regs;
afc7097f 1995 unsigned long pciaddr;
1da177e4
LT
1996 static int board_idx = -1;
1997
1998 board_idx++;
1999
2000#ifndef MODULE
2001 if (board_idx == 0)
2002 printk("%s", version);
2003#endif
2004
2005 /* allocate a new ethernet device structure, and fill in defaults */
2006 dev = alloc_etherdev(sizeof(struct de_private));
2007 if (!dev)
2008 return -ENOMEM;
2009
90d8743d 2010 dev->netdev_ops = &de_netdev_ops;
1da177e4 2011 SET_NETDEV_DEV(dev, &pdev->dev);
1da177e4 2012 dev->ethtool_ops = &de_ethtool_ops;
1da177e4
LT
2013 dev->watchdog_timeo = TX_TIMEOUT;
2014
8f15ea42 2015 de = netdev_priv(dev);
1da177e4
LT
2016 de->de21040 = ent->driver_data == 0 ? 1 : 0;
2017 de->pdev = pdev;
2018 de->dev = dev;
2019 de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
2020 de->board_idx = board_idx;
2021 spin_lock_init (&de->lock);
2022 init_timer(&de->media_timer);
2023 if (de->de21040)
2024 de->media_timer.function = de21040_media_timer;
2025 else
2026 de->media_timer.function = de21041_media_timer;
2027 de->media_timer.data = (unsigned long) de;
2028
2029 netif_carrier_off(dev);
1da177e4
LT
2030
2031 /* wake up device, assign resources */
2032 rc = pci_enable_device(pdev);
2033 if (rc)
2034 goto err_out_free;
2035
2036 /* reserve PCI resources to ensure driver atomicity */
2037 rc = pci_request_regions(pdev, DRV_NAME);
2038 if (rc)
2039 goto err_out_disable;
2040
2041 /* check for invalid IRQ value */
2042 if (pdev->irq < 2) {
2043 rc = -EIO;
f639dc7d 2044 pr_err(PFX "invalid irq (%d) for pci dev %s\n",
1da177e4
LT
2045 pdev->irq, pci_name(pdev));
2046 goto err_out_res;
2047 }
2048
2049 dev->irq = pdev->irq;
2050
2051 /* obtain and check validity of PCI I/O address */
2052 pciaddr = pci_resource_start(pdev, 1);
2053 if (!pciaddr) {
2054 rc = -EIO;
f639dc7d 2055 pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev));
1da177e4
LT
2056 goto err_out_res;
2057 }
2058 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2059 rc = -EIO;
f639dc7d
JP
2060 pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n",
2061 (unsigned long long)pci_resource_len(pdev, 1),
2062 pci_name(pdev));
1da177e4
LT
2063 goto err_out_res;
2064 }
2065
2066 /* remap CSR registers */
2067 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
2068 if (!regs) {
2069 rc = -EIO;
f639dc7d
JP
2070 pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
2071 (unsigned long long)pci_resource_len(pdev, 1),
2072 pciaddr, pci_name(pdev));
1da177e4
LT
2073 goto err_out_res;
2074 }
2075 dev->base_addr = (unsigned long) regs;
2076 de->regs = regs;
2077
2078 de_adapter_wake(de);
2079
2080 /* make sure hardware is not running */
2081 rc = de_reset_mac(de);
2082 if (rc) {
f639dc7d 2083 pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev));
1da177e4
LT
2084 goto err_out_iomap;
2085 }
2086
2087 /* get MAC address, initialize default media type and
2088 * get list of supported media
2089 */
2090 if (de->de21040) {
2091 de21040_get_mac_address(de);
2092 de21040_get_media_info(de);
2093 } else {
2094 de21041_get_srom_info(de);
2095 }
2096
2097 /* register new network interface with kernel */
2098 rc = register_netdev(dev);
2099 if (rc)
2100 goto err_out_iomap;
2101
2102 /* print info about board and interface just registered */
f639dc7d
JP
2103 dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n",
2104 de->de21040 ? "21040" : "21041",
2105 dev->base_addr,
2106 dev->dev_addr,
2107 dev->irq);
1da177e4
LT
2108
2109 pci_set_drvdata(pdev, dev);
2110
2111 /* enable busmastering */
2112 pci_set_master(pdev);
2113
2114 /* put adapter to sleep */
2115 de_adapter_sleep(de);
2116
2117 return 0;
2118
2119err_out_iomap:
b4558ea9 2120 kfree(de->ee_data);
1da177e4
LT
2121 iounmap(regs);
2122err_out_res:
2123 pci_release_regions(pdev);
2124err_out_disable:
2125 pci_disable_device(pdev);
2126err_out_free:
2127 free_netdev(dev);
2128 return rc;
2129}
2130
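/*
 * A minimal sketch, not from this driver, of the reverse-order goto unwind
 * that de_init_one() uses above: each resource acquisition gets a matching
 * error label, and a failure jumps to the label that releases everything
 * acquired so far, mirroring err_out_iomap/err_out_res/err_out_disable/
 * err_out_free.  acquire_a/b/c() and release_a/b() are hypothetical
 * placeholders, not kernel APIs.
 */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int probe_unwind_sketch(void)
{
	int rc;

	rc = acquire_a();
	if (rc)
		goto err_out;
	rc = acquire_b();
	if (rc)
		goto err_release_a;
	rc = acquire_c();
	if (rc)
		goto err_release_b;
	return 0;

err_release_b:
	release_b();
err_release_a:
	release_a();
err_out:
	return rc;
}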
4a1d2d81 2131static void __devexit de_remove_one (struct pci_dev *pdev)
1da177e4
LT
2132{
2133 struct net_device *dev = pci_get_drvdata(pdev);
8f15ea42 2134 struct de_private *de = netdev_priv(dev);
1da177e4 2135
7e0b58f3 2136 BUG_ON(!dev);
1da177e4 2137 unregister_netdev(dev);
b4558ea9 2138 kfree(de->ee_data);
1da177e4
LT
2139 iounmap(de->regs);
2140 pci_release_regions(pdev);
2141 pci_disable_device(pdev);
2142 pci_set_drvdata(pdev, NULL);
2143 free_netdev(dev);
2144}
2145
2146#ifdef CONFIG_PM
2147
05adc3b7 2148static int de_suspend (struct pci_dev *pdev, pm_message_t state)
1da177e4
LT
2149{
2150 struct net_device *dev = pci_get_drvdata (pdev);
8f15ea42 2151 struct de_private *de = netdev_priv(dev);
1da177e4
LT
2152
2153 rtnl_lock();
2154 if (netif_running (dev)) {
2155 del_timer_sync(&de->media_timer);
2156
2157 disable_irq(dev->irq);
2158 spin_lock_irq(&de->lock);
2159
2160 de_stop_hw(de);
2161 netif_stop_queue(dev);
2162 netif_device_detach(dev);
2163 netif_carrier_off(dev);
2164
2165 spin_unlock_irq(&de->lock);
2166 enable_irq(dev->irq);
f3b197ac 2167
1da177e4
LT
2168 /* Update the error counts. */
2169 __de_get_stats(de);
2170
2171 synchronize_irq(dev->irq);
2172 de_clean_rings(de);
2173
2174 de_adapter_sleep(de);
2175 pci_disable_device(pdev);
2176 } else {
2177 netif_device_detach(dev);
2178 }
2179 rtnl_unlock();
2180 return 0;
2181}
2182
2183static int de_resume (struct pci_dev *pdev)
2184{
2185 struct net_device *dev = pci_get_drvdata (pdev);
8f15ea42 2186 struct de_private *de = netdev_priv(dev);
9f486ae1 2187 int retval = 0;
1da177e4
LT
2188
2189 rtnl_lock();
2190 if (netif_device_present(dev))
2191 goto out;
9f486ae1
VH
2192 if (!netif_running(dev))
2193 goto out_attach;
2194 if ((retval = pci_enable_device(pdev))) {
f639dc7d 2195 dev_err(&dev->dev, "pci_enable_device failed in resume\n");
9f486ae1 2196 goto out;
1da177e4 2197 }
b0255a02
OZ
2198 pci_set_master(pdev);
2199 de_init_rings(de);
9f486ae1
VH
2200 de_init_hw(de);
2201out_attach:
2202 netif_device_attach(dev);
1da177e4
LT
2203out:
2204 rtnl_unlock();
2205 return retval;
2206}
2207
2208#endif /* CONFIG_PM */
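
/*
 * A minimal sketch, not from this driver, of the suspend/resume symmetry
 * used above: everything de_suspend() tears down (media timer, DMA, Rx/Tx
 * rings, PCI device) is rebuilt in reverse order by de_resume().  The
 * helpers below are hypothetical placeholders for de_stop_hw(),
 * de_clean_rings(), de_init_rings() and de_init_hw().
 */
static void quiesce_hw(void)  { }		/* stop DMA and the media timer */
static void free_rings(void)  { }		/* release Rx/Tx descriptor buffers */
static int  power_up(void)    { return 0; }	/* pci_enable_device + set_master */
static void alloc_rings(void) { }		/* rebuild descriptor rings */
static void start_hw(void)    { }		/* reprogram CSRs, restart DMA */

static int suspend_sketch(void)
{
	quiesce_hw();
	free_rings();
	return 0;			/* adapter is now asleep */
}

static int resume_sketch(void)
{
	int rc = power_up();

	if (rc)
		return rc;		/* leave the device detached on failure */
	alloc_rings();
	start_hw();
	return 0;
}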
2209
2210static struct pci_driver de_driver = {
2211 .name = DRV_NAME,
2212 .id_table = de_pci_tbl,
2213 .probe = de_init_one,
4a1d2d81 2214 .remove = __devexit_p(de_remove_one),
1da177e4
LT
2215#ifdef CONFIG_PM
2216 .suspend = de_suspend,
2217 .resume = de_resume,
2218#endif
2219};
2220
2221static int __init de_init (void)
2222{
2223#ifdef MODULE
2224 printk("%s", version);
2225#endif
29917620 2226 return pci_register_driver(&de_driver);
1da177e4
LT
2227}
2228
2229static void __exit de_exit (void)
2230{
2231 pci_unregister_driver (&de_driver);
2232}
2233
2234module_init(de_init);
2235module_exit(de_exit);