1 /* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
2 /*
3 Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
4
5 Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
6 Written/copyright 1994-2001 by Donald Becker. [tulip.c]
7
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL.
14
15 See the file COPYING in this distribution for more information.
16
17 TODO, in rough priority order:
18 * Support forcing media type with a module parameter,
19 like dl2k.c/sundance.c
20 * Constants (module parms?) for Rx work limit
21 * Complete reset on PciErr
22 * Jumbo frames / dev->change_mtu
23 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25 * Implement Tx software interrupt mitigation via
26 Tx descriptor bit
27
28 */
29
30 #define DRV_NAME "de2104x"
31 #define DRV_VERSION "0.7"
32 #define DRV_RELDATE "Mar 17, 2004"
33
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/init.h>
39 #include <linux/pci.h>
40 #include <linux/delay.h>
41 #include <linux/ethtool.h>
42 #include <linux/compiler.h>
43 #include <linux/rtnetlink.h>
44 #include <linux/crc32.h>
45
46 #include <asm/io.h>
47 #include <asm/irq.h>
48 #include <asm/uaccess.h>
49 #include <asm/unaligned.h>
50
51 /* These identify the driver base version and may not be removed. */
52 static char version[] =
53 KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
54
55 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
56 MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
57 MODULE_LICENSE("GPL");
58 MODULE_VERSION(DRV_VERSION);
59
60 static int debug = -1;
61 module_param (debug, int, 0);
62 MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
63
64 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
65 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
66 || defined(CONFIG_SPARC) || defined(__ia64__) \
67 || defined(__sh__) || defined(__mips__)
68 static int rx_copybreak = 1518;
69 #else
70 static int rx_copybreak = 100;
71 #endif
72 module_param (rx_copybreak, int, 0);
73 MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
74
75 #define PFX DRV_NAME ": "
76
77 #define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
78 NETIF_MSG_PROBE | \
79 NETIF_MSG_LINK | \
80 NETIF_MSG_IFDOWN | \
81 NETIF_MSG_IFUP | \
82 NETIF_MSG_RX_ERR | \
83 NETIF_MSG_TX_ERR)
84
85 #define DE_RX_RING_SIZE 64
86 #define DE_TX_RING_SIZE 64
87 #define DE_RING_BYTES \
88 ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
89 (sizeof(struct de_desc) * DE_TX_RING_SIZE))
90 #define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
91 #define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
92 #define TX_BUFFS_AVAIL(CP) \
93 (((CP)->tx_tail <= (CP)->tx_head) ? \
94 (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
95 (CP)->tx_tail - (CP)->tx_head - 1)
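/* TX_BUFFS_AVAIL yields the free slots of a classic circular ring:
with head == tail the ring is empty and DE_TX_RING_SIZE - 1 (63)
slots are usable; one slot always stays unused so that a full ring
is never mistaken for an empty one. */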
96
97 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
98 #define RX_OFFSET 2
99
100 #define DE_SETUP_SKB ((struct sk_buff *) 1)
101 #define DE_DUMMY_SKB ((struct sk_buff *) 2)
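/* Sentinel pointer values, never dereferenced: they mark tx_skb[]
slots that hold a setup frame or an errata-workaround dummy
descriptor rather than a real socket buffer (see de_tx and
__de_set_rx_mode). */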
102 #define DE_SETUP_FRAME_WORDS 96
103 #define DE_EEPROM_WORDS 256
104 #define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
105 #define DE_MAX_MEDIA 5
106
107 #define DE_MEDIA_TP_AUTO 0
108 #define DE_MEDIA_BNC 1
109 #define DE_MEDIA_AUI 2
110 #define DE_MEDIA_TP 3
111 #define DE_MEDIA_TP_FD 4
112 #define DE_MEDIA_INVALID DE_MAX_MEDIA
113 #define DE_MEDIA_FIRST 0
114 #define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
115 #define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
116
117 #define DE_TIMER_LINK (60 * HZ)
118 #define DE_TIMER_NO_LINK (5 * HZ)
119
120 #define DE_NUM_REGS 16
121 #define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
122 #define DE_REGS_VER 1
123
124 /* Time in jiffies before concluding the transmitter is hung. */
125 #define TX_TIMEOUT (6*HZ)
126
127 #define DE_UNALIGNED_16(a) (u16)(get_unaligned((u16 *)(a)))
128
129 /* This is a mysterious value that can be written to CSR11 in the 21040 (only)
130 to support a pre-NWay full-duplex signaling mechanism using short frames.
131 No one knows what it should be, but if left at its default value some
132 10base2(!) packets trigger a full-duplex-request interrupt. */
133 #define FULL_DUPLEX_MAGIC 0x6969
134
135 enum {
136 /* NIC registers */
137 BusMode = 0x00,
138 TxPoll = 0x08,
139 RxPoll = 0x10,
140 RxRingAddr = 0x18,
141 TxRingAddr = 0x20,
142 MacStatus = 0x28,
143 MacMode = 0x30,
144 IntrMask = 0x38,
145 RxMissed = 0x40,
146 ROMCmd = 0x48,
147 CSR11 = 0x58,
148 SIAStatus = 0x60,
149 CSR13 = 0x68,
150 CSR14 = 0x70,
151 CSR15 = 0x78,
152 PCIPM = 0x40,
153
154 /* BusMode bits */
155 CmdReset = (1 << 0),
156 CacheAlign16 = 0x00008000,
157 BurstLen4 = 0x00000400,
158
159 /* Rx/TxPoll bits */
160 NormalTxPoll = (1 << 0),
161 NormalRxPoll = (1 << 0),
162
163 /* Tx/Rx descriptor status bits */
164 DescOwn = (1 << 31),
165 RxError = (1 << 15),
166 RxErrLong = (1 << 7),
167 RxErrCRC = (1 << 1),
168 RxErrFIFO = (1 << 0),
169 RxErrRunt = (1 << 11),
170 RxErrFrame = (1 << 14),
171 RingEnd = (1 << 25),
172 FirstFrag = (1 << 29),
173 LastFrag = (1 << 30),
174 TxError = (1 << 15),
175 TxFIFOUnder = (1 << 1),
176 TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
177 TxMaxCol = (1 << 8),
178 TxOWC = (1 << 9),
179 TxJabber = (1 << 14),
180 SetupFrame = (1 << 27),
181 TxSwInt = (1 << 31),
182
183 /* MacStatus bits */
184 IntrOK = (1 << 16),
185 IntrErr = (1 << 15),
186 RxIntr = (1 << 6),
187 RxEmpty = (1 << 7),
188 TxIntr = (1 << 0),
189 TxEmpty = (1 << 2),
190 PciErr = (1 << 13),
191 TxState = (1 << 22) | (1 << 21) | (1 << 20),
192 RxState = (1 << 19) | (1 << 18) | (1 << 17),
193 LinkFail = (1 << 12),
194 LinkPass = (1 << 4),
195 RxStopped = (1 << 8),
196 TxStopped = (1 << 1),
197
198 /* MacMode bits */
199 TxEnable = (1 << 13),
200 RxEnable = (1 << 1),
201 RxTx = TxEnable | RxEnable,
202 FullDuplex = (1 << 9),
203 AcceptAllMulticast = (1 << 7),
204 AcceptAllPhys = (1 << 6),
205 BOCnt = (1 << 5),
206 MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
207 RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
208
209 /* ROMCmd bits */
210 EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */
211 EE_CS = 0x01, /* EEPROM chip select. */
212 EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
213 EE_WRITE_0 = 0x01,
214 EE_WRITE_1 = 0x05,
215 EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */
216 EE_ENB = (0x4800 | EE_CS),
217
218 /* The EEPROM commands include the always-set leading bit. */
219 EE_READ_CMD = 6,
220
221 /* RxMissed bits */
222 RxMissedOver = (1 << 16),
223 RxMissedMask = 0xffff,
224
225 /* SROM-related bits */
226 SROMC0InfoLeaf = 27,
227 MediaBlockMask = 0x3f,
228 MediaCustomCSRs = (1 << 6),
229
230 /* PCIPM bits */
231 PM_Sleep = (1 << 31),
232 PM_Snooze = (1 << 30),
233 PM_Mask = PM_Sleep | PM_Snooze,
234
235 /* SIAStatus bits */
236 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
237 NWayRestart = (1 << 12),
238 NonselPortActive = (1 << 9),
239 LinkFailStatus = (1 << 2),
240 NetCxnErr = (1 << 1),
241 };
242
243 static const u32 de_intr_mask =
244 IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
245 LinkPass | LinkFail | PciErr;
246
247 /*
248 * Set the programmable burst length to 4 longwords for all chips;
249 * DMA errors result without these values. Cache-align to 16 longwords.
250 */
251 static const u32 de_bus_mode = CacheAlign16 | BurstLen4;
252
253 struct de_srom_media_block {
254 u8 opts;
255 u16 csr13;
256 u16 csr14;
257 u16 csr15;
258 } __attribute__((packed));
259
260 struct de_srom_info_leaf {
261 u16 default_media;
262 u8 n_blocks;
263 u8 unused;
264 } __attribute__((packed));
265
266 struct de_desc {
267 u32 opts1;
268 u32 opts2;
269 u32 addr1;
270 u32 addr2;
271 };
272
273 struct media_info {
274 u16 type; /* DE_MEDIA_xxx */
275 u16 csr13;
276 u16 csr14;
277 u16 csr15;
278 };
279
280 struct ring_info {
281 struct sk_buff *skb;
282 dma_addr_t mapping;
283 };
284
285 struct de_private {
286 unsigned tx_head;
287 unsigned tx_tail;
288 unsigned rx_tail;
289
290 void __iomem *regs;
291 struct net_device *dev;
292 spinlock_t lock;
293
294 struct de_desc *rx_ring;
295 struct de_desc *tx_ring;
296 struct ring_info tx_skb[DE_TX_RING_SIZE];
297 struct ring_info rx_skb[DE_RX_RING_SIZE];
298 unsigned rx_buf_sz;
299 dma_addr_t ring_dma;
300
301 u32 msg_enable;
302
303 struct net_device_stats net_stats;
304
305 struct pci_dev *pdev;
306
307 u16 setup_frame[DE_SETUP_FRAME_WORDS];
308
309 u32 media_type;
310 u32 media_supported;
311 u32 media_advertise;
312 struct media_info media[DE_MAX_MEDIA];
313 struct timer_list media_timer;
314
315 u8 *ee_data;
316 unsigned board_idx;
317 unsigned de21040 : 1;
318 unsigned media_lock : 1;
319 };
320
321
322 static void de_set_rx_mode (struct net_device *dev);
323 static void de_tx (struct de_private *de);
324 static void de_clean_rings (struct de_private *de);
325 static void de_media_interrupt (struct de_private *de, u32 status);
326 static void de21040_media_timer (unsigned long data);
327 static void de21041_media_timer (unsigned long data);
328 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
329
330
331 static struct pci_device_id de_pci_tbl[] = {
332 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
333 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
334 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
335 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
336 { },
337 };
338 MODULE_DEVICE_TABLE(pci, de_pci_tbl);
339
340 static const char * const media_name[DE_MAX_MEDIA] = {
341 "10baseT auto",
342 "BNC",
343 "AUI",
344 "10baseT-HD",
345 "10baseT-FD"
346 };
347
348 /* 21040 transceiver register settings:
349 * TP AUTO (unused), BNC (unused), AUI, TP, TP FD */
350 static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
351 static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
352 static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
353
354 /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
355 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
356 static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
357 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
358
359
360 #define dr32(reg) readl(de->regs + (reg))
361 #define dw32(reg,val) writel((val), de->regs + (reg))
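/* Note that both macros capture a variable named "de" from the
calling scope, so they can only appear in functions that declare a
local struct de_private *de. */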
362
363
364 static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
365 u32 status, u32 len)
366 {
367 if (netif_msg_rx_err (de))
368 printk (KERN_DEBUG
369 "%s: rx err, slot %d status 0x%x len %d\n",
370 de->dev->name, rx_tail, status, len);
371
372 if ((status & 0x38000300) != 0x0300) {
373 /* Ignore earlier buffers. */
374 if ((status & 0xffff) != 0x7fff) {
375 if (netif_msg_rx_err(de))
376 printk(KERN_WARNING "%s: Oversized Ethernet frame "
377 "spanned multiple buffers, status %8.8x!\n",
378 de->dev->name, status);
379 de->net_stats.rx_length_errors++;
380 }
381 } else if (status & RxError) {
382 /* There was a fatal error. */
383 de->net_stats.rx_errors++; /* end of a packet.*/
384 if (status & 0x0890) de->net_stats.rx_length_errors++;
385 if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
386 if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
387 }
388 }
389
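/* Rx fast path. Frames no longer than rx_copybreak are copied into a
small freshly allocated skb so the full-size ring buffer can be
reused; larger frames are handed upstream and the slot is refilled
with a new buffer. The 0x38008300 test below appears to demand the
first- and last-descriptor bits (a frame contained in a single
buffer) with no error bits set; de_rx_err_acct does the failure
accounting. */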
390 static void de_rx (struct de_private *de)
391 {
392 unsigned rx_tail = de->rx_tail;
393 unsigned rx_work = DE_RX_RING_SIZE;
394 unsigned drop = 0;
395 int rc;
396
397 while (rx_work--) {
398 u32 status, len;
399 dma_addr_t mapping;
400 struct sk_buff *skb, *copy_skb;
401 unsigned copying_skb, buflen;
402
403 skb = de->rx_skb[rx_tail].skb;
404 BUG_ON(!skb);
405 rmb();
406 status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
407 if (status & DescOwn)
408 break;
409
410 len = ((status >> 16) & 0x7ff) - 4;
411 mapping = de->rx_skb[rx_tail].mapping;
412
413 if (unlikely(drop)) {
414 de->net_stats.rx_dropped++;
415 goto rx_next;
416 }
417
418 if (unlikely((status & 0x38008300) != 0x0300)) {
419 de_rx_err_acct(de, rx_tail, status, len);
420 goto rx_next;
421 }
422
423 copying_skb = (len <= rx_copybreak);
424
425 if (unlikely(netif_msg_rx_status(de)))
426 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
427 de->dev->name, rx_tail, status, len,
428 copying_skb);
429
430 buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
431 copy_skb = dev_alloc_skb (buflen);
432 if (unlikely(!copy_skb)) {
433 de->net_stats.rx_dropped++;
434 drop = 1;
435 rx_work = 100;
436 goto rx_next;
437 }
438
439 if (!copying_skb) {
440 pci_unmap_single(de->pdev, mapping,
441 buflen, PCI_DMA_FROMDEVICE);
442 skb_put(skb, len);
443
444 mapping =
445 de->rx_skb[rx_tail].mapping =
446 pci_map_single(de->pdev, copy_skb->data,
447 buflen, PCI_DMA_FROMDEVICE);
448 de->rx_skb[rx_tail].skb = copy_skb;
449 } else {
450 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
451 skb_reserve(copy_skb, RX_OFFSET);
452 skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
453 len);
454 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
455
456 /* We'll reuse the original ring buffer. */
457 skb = copy_skb;
458 }
459
460 skb->protocol = eth_type_trans (skb, de->dev);
461
462 de->net_stats.rx_packets++;
463 de->net_stats.rx_bytes += skb->len;
464 de->dev->last_rx = jiffies;
465 rc = netif_rx (skb);
466 if (rc == NET_RX_DROP)
467 drop = 1;
468
469 rx_next:
470 de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
471 if (rx_tail == (DE_RX_RING_SIZE - 1))
472 de->rx_ring[rx_tail].opts2 =
473 cpu_to_le32(RingEnd | de->rx_buf_sz);
474 else
475 de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
476 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
477 rx_tail = NEXT_RX(rx_tail);
478 }
479
480 if (!rx_work)
481 printk(KERN_WARNING "%s: rx work limit reached\n", de->dev->name);
482
483 de->rx_tail = rx_tail;
484 }
485
486 static irqreturn_t de_interrupt (int irq, void *dev_instance)
487 {
488 struct net_device *dev = dev_instance;
489 struct de_private *de = dev->priv;
490 u32 status;
491
492 status = dr32(MacStatus);
493 if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
494 return IRQ_NONE;
495
496 if (netif_msg_intr(de))
497 printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
498 dev->name, status, dr32(MacMode), de->rx_tail, de->tx_head, de->tx_tail);
499
500 dw32(MacStatus, status);
501
502 if (status & (RxIntr | RxEmpty)) {
503 de_rx(de);
504 if (status & RxEmpty)
505 dw32(RxPoll, NormalRxPoll);
506 }
507
508 spin_lock(&de->lock);
509
510 if (status & (TxIntr | TxEmpty))
511 de_tx(de);
512
513 if (status & (LinkPass | LinkFail))
514 de_media_interrupt(de, status);
515
516 spin_unlock(&de->lock);
517
518 if (status & PciErr) {
519 u16 pci_status;
520
521 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
522 pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
523 printk(KERN_ERR "%s: PCI bus error, status=%08x, PCI status=%04x\n",
524 dev->name, status, pci_status);
525 }
526
527 return IRQ_HANDLED;
528 }
529
530 static void de_tx (struct de_private *de)
531 {
532 unsigned tx_head = de->tx_head;
533 unsigned tx_tail = de->tx_tail;
534
535 while (tx_tail != tx_head) {
536 struct sk_buff *skb;
537 u32 status;
538
539 rmb();
540 status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
541 if (status & DescOwn)
542 break;
543
544 skb = de->tx_skb[tx_tail].skb;
545 BUG_ON(!skb);
546 if (unlikely(skb == DE_DUMMY_SKB))
547 goto next;
548
549 if (unlikely(skb == DE_SETUP_SKB)) {
550 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
551 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
552 goto next;
553 }
554
555 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
556 skb->len, PCI_DMA_TODEVICE);
557
558 if (status & LastFrag) {
559 if (status & TxError) {
560 if (netif_msg_tx_err(de))
561 printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
562 de->dev->name, status);
563 de->net_stats.tx_errors++;
564 if (status & TxOWC)
565 de->net_stats.tx_window_errors++;
566 if (status & TxMaxCol)
567 de->net_stats.tx_aborted_errors++;
568 if (status & TxLinkFail)
569 de->net_stats.tx_carrier_errors++;
570 if (status & TxFIFOUnder)
571 de->net_stats.tx_fifo_errors++;
572 } else {
573 de->net_stats.tx_packets++;
574 de->net_stats.tx_bytes += skb->len;
575 if (netif_msg_tx_done(de))
576 printk(KERN_DEBUG "%s: tx done, slot %d\n", de->dev->name, tx_tail);
577 }
578 dev_kfree_skb_irq(skb);
579 }
580
581 next:
582 de->tx_skb[tx_tail].skb = NULL;
583
584 tx_tail = NEXT_TX(tx_tail);
585 }
586
587 de->tx_tail = tx_tail;
588
589 if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
590 netif_wake_queue(de->dev);
591 }
592
593 static int de_start_xmit (struct sk_buff *skb, struct net_device *dev)
594 {
595 struct de_private *de = dev->priv;
596 unsigned int entry, tx_free;
597 u32 mapping, len, flags = FirstFrag | LastFrag;
598 struct de_desc *txd;
599
600 spin_lock_irq(&de->lock);
601
602 tx_free = TX_BUFFS_AVAIL(de);
603 if (tx_free == 0) {
604 netif_stop_queue(dev);
605 spin_unlock_irq(&de->lock);
606 return 1;
607 }
608 tx_free--;
609
610 entry = de->tx_head;
611
612 txd = &de->tx_ring[entry];
613
614 len = skb->len;
615 mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
616 if (entry == (DE_TX_RING_SIZE - 1))
617 flags |= RingEnd;
618 if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
619 flags |= TxSwInt;
620 flags |= len;
621 txd->opts2 = cpu_to_le32(flags);
622 txd->addr1 = cpu_to_le32(mapping);
623
624 de->tx_skb[entry].skb = skb;
625 de->tx_skb[entry].mapping = mapping;
626 wmb();
627
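/* The wmb() above orders the descriptor body and tx_skb[]
bookkeeping before the DescOwn store below, which is the commit
point: once the chip owns the descriptor it may DMA it at any time.
The second wmb() after the store keeps the ownership update ahead of
the TxPoll doorbell written once the lock is dropped. */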
628 txd->opts1 = cpu_to_le32(DescOwn);
629 wmb();
630
631 de->tx_head = NEXT_TX(entry);
632 if (netif_msg_tx_queued(de))
633 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
634 dev->name, entry, skb->len);
635
636 if (tx_free == 0)
637 netif_stop_queue(dev);
638
639 spin_unlock_irq(&de->lock);
640
641 /* Trigger an immediate transmit demand. */
642 dw32(TxPoll, NormalTxPoll);
643 dev->trans_start = jiffies;
644
645 return 0;
646 }
647
648 /* Set or clear the multicast filter for this adaptor.
649 Note that we only use exclusion around actually queueing the
650 new frame, not around filling de->setup_frame. This is non-deterministic
651 when re-entered but still correct. */
652
653 #undef set_bit_le
654 #define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
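/* Example: set_bit_le(255, p) sets bit 255 % 8 == 7 of byte
255 / 8 == 31, i.e. the last bit of the 512-bit little-endian hash
filter; that is the entry this driver reserves for broadcast frames
in build_setup_frame_hash below. */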
655
656 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
657 {
658 struct de_private *de = dev->priv;
659 u16 hash_table[32];
660 struct dev_mc_list *mclist;
661 int i;
662 u16 *eaddrs;
663
664 memset(hash_table, 0, sizeof(hash_table));
665 set_bit_le(255, hash_table); /* Broadcast entry */
666 /* This should work on big-endian machines as well. */
667 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
668 i++, mclist = mclist->next) {
669 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
670
671 set_bit_le(index, hash_table);
672 }
673
674 for (i = 0; i < 32; i++) {
675 *setup_frm++ = hash_table[i];
676 *setup_frm++ = hash_table[i];
677 }
678 setup_frm = &de->setup_frame[13*6];
679
680 /* Fill the final entry with our physical address. */
681 eaddrs = (u16 *)dev->dev_addr;
682 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
683 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
684 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
685 }
686
687 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
688 {
689 struct de_private *de = dev->priv;
690 struct dev_mc_list *mclist;
691 int i;
692 u16 *eaddrs;
693
694 /* We have <= 14 addresses so we can use the wonderful
695 16 address perfect filtering of the Tulip. */
696 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
697 i++, mclist = mclist->next) {
698 eaddrs = (u16 *)mclist->dmi_addr;
699 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
700 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
701 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
702 }
703 /* Fill the unused entries with the broadcast address. */
704 memset(setup_frm, 0xff, (15-i)*12);
705 setup_frm = &de->setup_frame[15*6];
706
707 /* Fill the final entry with our physical address. */
708 eaddrs = (u16 *)dev->dev_addr;
709 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
710 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
711 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
712 }
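/* Setup-frame layout shared by both builders: 16 filter entries of
three 16-bit address words, each word stored twice, 96 words in all
(DE_SETUP_FRAME_WORDS). The duplication keeps the significant
low-address half of every longword populated on either endianness,
as the comment in __de_set_rx_mode notes. In hash mode the 32-word
table fills the front of the frame and the station address goes at
word offset 13*6. */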
713
714
715 static void __de_set_rx_mode (struct net_device *dev)
716 {
717 struct de_private *de = dev->priv;
718 u32 macmode;
719 unsigned int entry;
720 u32 mapping;
721 struct de_desc *txd;
722 struct de_desc *dummy_txd = NULL;
723
724 macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
725
726 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
727 macmode |= AcceptAllMulticast | AcceptAllPhys;
728 goto out;
729 }
730
731 if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
732 /* Too many to filter well -- accept all multicasts. */
733 macmode |= AcceptAllMulticast;
734 goto out;
735 }
736
737 /* Note that only the low-address shortword of setup_frame is valid!
738 The values are doubled for big-endian architectures. */
739 if (dev->mc_count > 14) /* Must use a multicast hash table. */
740 build_setup_frame_hash (de->setup_frame, dev);
741 else
742 build_setup_frame_perfect (de->setup_frame, dev);
743
744 /*
745 * Now add this frame to the Tx list.
746 */
747
748 entry = de->tx_head;
749
750 /* Avoid a chip erratum by prefixing a dummy entry. */
751 if (entry != 0) {
752 de->tx_skb[entry].skb = DE_DUMMY_SKB;
753
754 dummy_txd = &de->tx_ring[entry];
755 dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
756 cpu_to_le32(RingEnd) : 0;
757 dummy_txd->addr1 = 0;
758
759 /* Must set DescOwn later to avoid a race with the chip */
760
761 entry = NEXT_TX(entry);
762 }
763
764 de->tx_skb[entry].skb = DE_SETUP_SKB;
765 de->tx_skb[entry].mapping = mapping =
766 pci_map_single (de->pdev, de->setup_frame,
767 sizeof (de->setup_frame), PCI_DMA_TODEVICE);
768
769 /* Put the setup frame on the Tx list. */
770 txd = &de->tx_ring[entry];
771 if (entry == (DE_TX_RING_SIZE - 1))
772 txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
773 else
774 txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
775 txd->addr1 = cpu_to_le32(mapping);
776 wmb();
777
778 txd->opts1 = cpu_to_le32(DescOwn);
779 wmb();
780
781 if (dummy_txd) {
782 dummy_txd->opts1 = cpu_to_le32(DescOwn);
783 wmb();
784 }
785
786 de->tx_head = NEXT_TX(entry);
787
788 BUG_ON(TX_BUFFS_AVAIL(de) < 0);
789 if (TX_BUFFS_AVAIL(de) == 0)
790 netif_stop_queue(dev);
791
792 /* Trigger an immediate transmit demand. */
793 dw32(TxPoll, NormalTxPoll);
794
795 out:
796 if (macmode != dr32(MacMode))
797 dw32(MacMode, macmode);
798 }
799
800 static void de_set_rx_mode (struct net_device *dev)
801 {
802 unsigned long flags;
803 struct de_private *de = dev->priv;
804
805 spin_lock_irqsave (&de->lock, flags);
806 __de_set_rx_mode(dev);
807 spin_unlock_irqrestore (&de->lock, flags);
808 }
809
810 static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
811 {
812 if (unlikely(rx_missed & RxMissedOver))
813 de->net_stats.rx_missed_errors += RxMissedMask;
814 else
815 de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
816 }
817
818 static void __de_get_stats(struct de_private *de)
819 {
820 u32 tmp = dr32(RxMissed); /* self-clearing */
821
822 de_rx_missed(de, tmp);
823 }
824
825 static struct net_device_stats *de_get_stats(struct net_device *dev)
826 {
827 struct de_private *de = dev->priv;
828
829 /* The chip only needs to report frames silently dropped. */
830 spin_lock_irq(&de->lock);
831 if (netif_running(dev) && netif_device_present(dev))
832 __de_get_stats(de);
833 spin_unlock_irq(&de->lock);
834
835 return &de->net_stats;
836 }
837
838 static inline int de_is_running (struct de_private *de)
839 {
840 return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
841 }
842
843 static void de_stop_rxtx (struct de_private *de)
844 {
845 u32 macmode;
846 unsigned int work = 1000;
847
848 macmode = dr32(MacMode);
849 if (macmode & RxTx) {
850 dw32(MacMode, macmode & ~RxTx);
851 dr32(MacMode);
852 }
853
854 while (--work > 0) {
855 if (!de_is_running(de))
856 return;
857 cpu_relax();
858 }
859
860 printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
861 }
862
863 static inline void de_start_rxtx (struct de_private *de)
864 {
865 u32 macmode;
866
867 macmode = dr32(MacMode);
868 if ((macmode & RxTx) != RxTx) {
869 dw32(MacMode, macmode | RxTx);
870 dr32(MacMode);
871 }
872 }
873
874 static void de_stop_hw (struct de_private *de)
875 {
876
877 udelay(5);
878 dw32(IntrMask, 0);
879
880 de_stop_rxtx(de);
881
882 dw32(MacStatus, dr32(MacStatus));
883
884 udelay(10);
885
886 de->rx_tail = 0;
887 de->tx_head = de->tx_tail = 0;
888 }
889
890 static void de_link_up(struct de_private *de)
891 {
892 if (!netif_carrier_ok(de->dev)) {
893 netif_carrier_on(de->dev);
894 if (netif_msg_link(de))
895 printk(KERN_INFO "%s: link up, media %s\n",
896 de->dev->name, media_name[de->media_type]);
897 }
898 }
899
900 static void de_link_down(struct de_private *de)
901 {
902 if (netif_carrier_ok(de->dev)) {
903 netif_carrier_off(de->dev);
904 if (netif_msg_link(de))
905 printk(KERN_INFO "%s: link down\n", de->dev->name);
906 }
907 }
908
909 static void de_set_media (struct de_private *de)
910 {
911 unsigned media = de->media_type;
912 u32 macmode = dr32(MacMode);
913
914 BUG_ON(de_is_running(de));
915
916 if (de->de21040)
917 dw32(CSR11, FULL_DUPLEX_MAGIC);
918 dw32(CSR13, 0); /* Reset phy */
919 dw32(CSR14, de->media[media].csr14);
920 dw32(CSR15, de->media[media].csr15);
921 dw32(CSR13, de->media[media].csr13);
922
923 /* must delay 10ms before writing to other registers,
924 * especially CSR6
925 */
926 mdelay(10);
927
928 if (media == DE_MEDIA_TP_FD)
929 macmode |= FullDuplex;
930 else
931 macmode &= ~FullDuplex;
932
933 if (netif_msg_link(de)) {
934 printk(KERN_INFO "%s: set link %s\n"
935 KERN_INFO "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
936 KERN_INFO "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
937 de->dev->name, media_name[media],
938 de->dev->name, dr32(MacMode), dr32(SIAStatus),
939 dr32(CSR13), dr32(CSR14), dr32(CSR15),
940 de->dev->name, macmode, de->media[media].csr13,
941 de->media[media].csr14, de->media[media].csr15);
942 }
943 if (macmode != dr32(MacMode))
944 dw32(MacMode, macmode);
945 }
946
947 static void de_next_media (struct de_private *de, u32 *media,
948 unsigned int n_media)
949 {
950 unsigned int i;
951
952 for (i = 0; i < n_media; i++) {
953 if (de_ok_to_advertise(de, media[i])) {
954 de->media_type = media[i];
955 return;
956 }
957 }
958 }
959
960 static void de21040_media_timer (unsigned long data)
961 {
962 struct de_private *de = (struct de_private *) data;
963 struct net_device *dev = de->dev;
964 u32 status = dr32(SIAStatus);
965 unsigned int carrier;
966 unsigned long flags;
967
968 carrier = (status & NetCxnErr) ? 0 : 1;
969
970 if (carrier) {
971 if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
972 goto no_link_yet;
973
974 de->media_timer.expires = jiffies + DE_TIMER_LINK;
975 add_timer(&de->media_timer);
976 if (!netif_carrier_ok(dev))
977 de_link_up(de);
978 else
979 if (netif_msg_timer(de))
980 printk(KERN_INFO "%s: %s link ok, status %x\n",
981 dev->name, media_name[de->media_type],
982 status);
983 return;
984 }
985
986 de_link_down(de);
987
988 if (de->media_lock)
989 return;
990
991 if (de->media_type == DE_MEDIA_AUI) {
992 u32 next_state = DE_MEDIA_TP;
993 de_next_media(de, &next_state, 1);
994 } else {
995 u32 next_state = DE_MEDIA_AUI;
996 de_next_media(de, &next_state, 1);
997 }
998
999 spin_lock_irqsave(&de->lock, flags);
1000 de_stop_rxtx(de);
1001 spin_unlock_irqrestore(&de->lock, flags);
1002 de_set_media(de);
1003 de_start_rxtx(de);
1004
1005 no_link_yet:
1006 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1007 add_timer(&de->media_timer);
1008
1009 if (netif_msg_timer(de))
1010 printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
1011 dev->name, media_name[de->media_type], status);
1012 }
1013
1014 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1015 {
1016 switch (new_media) {
1017 case DE_MEDIA_TP_AUTO:
1018 if (!(de->media_advertise & ADVERTISED_Autoneg))
1019 return 0;
1020 if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1021 return 0;
1022 break;
1023 case DE_MEDIA_BNC:
1024 if (!(de->media_advertise & ADVERTISED_BNC))
1025 return 0;
1026 break;
1027 case DE_MEDIA_AUI:
1028 if (!(de->media_advertise & ADVERTISED_AUI))
1029 return 0;
1030 break;
1031 case DE_MEDIA_TP:
1032 if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1033 return 0;
1034 break;
1035 case DE_MEDIA_TP_FD:
1036 if (!(de->media_advertise & ADVERTISED_10baseT_Full))
1037 return 0;
1038 break;
1039 }
1040
1041 return 1;
1042 }
1043
1044 static void de21041_media_timer (unsigned long data)
1045 {
1046 struct de_private *de = (struct de_private *) data;
1047 struct net_device *dev = de->dev;
1048 u32 status = dr32(SIAStatus);
1049 unsigned int carrier;
1050 unsigned long flags;
1051
1052 carrier = (status & NetCxnErr) ? 0 : 1;
1053
1054 if (carrier) {
1055 if ((de->media_type == DE_MEDIA_TP_AUTO ||
1056 de->media_type == DE_MEDIA_TP ||
1057 de->media_type == DE_MEDIA_TP_FD) &&
1058 (status & LinkFailStatus))
1059 goto no_link_yet;
1060
1061 de->media_timer.expires = jiffies + DE_TIMER_LINK;
1062 add_timer(&de->media_timer);
1063 if (!netif_carrier_ok(dev))
1064 de_link_up(de);
1065 else
1066 if (netif_msg_timer(de))
1067 printk(KERN_INFO "%s: %s link ok, mode %x status %x\n",
1068 dev->name, media_name[de->media_type],
1069 dr32(MacMode), status);
1070 return;
1071 }
1072
1073 de_link_down(de);
1074
1075 /* if media type locked, don't switch media */
1076 if (de->media_lock)
1077 goto set_media;
1078
1079 /* if activity detected, use that as hint for new media type */
1080 if (status & NonselPortActive) {
1081 unsigned int have_media = 1;
1082
1083 /* if AUI/BNC selected, then activity is on TP port */
1084 if (de->media_type == DE_MEDIA_AUI ||
1085 de->media_type == DE_MEDIA_BNC) {
1086 if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
1087 de->media_type = DE_MEDIA_TP_AUTO;
1088 else
1089 have_media = 0;
1090 }
1091
1092 /* TP selected. If there is only TP and BNC, then it's BNC */
1093 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
1094 de_ok_to_advertise(de, DE_MEDIA_BNC))
1095 de->media_type = DE_MEDIA_BNC;
1096
1097 /* TP selected. If there is only TP and AUI, then it's AUI */
1098 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
1099 de_ok_to_advertise(de, DE_MEDIA_AUI))
1100 de->media_type = DE_MEDIA_AUI;
1101
1102 /* otherwise, ignore the hint */
1103 else
1104 have_media = 0;
1105
1106 if (have_media)
1107 goto set_media;
1108 }
1109
1110 /*
1111 * Absent or ambiguous activity hint, move to next advertised
1112 * media state. If de->media_type is left unchanged, this
1113 * simply resets the PHY and reloads the current media settings.
1114 */
1115 if (de->media_type == DE_MEDIA_AUI) {
1116 u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1117 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1118 } else if (de->media_type == DE_MEDIA_BNC) {
1119 u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI };
1120 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1121 } else {
1122 u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1123 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1124 }
1125
1126 set_media:
1127 spin_lock_irqsave(&de->lock, flags);
1128 de_stop_rxtx(de);
1129 spin_unlock_irqrestore(&de->lock, flags);
1130 de_set_media(de);
1131 de_start_rxtx(de);
1132
1133 no_link_yet:
1134 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1135 add_timer(&de->media_timer);
1136
1137 if (netif_msg_timer(de))
1138 printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
1139 dev->name, media_name[de->media_type], status);
1140 }
1141
1142 static void de_media_interrupt (struct de_private *de, u32 status)
1143 {
1144 if (status & LinkPass) {
1145 de_link_up(de);
1146 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1147 return;
1148 }
1149
1150 BUG_ON(!(status & LinkFail));
1151
1152 if (netif_carrier_ok(de->dev)) {
1153 de_link_down(de);
1154 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1155 }
1156 }
1157
1158 static int de_reset_mac (struct de_private *de)
1159 {
1160 u32 status, tmp;
1161
1162 /*
1163 * Reset MAC. de4x5.c and tulip.c examined for "advice"
1164 * in this area.
1165 */
1166
1167 if (dr32(BusMode) == 0xffffffff)
1168 return -EBUSY;
1169
1170 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
1171 dw32 (BusMode, CmdReset);
1172 mdelay (1);
1173
1174 dw32 (BusMode, de_bus_mode);
1175 mdelay (1);
1176
1177 for (tmp = 0; tmp < 5; tmp++) {
1178 dr32 (BusMode);
1179 mdelay (1);
1180 }
1181
1182 mdelay (1);
1183
1184 status = dr32(MacStatus);
1185 if (status & (RxState | TxState))
1186 return -EBUSY;
1187 if (status == 0xffffffff)
1188 return -ENODEV;
1189 return 0;
1190 }
1191
1192 static void de_adapter_wake (struct de_private *de)
1193 {
1194 u32 pmctl;
1195
1196 if (de->de21040)
1197 return;
1198
1199 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1200 if (pmctl & PM_Mask) {
1201 pmctl &= ~PM_Mask;
1202 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1203
1204 /* de4x5.c delays, so we do too */
1205 msleep(10);
1206 }
1207 }
1208
1209 static void de_adapter_sleep (struct de_private *de)
1210 {
1211 u32 pmctl;
1212
1213 if (de->de21040)
1214 return;
1215
1216 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1217 pmctl |= PM_Sleep;
1218 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1219 }
1220
1221 static int de_init_hw (struct de_private *de)
1222 {
1223 struct net_device *dev = de->dev;
1224 u32 macmode;
1225 int rc;
1226
1227 de_adapter_wake(de);
1228
1229 macmode = dr32(MacMode) & ~MacModeClear;
1230
1231 rc = de_reset_mac(de);
1232 if (rc)
1233 return rc;
1234
1235 de_set_media(de); /* reset phy */
1236
1237 dw32(RxRingAddr, de->ring_dma);
1238 dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
1239
1240 dw32(MacMode, RxTx | macmode);
1241
1242 dr32(RxMissed); /* self-clearing */
1243
1244 dw32(IntrMask, de_intr_mask);
1245
1246 de_set_rx_mode(dev);
1247
1248 return 0;
1249 }
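/* Bring-up order above: wake the chip, reset the MAC, program the
SIA for the current media, load both ring base addresses, enable the
Rx/Tx engines, clear the self-clearing RxMissed counter by reading
it, and only then unmask interrupts and load the Rx filter. */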
1250
1251 static int de_refill_rx (struct de_private *de)
1252 {
1253 unsigned i;
1254
1255 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1256 struct sk_buff *skb;
1257
1258 skb = dev_alloc_skb(de->rx_buf_sz);
1259 if (!skb)
1260 goto err_out;
1261
1262 skb->dev = de->dev;
1263
1264 de->rx_skb[i].mapping = pci_map_single(de->pdev,
1265 skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1266 de->rx_skb[i].skb = skb;
1267
1268 de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1269 if (i == (DE_RX_RING_SIZE - 1))
1270 de->rx_ring[i].opts2 =
1271 cpu_to_le32(RingEnd | de->rx_buf_sz);
1272 else
1273 de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1274 de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1275 de->rx_ring[i].addr2 = 0;
1276 }
1277
1278 return 0;
1279
1280 err_out:
1281 de_clean_rings(de);
1282 return -ENOMEM;
1283 }
1284
1285 static int de_init_rings (struct de_private *de)
1286 {
1287 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1288 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1289
1290 de->rx_tail = 0;
1291 de->tx_head = de->tx_tail = 0;
1292
1293 return de_refill_rx (de);
1294 }
1295
1296 static int de_alloc_rings (struct de_private *de)
1297 {
1298 de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1299 if (!de->rx_ring)
1300 return -ENOMEM;
1301 de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1302 return de_init_rings(de);
1303 }
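/* The Rx and Tx rings share a single coherent DMA allocation:
rx_ring sits at ring_dma and tx_ring follows at
ring_dma + sizeof(struct de_desc) * DE_RX_RING_SIZE, matching the
offset de_init_hw programs into TxRingAddr. */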
1304
1305 static void de_clean_rings (struct de_private *de)
1306 {
1307 unsigned i;
1308
1309 memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
1310 de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1311 wmb();
1312 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1313 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1314 wmb();
1315
1316 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1317 if (de->rx_skb[i].skb) {
1318 pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
1319 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1320 dev_kfree_skb(de->rx_skb[i].skb);
1321 }
1322 }
1323
1324 for (i = 0; i < DE_TX_RING_SIZE; i++) {
1325 struct sk_buff *skb = de->tx_skb[i].skb;
1326 if ((skb) && (skb != DE_DUMMY_SKB)) {
1327 if (skb != DE_SETUP_SKB) {
1328 de->net_stats.tx_dropped++;
1329 pci_unmap_single(de->pdev,
1330 de->tx_skb[i].mapping,
1331 skb->len, PCI_DMA_TODEVICE);
1332 dev_kfree_skb(skb);
1333 } else {
1334 pci_unmap_single(de->pdev,
1335 de->tx_skb[i].mapping,
1336 sizeof(de->setup_frame),
1337 PCI_DMA_TODEVICE);
1338 }
1339 }
1340 }
1341
1342 memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
1343 memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
1344 }
1345
1346 static void de_free_rings (struct de_private *de)
1347 {
1348 de_clean_rings(de);
1349 pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
1350 de->rx_ring = NULL;
1351 de->tx_ring = NULL;
1352 }
1353
1354 static int de_open (struct net_device *dev)
1355 {
1356 struct de_private *de = dev->priv;
1357 int rc;
1358
1359 if (netif_msg_ifup(de))
1360 printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
1361
1362 de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1363
1364 rc = de_alloc_rings(de);
1365 if (rc) {
1366 printk(KERN_ERR "%s: ring allocation failure, err=%d\n",
1367 dev->name, rc);
1368 return rc;
1369 }
1370
1371 dw32(IntrMask, 0);
1372
1373 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
1374 if (rc) {
1375 printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
1376 dev->name, dev->irq, rc);
1377 goto err_out_free;
1378 }
1379
1380 rc = de_init_hw(de);
1381 if (rc) {
1382 printk(KERN_ERR "%s: h/w init failure, err=%d\n",
1383 dev->name, rc);
1384 goto err_out_free_irq;
1385 }
1386
1387 netif_start_queue(dev);
1388 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1389
1390 return 0;
1391
1392 err_out_free_irq:
1393 free_irq(dev->irq, dev);
1394 err_out_free:
1395 de_free_rings(de);
1396 return rc;
1397 }
1398
1399 static int de_close (struct net_device *dev)
1400 {
1401 struct de_private *de = dev->priv;
1402 unsigned long flags;
1403
1404 if (netif_msg_ifdown(de))
1405 printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1406
1407 del_timer_sync(&de->media_timer);
1408
1409 spin_lock_irqsave(&de->lock, flags);
1410 de_stop_hw(de);
1411 netif_stop_queue(dev);
1412 netif_carrier_off(dev);
1413 spin_unlock_irqrestore(&de->lock, flags);
1414
1415 free_irq(dev->irq, dev);
1416
1417 de_free_rings(de);
1418 de_adapter_sleep(de);
1419 pci_disable_device(de->pdev);
1420 return 0;
1421 }
1422
1423 static void de_tx_timeout (struct net_device *dev)
1424 {
1425 struct de_private *de = dev->priv;
1426
1427 printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1428 dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
1429 de->rx_tail, de->tx_head, de->tx_tail);
1430
1431 del_timer_sync(&de->media_timer);
1432
1433 disable_irq(dev->irq);
1434 spin_lock_irq(&de->lock);
1435
1436 de_stop_hw(de);
1437 netif_stop_queue(dev);
1438 netif_carrier_off(dev);
1439
1440 spin_unlock_irq(&de->lock);
1441 enable_irq(dev->irq);
1442
1443 /* Update the error counts. */
1444 __de_get_stats(de);
1445
1446 synchronize_irq(dev->irq);
1447 de_clean_rings(de);
1448
1449 de_init_rings(de);
1450
1451 de_init_hw(de);
1452
1453 netif_wake_queue(dev);
1454 }
1455
1456 static void __de_get_regs(struct de_private *de, u8 *buf)
1457 {
1458 int i;
1459 u32 *rbuf = (u32 *)buf;
1460
1461 /* read all CSRs */
1462 for (i = 0; i < DE_NUM_REGS; i++)
1463 rbuf[i] = dr32(i * 8);
1464
1465 /* handle self-clearing RxMissed counter, CSR8 */
1466 de_rx_missed(de, rbuf[8]);
1467 }
1468
1469 static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1470 {
1471 ecmd->supported = de->media_supported;
1472 ecmd->transceiver = XCVR_INTERNAL;
1473 ecmd->phy_address = 0;
1474 ecmd->advertising = de->media_advertise;
1475
1476 switch (de->media_type) {
1477 case DE_MEDIA_AUI:
1478 ecmd->port = PORT_AUI;
1479 ecmd->speed = 5;
1480 break;
1481 case DE_MEDIA_BNC:
1482 ecmd->port = PORT_BNC;
1483 ecmd->speed = 2;
1484 break;
1485 default:
1486 ecmd->port = PORT_TP;
1487 ecmd->speed = SPEED_10;
1488 break;
1489 }
1490
1491 if (dr32(MacMode) & FullDuplex)
1492 ecmd->duplex = DUPLEX_FULL;
1493 else
1494 ecmd->duplex = DUPLEX_HALF;
1495
1496 if (de->media_lock)
1497 ecmd->autoneg = AUTONEG_DISABLE;
1498 else
1499 ecmd->autoneg = AUTONEG_ENABLE;
1500
1501 /* ignore maxtxpkt, maxrxpkt for now */
1502
1503 return 0;
1504 }
1505
1506 static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1507 {
1508 u32 new_media;
1509 unsigned int media_lock;
1510
1511 if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
1512 return -EINVAL;
1513 if (de->de21040 && ecmd->speed == 2)
1514 return -EINVAL;
1515 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1516 return -EINVAL;
1517 if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
1518 return -EINVAL;
1519 if (de->de21040 && ecmd->port == PORT_BNC)
1520 return -EINVAL;
1521 if (ecmd->transceiver != XCVR_INTERNAL)
1522 return -EINVAL;
1523 if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
1524 return -EINVAL;
1525 if (ecmd->advertising & ~de->media_supported)
1526 return -EINVAL;
1527 if (ecmd->autoneg == AUTONEG_ENABLE &&
1528 (!(ecmd->advertising & ADVERTISED_Autoneg)))
1529 return -EINVAL;
1530
1531 switch (ecmd->port) {
1532 case PORT_AUI:
1533 new_media = DE_MEDIA_AUI;
1534 if (!(ecmd->advertising & ADVERTISED_AUI))
1535 return -EINVAL;
1536 break;
1537 case PORT_BNC:
1538 new_media = DE_MEDIA_BNC;
1539 if (!(ecmd->advertising & ADVERTISED_BNC))
1540 return -EINVAL;
1541 break;
1542 default:
1543 if (ecmd->autoneg == AUTONEG_ENABLE)
1544 new_media = DE_MEDIA_TP_AUTO;
1545 else if (ecmd->duplex == DUPLEX_FULL)
1546 new_media = DE_MEDIA_TP_FD;
1547 else
1548 new_media = DE_MEDIA_TP;
1549 if (!(ecmd->advertising & ADVERTISED_TP))
1550 return -EINVAL;
1551 if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
1552 return -EINVAL;
1553 break;
1554 }
1555
1556 media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
1557
1558 if ((new_media == de->media_type) &&
1559 (media_lock == de->media_lock) &&
1560 (ecmd->advertising == de->media_advertise))
1561 return 0; /* nothing to change */
1562
1563 de_link_down(de);
1564 de_stop_rxtx(de);
1565
1566 de->media_type = new_media;
1567 de->media_lock = media_lock;
1568 de->media_advertise = ecmd->advertising;
1569 de_set_media(de);
1570
1571 return 0;
1572 }
1573
1574 static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1575 {
1576 struct de_private *de = dev->priv;
1577
1578 strcpy (info->driver, DRV_NAME);
1579 strcpy (info->version, DRV_VERSION);
1580 strcpy (info->bus_info, pci_name(de->pdev));
1581 info->eedump_len = DE_EEPROM_SIZE;
1582 }
1583
1584 static int de_get_regs_len(struct net_device *dev)
1585 {
1586 return DE_REGS_SIZE;
1587 }
1588
1589 static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1590 {
1591 struct de_private *de = dev->priv;
1592 int rc;
1593
1594 spin_lock_irq(&de->lock);
1595 rc = __de_get_settings(de, ecmd);
1596 spin_unlock_irq(&de->lock);
1597
1598 return rc;
1599 }
1600
1601 static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1602 {
1603 struct de_private *de = dev->priv;
1604 int rc;
1605
1606 spin_lock_irq(&de->lock);
1607 rc = __de_set_settings(de, ecmd);
1608 spin_unlock_irq(&de->lock);
1609
1610 return rc;
1611 }
1612
1613 static u32 de_get_msglevel(struct net_device *dev)
1614 {
1615 struct de_private *de = dev->priv;
1616
1617 return de->msg_enable;
1618 }
1619
1620 static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1621 {
1622 struct de_private *de = dev->priv;
1623
1624 de->msg_enable = msglvl;
1625 }
1626
1627 static int de_get_eeprom(struct net_device *dev,
1628 struct ethtool_eeprom *eeprom, u8 *data)
1629 {
1630 struct de_private *de = dev->priv;
1631
1632 if (!de->ee_data)
1633 return -EOPNOTSUPP;
1634 if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1635 (eeprom->len != DE_EEPROM_SIZE))
1636 return -EINVAL;
1637 memcpy(data, de->ee_data, eeprom->len);
1638
1639 return 0;
1640 }
1641
1642 static int de_nway_reset(struct net_device *dev)
1643 {
1644 struct de_private *de = dev->priv;
1645 u32 status;
1646
1647 if (de->media_type != DE_MEDIA_TP_AUTO)
1648 return -EINVAL;
1649 if (netif_carrier_ok(de->dev))
1650 de_link_down(de);
1651
1652 status = dr32(SIAStatus);
1653 dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1654 if (netif_msg_link(de))
1655 printk(KERN_INFO "%s: link nway restart, status %x,%x\n",
1656 de->dev->name, status, dr32(SIAStatus));
1657 return 0;
1658 }
1659
1660 static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1661 void *data)
1662 {
1663 struct de_private *de = dev->priv;
1664
1665 regs->version = (DE_REGS_VER << 2) | de->de21040;
1666
1667 spin_lock_irq(&de->lock);
1668 __de_get_regs(de, data);
1669 spin_unlock_irq(&de->lock);
1670 }
1671
1672 static const struct ethtool_ops de_ethtool_ops = {
1673 .get_link = ethtool_op_get_link,
1674 .get_tx_csum = ethtool_op_get_tx_csum,
1675 .get_sg = ethtool_op_get_sg,
1676 .get_drvinfo = de_get_drvinfo,
1677 .get_regs_len = de_get_regs_len,
1678 .get_settings = de_get_settings,
1679 .set_settings = de_set_settings,
1680 .get_msglevel = de_get_msglevel,
1681 .set_msglevel = de_set_msglevel,
1682 .get_eeprom = de_get_eeprom,
1683 .nway_reset = de_nway_reset,
1684 .get_regs = de_get_regs,
1685 };
1686
1687 static void __devinit de21040_get_mac_address (struct de_private *de)
1688 {
1689 unsigned i;
1690
1691 dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
1692
1693 for (i = 0; i < 6; i++) {
1694 int value, boguscnt = 100000;
1695 do
1696 value = dr32(ROMCmd);
1697 while (value < 0 && --boguscnt > 0);
1698 de->dev->dev_addr[i] = value;
1699 udelay(1);
1700 if (boguscnt <= 0)
1701 printk(KERN_WARNING PFX "timeout reading 21040 MAC address byte %u\n", i);
1702 }
1703 }
1704
1705 static void __devinit de21040_get_media_info(struct de_private *de)
1706 {
1707 unsigned int i;
1708
1709 de->media_type = DE_MEDIA_TP;
1710 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1711 SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1712 de->media_advertise = de->media_supported;
1713
1714 for (i = 0; i < DE_MAX_MEDIA; i++) {
1715 switch (i) {
1716 case DE_MEDIA_AUI:
1717 case DE_MEDIA_TP:
1718 case DE_MEDIA_TP_FD:
1719 de->media[i].type = i;
1720 de->media[i].csr13 = t21040_csr13[i];
1721 de->media[i].csr14 = t21040_csr14[i];
1722 de->media[i].csr15 = t21040_csr15[i];
1723 break;
1724 default:
1725 de->media[i].type = DE_MEDIA_INVALID;
1726 break;
1727 }
1728 }
1729 }
1730
1731 /* Note: this routine returns extra data bits for size detection. */
1732 static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
1733 {
1734 int i;
1735 unsigned retval = 0;
1736 void __iomem *ee_addr = regs + ROMCmd;
1737 int read_cmd = location | (EE_READ_CMD << addr_len);
1738
1739 writel(EE_ENB & ~EE_CS, ee_addr);
1740 writel(EE_ENB, ee_addr);
1741
1742 /* Shift the read command bits out. */
1743 for (i = 4 + addr_len; i >= 0; i--) {
1744 short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1745 writel(EE_ENB | dataval, ee_addr);
1746 readl(ee_addr);
1747 writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1748 readl(ee_addr);
1749 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1750 }
1751 writel(EE_ENB, ee_addr);
1752 readl(ee_addr);
1753
1754 for (i = 16; i > 0; i--) {
1755 writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
1756 readl(ee_addr);
1757 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1758 writel(EE_ENB, ee_addr);
1759 readl(ee_addr);
1760 }
1761
1762 /* Terminate the EEPROM access. */
1763 writel(EE_ENB & ~EE_CS, ee_addr);
1764 return retval;
1765 }
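/* The extra data bits mentioned above drive the address-width probe
in de21041_get_srom_info: reading location 0xff with an assumed 8-bit
address and testing a bit beyond the 16 data bits distinguishes
8-address-bit parts from 6-bit ones. The exact overflow behaviour is
EEPROM-part-specific, so treat this as a heuristic. */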
1766
1767 static void __devinit de21041_get_srom_info (struct de_private *de)
1768 {
1769 unsigned i, sa_offset = 0, ofs;
1770 u8 ee_data[DE_EEPROM_SIZE + 6] = {};
1771 unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
1772 struct de_srom_info_leaf *il;
1773 void *bufp;
1774
1775 /* download entire eeprom */
1776 for (i = 0; i < DE_EEPROM_WORDS; i++)
1777 ((u16 *)ee_data)[i] =
1778 le16_to_cpu(tulip_read_eeprom(de->regs, i, ee_addr_size));
1779
1780 /* DEC now has a specification but early board makers
1781 just put the address in the first EEPROM locations. */
1782 /* This does memcmp(eedata, eedata+16, 8) */
1783
1784 #ifndef CONFIG_MIPS_COBALT
1785
1786 for (i = 0; i < 8; i ++)
1787 if (ee_data[i] != ee_data[16+i])
1788 sa_offset = 20;
1789
1790 #endif
1791
1792 /* store MAC address */
1793 for (i = 0; i < 6; i ++)
1794 de->dev->dev_addr[i] = ee_data[i + sa_offset];
1795
1796 /* get offset of controller 0 info leaf. ignore 2nd byte. */
1797 ofs = ee_data[SROMC0InfoLeaf];
1798 if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
1799 goto bad_srom;
1800
1801 /* get pointer to info leaf */
1802 il = (struct de_srom_info_leaf *) &ee_data[ofs];
1803
1804 /* paranoia checks */
1805 if (il->n_blocks == 0)
1806 goto bad_srom;
1807 if ((sizeof(ee_data) - ofs) <
1808 (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
1809 goto bad_srom;
1810
1811 /* get default media type */
1812 switch (DE_UNALIGNED_16(&il->default_media)) {
1813 case 0x0001: de->media_type = DE_MEDIA_BNC; break;
1814 case 0x0002: de->media_type = DE_MEDIA_AUI; break;
1815 case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
1816 default: de->media_type = DE_MEDIA_TP_AUTO; break;
1817 }
1818
1819 if (netif_msg_probe(de))
1820 printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n",
1821 de->board_idx, ofs,
1822 media_name[de->media_type]);
1823
1824 /* init SIA register values to defaults */
1825 for (i = 0; i < DE_MAX_MEDIA; i++) {
1826 de->media[i].type = DE_MEDIA_INVALID;
1827 de->media[i].csr13 = 0xffff;
1828 de->media[i].csr14 = 0xffff;
1829 de->media[i].csr15 = 0xffff;
1830 }
1831
1832 /* parse media blocks to see what media types are supported,
1833 * and if any custom CSR values are provided
1834 */
1835 bufp = ((void *)il) + sizeof(*il);
1836 for (i = 0; i < il->n_blocks; i++) {
1837 struct de_srom_media_block *ib = bufp;
1838 unsigned idx;
1839
1840 /* index based on media type in media block */
1841 switch(ib->opts & MediaBlockMask) {
1842 case 0: /* 10baseT */
1843 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
1844 | SUPPORTED_Autoneg;
1845 idx = DE_MEDIA_TP;
1846 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1847 break;
1848 case 1: /* BNC */
1849 de->media_supported |= SUPPORTED_BNC;
1850 idx = DE_MEDIA_BNC;
1851 break;
1852 case 2: /* AUI */
1853 de->media_supported |= SUPPORTED_AUI;
1854 idx = DE_MEDIA_AUI;
1855 break;
1856 case 4: /* 10baseT-FD */
1857 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
1858 | SUPPORTED_Autoneg;
1859 idx = DE_MEDIA_TP_FD;
1860 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1861 break;
1862 default:
1863 goto bad_srom;
1864 }
1865
1866 de->media[idx].type = idx;
1867
1868 if (netif_msg_probe(de))
1869 printk(KERN_INFO "de%d: media block #%u: %s",
1870 de->board_idx, i,
1871 media_name[de->media[idx].type]);
1872
1873 bufp += sizeof (ib->opts);
1874
1875 if (ib->opts & MediaCustomCSRs) {
1876 de->media[idx].csr13 = DE_UNALIGNED_16(&ib->csr13);
1877 de->media[idx].csr14 = DE_UNALIGNED_16(&ib->csr14);
1878 de->media[idx].csr15 = DE_UNALIGNED_16(&ib->csr15);
1879 bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
1880 sizeof(ib->csr15);
1881
1882 if (netif_msg_probe(de))
1883 printk(" (%x,%x,%x)\n",
1884 de->media[idx].csr13,
1885 de->media[idx].csr14,
1886 de->media[idx].csr15);
1887
1888 } else if (netif_msg_probe(de))
1889 printk("\n");
1890
1891 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1892 break;
1893 }
1894
1895 de->media_advertise = de->media_supported;
1896
1897 fill_defaults:
1898 /* fill in defaults, for cases where custom CSRs not used */
1899 for (i = 0; i < DE_MAX_MEDIA; i++) {
1900 if (de->media[i].csr13 == 0xffff)
1901 de->media[i].csr13 = t21041_csr13[i];
1902 if (de->media[i].csr14 == 0xffff)
1903 de->media[i].csr14 = t21041_csr14[i];
1904 if (de->media[i].csr15 == 0xffff)
1905 de->media[i].csr15 = t21041_csr15[i];
1906 }
1907
1908 de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);
1909
1910 return;
1911
1912 bad_srom:
1913 /* for error cases, it's ok to assume we support all these */
1914 for (i = 0; i < DE_MAX_MEDIA; i++)
1915 de->media[i].type = i;
1916 de->media_supported =
1917 SUPPORTED_10baseT_Half |
1918 SUPPORTED_10baseT_Full |
1919 SUPPORTED_Autoneg |
1920 SUPPORTED_TP |
1921 SUPPORTED_AUI |
1922 SUPPORTED_BNC;
1923 goto fill_defaults;
1924 }
1925
1926 static int __devinit de_init_one (struct pci_dev *pdev,
1927 const struct pci_device_id *ent)
1928 {
1929 struct net_device *dev;
1930 struct de_private *de;
1931 int rc;
1932 void __iomem *regs;
1933 unsigned long pciaddr;
1934 static int board_idx = -1;
1935
1936 board_idx++;
1937
1938 #ifndef MODULE
1939 if (board_idx == 0)
1940 printk("%s", version);
1941 #endif
1942
1943 /* allocate a new ethernet device structure, and fill in defaults */
1944 dev = alloc_etherdev(sizeof(struct de_private));
1945 if (!dev)
1946 return -ENOMEM;
1947
1948 SET_MODULE_OWNER(dev);
1949 SET_NETDEV_DEV(dev, &pdev->dev);
1950 dev->open = de_open;
1951 dev->stop = de_close;
1952 dev->set_multicast_list = de_set_rx_mode;
1953 dev->hard_start_xmit = de_start_xmit;
1954 dev->get_stats = de_get_stats;
1955 dev->ethtool_ops = &de_ethtool_ops;
1956 dev->tx_timeout = de_tx_timeout;
1957 dev->watchdog_timeo = TX_TIMEOUT;
1958
1959 de = dev->priv;
1960 de->de21040 = ent->driver_data == 0 ? 1 : 0;
1961 de->pdev = pdev;
1962 de->dev = dev;
1963 de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
1964 de->board_idx = board_idx;
1965 spin_lock_init (&de->lock);
1966 init_timer(&de->media_timer);
1967 if (de->de21040)
1968 de->media_timer.function = de21040_media_timer;
1969 else
1970 de->media_timer.function = de21041_media_timer;
1971 de->media_timer.data = (unsigned long) de;
1972
1973 netif_carrier_off(dev);
1974 netif_stop_queue(dev);
1975
1976 /* wake up device, assign resources */
1977 rc = pci_enable_device(pdev);
1978 if (rc)
1979 goto err_out_free;
1980
1981 /* reserve PCI resources to ensure driver atomicity */
1982 rc = pci_request_regions(pdev, DRV_NAME);
1983 if (rc)
1984 goto err_out_disable;
1985
1986 /* check for invalid IRQ value */
1987 if (pdev->irq < 2) {
1988 rc = -EIO;
1989 printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n",
1990 pdev->irq, pci_name(pdev));
1991 goto err_out_res;
1992 }
1993
1994 dev->irq = pdev->irq;
1995
1996 /* obtain and check validity of PCI I/O address */
1997 pciaddr = pci_resource_start(pdev, 1);
1998 if (!pciaddr) {
1999 rc = -EIO;
2000 printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
2001 pci_name(pdev));
2002 goto err_out_res;
2003 }
2004 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2005 rc = -EIO;
2006 printk(KERN_ERR PFX "MMIO resource (%llx) too small on pci dev %s\n",
2007 (unsigned long long)pci_resource_len(pdev, 1), pci_name(pdev));
2008 goto err_out_res;
2009 }
2010
2011 /* remap CSR registers */
2012 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
2013 if (!regs) {
2014 rc = -EIO;
2015 printk(KERN_ERR PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
2016 (unsigned long long)pci_resource_len(pdev, 1),
2017 pciaddr, pci_name(pdev));
2018 goto err_out_res;
2019 }
2020 dev->base_addr = (unsigned long) regs;
2021 de->regs = regs;
2022
2023 de_adapter_wake(de);
2024
2025 /* make sure hardware is not running */
2026 rc = de_reset_mac(de);
2027 if (rc) {
2028 printk(KERN_ERR PFX "Cannot reset MAC, pci dev %s\n",
2029 pci_name(pdev));
2030 goto err_out_iomap;
2031 }
2032
2033 /* get MAC address, initialize default media type and
2034 * get list of supported media
2035 */
2036 if (de->de21040) {
2037 de21040_get_mac_address(de);
2038 de21040_get_media_info(de);
2039 } else {
2040 de21041_get_srom_info(de);
2041 }
2042
2043 /* register new network interface with kernel */
2044 rc = register_netdev(dev);
2045 if (rc)
2046 goto err_out_iomap;
2047
2048 /* print info about board and interface just registered */
2049 printk (KERN_INFO "%s: %s at 0x%lx, "
2050 "%02x:%02x:%02x:%02x:%02x:%02x, "
2051 "IRQ %d\n",
2052 dev->name,
2053 de->de21040 ? "21040" : "21041",
2054 dev->base_addr,
2055 dev->dev_addr[0], dev->dev_addr[1],
2056 dev->dev_addr[2], dev->dev_addr[3],
2057 dev->dev_addr[4], dev->dev_addr[5],
2058 dev->irq);
2059
2060 pci_set_drvdata(pdev, dev);
2061
2062 /* enable busmastering */
2063 pci_set_master(pdev);
2064
2065 /* put adapter to sleep */
2066 de_adapter_sleep(de);
2067
2068 return 0;
2069
2070 err_out_iomap:
2071 kfree(de->ee_data);
2072 iounmap(regs);
2073 err_out_res:
2074 pci_release_regions(pdev);
2075 err_out_disable:
2076 pci_disable_device(pdev);
2077 err_out_free:
2078 free_netdev(dev);
2079 return rc;
2080 }
2081
2082 static void __devexit de_remove_one (struct pci_dev *pdev)
2083 {
2084 struct net_device *dev = pci_get_drvdata(pdev);
2085 struct de_private *de = dev->priv;
2086
2087 BUG_ON(!dev);
2088 unregister_netdev(dev);
2089 kfree(de->ee_data);
2090 iounmap(de->regs);
2091 pci_release_regions(pdev);
2092 pci_disable_device(pdev);
2093 pci_set_drvdata(pdev, NULL);
2094 free_netdev(dev);
2095 }
2096
2097 #ifdef CONFIG_PM
2098
2099 static int de_suspend (struct pci_dev *pdev, pm_message_t state)
2100 {
2101 struct net_device *dev = pci_get_drvdata (pdev);
2102 struct de_private *de = dev->priv;
2103
2104 rtnl_lock();
2105 if (netif_running (dev)) {
2106 del_timer_sync(&de->media_timer);
2107
2108 disable_irq(dev->irq);
2109 spin_lock_irq(&de->lock);
2110
2111 de_stop_hw(de);
2112 netif_stop_queue(dev);
2113 netif_device_detach(dev);
2114 netif_carrier_off(dev);
2115
2116 spin_unlock_irq(&de->lock);
2117 enable_irq(dev->irq);
2118
2119 /* Update the error counts. */
2120 __de_get_stats(de);
2121
2122 synchronize_irq(dev->irq);
2123 de_clean_rings(de);
2124
2125 de_adapter_sleep(de);
2126 pci_disable_device(pdev);
2127 } else {
2128 netif_device_detach(dev);
2129 }
2130 rtnl_unlock();
2131 return 0;
2132 }
2133
2134 static int de_resume (struct pci_dev *pdev)
2135 {
2136 struct net_device *dev = pci_get_drvdata (pdev);
2137 struct de_private *de = dev->priv;
2138 int retval = 0;
2139
2140 rtnl_lock();
2141 if (netif_device_present(dev))
2142 goto out;
2143 if (!netif_running(dev))
2144 goto out_attach;
2145 if ((retval = pci_enable_device(pdev))) {
2146 printk (KERN_ERR "%s: pci_enable_device failed in resume\n",
2147 dev->name);
2148 goto out;
2149 }
2150 de_init_hw(de);
2151 out_attach:
2152 netif_device_attach(dev);
2153 out:
2154 rtnl_unlock();
2155 return 0;
2156 }
2157
2158 #endif /* CONFIG_PM */
2159
2160 static struct pci_driver de_driver = {
2161 .name = DRV_NAME,
2162 .id_table = de_pci_tbl,
2163 .probe = de_init_one,
2164 .remove = __devexit_p(de_remove_one),
2165 #ifdef CONFIG_PM
2166 .suspend = de_suspend,
2167 .resume = de_resume,
2168 #endif
2169 };
2170
2171 static int __init de_init (void)
2172 {
2173 #ifdef MODULE
2174 printk("%s", version);
2175 #endif
2176 return pci_register_driver(&de_driver);
2177 }
2178
2179 static void __exit de_exit (void)
2180 {
2181 pci_unregister_driver (&de_driver);
2182 }
2183
2184 module_init(de_init);
2185 module_exit(de_exit);