drivers/net/tulip/de2104x.c
1/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
2/*
3 Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
4
5 Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
6 Written/copyright 1994-2001 by Donald Becker. [tulip.c]
7
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL.
14
15 See the file COPYING in this distribution for more information.
16
17 TODO, in rough priority order:
18 * Support forcing media type with a module parameter,
19 like dl2k.c/sundance.c
20 * Constants (module parms?) for Rx work limit
21 * Complete reset on PciErr
22 * Jumbo frames / dev->change_mtu
23 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25 * Implement Tx software interrupt mitigation via
26 Tx descriptor bit
27
28 */
29
30#define DRV_NAME "de2104x"
31#define DRV_VERSION "0.7"
32#define DRV_RELDATE "Mar 17, 2004"
33
34#include <linux/module.h>
35#include <linux/kernel.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/init.h>
39#include <linux/pci.h>
40#include <linux/delay.h>
41#include <linux/ethtool.h>
42#include <linux/compiler.h>
43#include <linux/rtnetlink.h>
44#include <linux/crc32.h>
45
46#include <asm/io.h>
47#include <asm/irq.h>
48#include <asm/uaccess.h>
49#include <asm/unaligned.h>
50
51/* These identify the driver base version and may not be removed. */
52static char version[] =
53KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
54
55MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
56MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
57MODULE_LICENSE("GPL");
58MODULE_VERSION(DRV_VERSION);
59
60static int debug = -1;
61module_param (debug, int, 0);
62MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
63
64/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
65#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
66 defined(CONFIG_SPARC) || defined(__ia64__) || \
67 defined(__sh__) || defined(__mips__)
68static int rx_copybreak = 1518;
69#else
70static int rx_copybreak = 100;
71#endif
72module_param (rx_copybreak, int, 0);
73MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
74
75#define PFX DRV_NAME ": "
76
77#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
78 NETIF_MSG_PROBE | \
79 NETIF_MSG_LINK | \
80 NETIF_MSG_IFDOWN | \
81 NETIF_MSG_IFUP | \
82 NETIF_MSG_RX_ERR | \
83 NETIF_MSG_TX_ERR)
84
85/* Descriptor skip length in 32 bit longwords. */
86#ifndef CONFIG_DE2104X_DSL
87#define DSL 0
88#else
89#define DSL CONFIG_DE2104X_DSL
90#endif
91
92#define DE_RX_RING_SIZE 64
93#define DE_TX_RING_SIZE 64
94#define DE_RING_BYTES \
95 ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
96 (sizeof(struct de_desc) * DE_TX_RING_SIZE))
97#define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
98#define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
99#define TX_BUFFS_AVAIL(CP) \
100 (((CP)->tx_tail <= (CP)->tx_head) ? \
101 (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
102 (CP)->tx_tail - (CP)->tx_head - 1)
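/*
 * Worked example of the ring arithmetic above (head/tail values are
 * hypothetical): the ring sizes are powers of two, so NEXT_TX()/NEXT_RX()
 * wrap by masking, and TX_BUFFS_AVAIL() always keeps one descriptor unused
 * so that a completely full ring is never mistaken for an empty one.
 *
 *   DE_TX_RING_SIZE = 64, tx_head = 10, tx_tail = 5:
 *     NEXT_TX(63)      = (63 + 1) & 63 = 0      (wrap-around)
 *     TX_BUFFS_AVAIL() = 5 + 63 - 10   = 58 free descriptors
 *   tx_tail == tx_head            -> 63 free (empty ring)
 *   tx_tail == NEXT_TX(tx_head)   -> 0 free  (full ring)
 */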
103
104#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
105#define RX_OFFSET 2
106
107#define DE_SETUP_SKB ((struct sk_buff *) 1)
108#define DE_DUMMY_SKB ((struct sk_buff *) 2)
109#define DE_SETUP_FRAME_WORDS 96
110#define DE_EEPROM_WORDS 256
111#define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
112#define DE_MAX_MEDIA 5
113
114#define DE_MEDIA_TP_AUTO 0
115#define DE_MEDIA_BNC 1
116#define DE_MEDIA_AUI 2
117#define DE_MEDIA_TP 3
118#define DE_MEDIA_TP_FD 4
119#define DE_MEDIA_INVALID DE_MAX_MEDIA
120#define DE_MEDIA_FIRST 0
121#define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
122#define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
123
124#define DE_TIMER_LINK (60 * HZ)
125#define DE_TIMER_NO_LINK (5 * HZ)
126
127#define DE_NUM_REGS 16
128#define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
129#define DE_REGS_VER 1
130
131/* Time in jiffies before concluding the transmitter is hung. */
132#define TX_TIMEOUT (6*HZ)
133
134/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
135 to support a pre-NWay full-duplex signaling mechanism using short frames.
136 No one knows what it should be, but if left at its default value some
137 10base2(!) packets trigger a full-duplex-request interrupt. */
138#define FULL_DUPLEX_MAGIC 0x6969
139
140enum {
141 /* NIC registers */
142 BusMode = 0x00,
143 TxPoll = 0x08,
144 RxPoll = 0x10,
145 RxRingAddr = 0x18,
146 TxRingAddr = 0x20,
147 MacStatus = 0x28,
148 MacMode = 0x30,
149 IntrMask = 0x38,
150 RxMissed = 0x40,
151 ROMCmd = 0x48,
152 CSR11 = 0x58,
153 SIAStatus = 0x60,
154 CSR13 = 0x68,
155 CSR14 = 0x70,
156 CSR15 = 0x78,
157 PCIPM = 0x40,
158
159 /* BusMode bits */
160 CmdReset = (1 << 0),
161 CacheAlign16 = 0x00008000,
162 BurstLen4 = 0x00000400,
 163 DescSkipLen = (DSL << 2),
164
165 /* Rx/TxPoll bits */
166 NormalTxPoll = (1 << 0),
167 NormalRxPoll = (1 << 0),
168
169 /* Tx/Rx descriptor status bits */
170 DescOwn = (1 << 31),
171 RxError = (1 << 15),
172 RxErrLong = (1 << 7),
173 RxErrCRC = (1 << 1),
174 RxErrFIFO = (1 << 0),
175 RxErrRunt = (1 << 11),
176 RxErrFrame = (1 << 14),
177 RingEnd = (1 << 25),
178 FirstFrag = (1 << 29),
179 LastFrag = (1 << 30),
180 TxError = (1 << 15),
181 TxFIFOUnder = (1 << 1),
182 TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
183 TxMaxCol = (1 << 8),
184 TxOWC = (1 << 9),
185 TxJabber = (1 << 14),
186 SetupFrame = (1 << 27),
187 TxSwInt = (1 << 31),
188
189 /* MacStatus bits */
190 IntrOK = (1 << 16),
191 IntrErr = (1 << 15),
192 RxIntr = (1 << 6),
193 RxEmpty = (1 << 7),
194 TxIntr = (1 << 0),
195 TxEmpty = (1 << 2),
196 PciErr = (1 << 13),
197 TxState = (1 << 22) | (1 << 21) | (1 << 20),
198 RxState = (1 << 19) | (1 << 18) | (1 << 17),
199 LinkFail = (1 << 12),
200 LinkPass = (1 << 4),
201 RxStopped = (1 << 8),
202 TxStopped = (1 << 1),
203
204 /* MacMode bits */
205 TxEnable = (1 << 13),
206 RxEnable = (1 << 1),
207 RxTx = TxEnable | RxEnable,
208 FullDuplex = (1 << 9),
209 AcceptAllMulticast = (1 << 7),
210 AcceptAllPhys = (1 << 6),
211 BOCnt = (1 << 5),
212 MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
213 RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
214
215 /* ROMCmd bits */
216 EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */
217 EE_CS = 0x01, /* EEPROM chip select. */
218 EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
219 EE_WRITE_0 = 0x01,
220 EE_WRITE_1 = 0x05,
221 EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */
222 EE_ENB = (0x4800 | EE_CS),
223
 224 /* The EEPROM commands include the always-set leading bit. */
225 EE_READ_CMD = 6,
226
227 /* RxMissed bits */
228 RxMissedOver = (1 << 16),
229 RxMissedMask = 0xffff,
230
231 /* SROM-related bits */
232 SROMC0InfoLeaf = 27,
233 MediaBlockMask = 0x3f,
234 MediaCustomCSRs = (1 << 6),
 235
236 /* PCIPM bits */
237 PM_Sleep = (1 << 31),
238 PM_Snooze = (1 << 30),
239 PM_Mask = PM_Sleep | PM_Snooze,
 240
241 /* SIAStatus bits */
242 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
243 NWayRestart = (1 << 12),
244 NonselPortActive = (1 << 9),
245 LinkFailStatus = (1 << 2),
246 NetCxnErr = (1 << 1),
247};
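/*
 * The register offsets above follow the usual Tulip spacing of one CSR per
 * 8 bytes of I/O space (CSR0 at 0x00, CSR1 at 0x08, ...), which is why
 * __de_get_regs() below can dump all DE_NUM_REGS of them as dr32(i * 8);
 * e.g. CSR13 = 13 * 8 = 0x68 and RxMissed (CSR8) = 8 * 8 = 0x40, matching
 * the enum values.
 */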
248
249static const u32 de_intr_mask =
250 IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
251 LinkPass | LinkFail | PciErr;
252
253/*
 254 * Set the programmable burst length to 4 longwords for all chips;
 255 * DMA errors result without these values. Cache-align to 16 longwords.
256 */
 257static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
258
259struct de_srom_media_block {
260 u8 opts;
261 u16 csr13;
262 u16 csr14;
263 u16 csr15;
264} __attribute__((packed));
265
266struct de_srom_info_leaf {
267 u16 default_media;
268 u8 n_blocks;
269 u8 unused;
270} __attribute__((packed));
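/*
 * Layout sketch of the SROM area that de21041_get_srom_info() parses with
 * the two packed structures above (offsets relative to the info leaf, whose
 * position is taken from ee_data[SROMC0InfoLeaf]):
 *
 *   +0  u16 default_media        de_srom_info_leaf
 *   +2  u8  n_blocks
 *   +3  u8  unused
 *   +4  n_blocks consecutive de_srom_media_block entries, each being
 *        u8  opts                media index in the low 6 bits (MediaBlockMask)
 *        u16 csr13, csr14, csr15 present only when opts & MediaCustomCSRs
 */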
271
272struct de_desc {
273 __le32 opts1;
274 __le32 opts2;
275 __le32 addr1;
276 __le32 addr2;
277#if DSL
278 __le32 skip[DSL];
279#endif
280};
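/*
 * With CONFIG_DE2104X_DSL set, every descriptor above carries DSL extra
 * (unused) longwords, and the chip has to be told to skip them between
 * descriptors: that is the DescSkipLen = (DSL << 2) field merged into
 * de_bus_mode/CSR0 above.  For example (hypothetical config), DSL = 1
 * makes the descriptor five longwords and programs a skip length of 1.
 */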
281
282struct media_info {
283 u16 type; /* DE_MEDIA_xxx */
284 u16 csr13;
285 u16 csr14;
286 u16 csr15;
287};
288
289struct ring_info {
290 struct sk_buff *skb;
291 dma_addr_t mapping;
292};
293
294struct de_private {
295 unsigned tx_head;
296 unsigned tx_tail;
297 unsigned rx_tail;
298
299 void __iomem *regs;
300 struct net_device *dev;
301 spinlock_t lock;
302
303 struct de_desc *rx_ring;
304 struct de_desc *tx_ring;
305 struct ring_info tx_skb[DE_TX_RING_SIZE];
306 struct ring_info rx_skb[DE_RX_RING_SIZE];
307 unsigned rx_buf_sz;
308 dma_addr_t ring_dma;
309
310 u32 msg_enable;
311
312 struct net_device_stats net_stats;
313
314 struct pci_dev *pdev;
315
316 u16 setup_frame[DE_SETUP_FRAME_WORDS];
317
318 u32 media_type;
319 u32 media_supported;
320 u32 media_advertise;
321 struct media_info media[DE_MAX_MEDIA];
322 struct timer_list media_timer;
323
324 u8 *ee_data;
325 unsigned board_idx;
326 unsigned de21040 : 1;
327 unsigned media_lock : 1;
328};
329
330
331static void de_set_rx_mode (struct net_device *dev);
332static void de_tx (struct de_private *de);
333static void de_clean_rings (struct de_private *de);
334static void de_media_interrupt (struct de_private *de, u32 status);
335static void de21040_media_timer (unsigned long data);
336static void de21041_media_timer (unsigned long data);
337static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
338
339
 340static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
341 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
342 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
343 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
344 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
345 { },
346};
347MODULE_DEVICE_TABLE(pci, de_pci_tbl);
348
349static const char * const media_name[DE_MAX_MEDIA] = {
350 "10baseT auto",
351 "BNC",
352 "AUI",
353 "10baseT-HD",
354 "10baseT-FD"
355};
356
357/* 21040 transceiver register settings:
358 * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
359static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
360static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
361static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
362
363/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
364static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
365static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
366static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
367
368
369#define dr32(reg) readl(de->regs + (reg))
370#define dw32(reg,val) writel((val), de->regs + (reg))
371
372
373static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
374 u32 status, u32 len)
375{
376 if (netif_msg_rx_err (de))
377 printk (KERN_DEBUG
378 "%s: rx err, slot %d status 0x%x len %d\n",
379 de->dev->name, rx_tail, status, len);
380
381 if ((status & 0x38000300) != 0x0300) {
 382 /* Ignore earlier buffers. */
383 if ((status & 0xffff) != 0x7fff) {
384 if (netif_msg_rx_err(de))
385 dev_warn(&de->dev->dev,
386 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
387 status);
388 de->net_stats.rx_length_errors++;
389 }
390 } else if (status & RxError) {
391 /* There was a fatal error. */
392 de->net_stats.rx_errors++; /* end of a packet.*/
393 if (status & 0x0890) de->net_stats.rx_length_errors++;
394 if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
395 if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
396 }
397}
398
399static void de_rx (struct de_private *de)
400{
401 unsigned rx_tail = de->rx_tail;
402 unsigned rx_work = DE_RX_RING_SIZE;
403 unsigned drop = 0;
404 int rc;
405
 406 while (--rx_work) {
407 u32 status, len;
408 dma_addr_t mapping;
409 struct sk_buff *skb, *copy_skb;
410 unsigned copying_skb, buflen;
411
412 skb = de->rx_skb[rx_tail].skb;
 413 BUG_ON(!skb);
414 rmb();
415 status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
416 if (status & DescOwn)
417 break;
418
419 len = ((status >> 16) & 0x7ff) - 4;
420 mapping = de->rx_skb[rx_tail].mapping;
421
422 if (unlikely(drop)) {
423 de->net_stats.rx_dropped++;
424 goto rx_next;
425 }
426
427 if (unlikely((status & 0x38008300) != 0x0300)) {
428 de_rx_err_acct(de, rx_tail, status, len);
429 goto rx_next;
430 }
431
432 copying_skb = (len <= rx_copybreak);
433
434 if (unlikely(netif_msg_rx_status(de)))
435 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
436 de->dev->name, rx_tail, status, len,
437 copying_skb);
438
439 buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
440 copy_skb = dev_alloc_skb (buflen);
441 if (unlikely(!copy_skb)) {
442 de->net_stats.rx_dropped++;
443 drop = 1;
444 rx_work = 100;
445 goto rx_next;
446 }
447
448 if (!copying_skb) {
449 pci_unmap_single(de->pdev, mapping,
450 buflen, PCI_DMA_FROMDEVICE);
451 skb_put(skb, len);
452
453 mapping =
454 de->rx_skb[rx_tail].mapping =
 455 pci_map_single(de->pdev, copy_skb->data,
456 buflen, PCI_DMA_FROMDEVICE);
457 de->rx_skb[rx_tail].skb = copy_skb;
458 } else {
459 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
460 skb_reserve(copy_skb, RX_OFFSET);
461 skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
462 len);
463 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
464
465 /* We'll reuse the original ring buffer. */
466 skb = copy_skb;
467 }
468
469 skb->protocol = eth_type_trans (skb, de->dev);
470
471 de->net_stats.rx_packets++;
472 de->net_stats.rx_bytes += skb->len;
473 rc = netif_rx (skb);
474 if (rc == NET_RX_DROP)
475 drop = 1;
476
477rx_next:
478 if (rx_tail == (DE_RX_RING_SIZE - 1))
479 de->rx_ring[rx_tail].opts2 =
480 cpu_to_le32(RingEnd | de->rx_buf_sz);
481 else
482 de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
483 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
484 wmb();
485 de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
486 rx_tail = NEXT_RX(rx_tail);
487 }
488
489 if (!rx_work)
 490 dev_warn(&de->dev->dev, "rx work limit reached\n");
491
492 de->rx_tail = rx_tail;
493}
494
 495static irqreturn_t de_interrupt (int irq, void *dev_instance)
496{
497 struct net_device *dev = dev_instance;
 498 struct de_private *de = netdev_priv(dev);
499 u32 status;
500
501 status = dr32(MacStatus);
502 if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
503 return IRQ_NONE;
504
505 if (netif_msg_intr(de))
506 printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
507 dev->name, status, dr32(MacMode),
508 de->rx_tail, de->tx_head, de->tx_tail);
509
510 dw32(MacStatus, status);
511
512 if (status & (RxIntr | RxEmpty)) {
513 de_rx(de);
514 if (status & RxEmpty)
515 dw32(RxPoll, NormalRxPoll);
516 }
517
518 spin_lock(&de->lock);
519
520 if (status & (TxIntr | TxEmpty))
521 de_tx(de);
522
523 if (status & (LinkPass | LinkFail))
524 de_media_interrupt(de, status);
525
526 spin_unlock(&de->lock);
527
528 if (status & PciErr) {
529 u16 pci_status;
530
531 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
532 pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
533 dev_err(&de->dev->dev,
534 "PCI bus error, status=%08x, PCI status=%04x\n",
535 status, pci_status);
536 }
537
538 return IRQ_HANDLED;
539}
540
541static void de_tx (struct de_private *de)
542{
543 unsigned tx_head = de->tx_head;
544 unsigned tx_tail = de->tx_tail;
545
546 while (tx_tail != tx_head) {
547 struct sk_buff *skb;
548 u32 status;
549
550 rmb();
551 status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
552 if (status & DescOwn)
553 break;
554
555 skb = de->tx_skb[tx_tail].skb;
 556 BUG_ON(!skb);
557 if (unlikely(skb == DE_DUMMY_SKB))
558 goto next;
559
560 if (unlikely(skb == DE_SETUP_SKB)) {
561 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
562 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
563 goto next;
564 }
565
566 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
567 skb->len, PCI_DMA_TODEVICE);
568
569 if (status & LastFrag) {
570 if (status & TxError) {
571 if (netif_msg_tx_err(de))
572 printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
573 de->dev->name, status);
574 de->net_stats.tx_errors++;
575 if (status & TxOWC)
576 de->net_stats.tx_window_errors++;
577 if (status & TxMaxCol)
578 de->net_stats.tx_aborted_errors++;
579 if (status & TxLinkFail)
580 de->net_stats.tx_carrier_errors++;
581 if (status & TxFIFOUnder)
582 de->net_stats.tx_fifo_errors++;
583 } else {
584 de->net_stats.tx_packets++;
585 de->net_stats.tx_bytes += skb->len;
586 if (netif_msg_tx_done(de))
587 printk(KERN_DEBUG "%s: tx done, slot %d\n",
588 de->dev->name, tx_tail);
589 }
590 dev_kfree_skb_irq(skb);
591 }
592
593next:
594 de->tx_skb[tx_tail].skb = NULL;
595
596 tx_tail = NEXT_TX(tx_tail);
597 }
598
599 de->tx_tail = tx_tail;
600
601 if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
602 netif_wake_queue(de->dev);
603}
604
605static netdev_tx_t de_start_xmit (struct sk_buff *skb,
606 struct net_device *dev)
 607{
 608 struct de_private *de = netdev_priv(dev);
609 unsigned int entry, tx_free;
610 u32 mapping, len, flags = FirstFrag | LastFrag;
611 struct de_desc *txd;
612
613 spin_lock_irq(&de->lock);
614
615 tx_free = TX_BUFFS_AVAIL(de);
616 if (tx_free == 0) {
617 netif_stop_queue(dev);
618 spin_unlock_irq(&de->lock);
 619 return NETDEV_TX_BUSY;
620 }
621 tx_free--;
622
623 entry = de->tx_head;
624
625 txd = &de->tx_ring[entry];
626
627 len = skb->len;
628 mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
629 if (entry == (DE_TX_RING_SIZE - 1))
630 flags |= RingEnd;
631 if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
632 flags |= TxSwInt;
633 flags |= len;
634 txd->opts2 = cpu_to_le32(flags);
635 txd->addr1 = cpu_to_le32(mapping);
636
637 de->tx_skb[entry].skb = skb;
638 de->tx_skb[entry].mapping = mapping;
639 wmb();
640
641 txd->opts1 = cpu_to_le32(DescOwn);
642 wmb();
643
644 de->tx_head = NEXT_TX(entry);
645 if (netif_msg_tx_queued(de))
646 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
647 dev->name, entry, skb->len);
648
649 if (tx_free == 0)
650 netif_stop_queue(dev);
651
652 spin_unlock_irq(&de->lock);
653
654 /* Trigger an immediate transmit demand. */
655 dw32(TxPoll, NormalTxPoll);
656 dev->trans_start = jiffies;
657
 658 return NETDEV_TX_OK;
659}
660
661/* Set or clear the multicast filter for this adaptor.
662 Note that we only use exclusion around actually queueing the
663 new frame, not around filling de->setup_frame. This is non-deterministic
664 when re-entered but still correct. */
665
666#undef set_bit_le
667#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
668
669static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
670{
 671 struct de_private *de = netdev_priv(dev);
672 u16 hash_table[32];
673 struct dev_mc_list *mclist;
674 int i;
675 u16 *eaddrs;
676
677 memset(hash_table, 0, sizeof(hash_table));
678 set_bit_le(255, hash_table); /* Broadcast entry */
679 /* This should work on big-endian machines as well. */
 680 netdev_for_each_mc_addr(mclist, dev) {
681 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
682
683 set_bit_le(index, hash_table);
 684 }
 685
686 for (i = 0; i < 32; i++) {
687 *setup_frm++ = hash_table[i];
688 *setup_frm++ = hash_table[i];
 689 }
 690 setup_frm = &de->setup_frame[13*6];
691
692 /* Fill the final entry with our physical address. */
693 eaddrs = (u16 *)dev->dev_addr;
694 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
695 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
696 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
697}
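/*
 * Illustrative sketch of the hash filter built above (mc_addr stands for an
 * arbitrary multicast address): the index is the low 9 bits of the
 * little-endian CRC-32 of the address, so the table is 512 bits
 * (32 u16 words), and set_bit_le() sets byte index/8, bit index%8:
 *
 *   u16 hash_table[32] = { 0 };
 *   int index = ether_crc_le(ETH_ALEN, mc_addr) & 0x1ff;    index is 0..511
 *   set_bit_le(index, hash_table);
 *
 * Each 16-bit word is then written into the setup frame twice in a row,
 * since (as noted in __de_set_rx_mode() below) only the low-address
 * shortword of each setup-frame entry is actually looked at.
 */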
698
699static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
700{
 701 struct de_private *de = netdev_priv(dev);
 702 struct dev_mc_list *mclist;
703 u16 *eaddrs;
704
705 /* We have <= 14 addresses so we can use the wonderful
706 16 address perfect filtering of the Tulip. */
 707 netdev_for_each_mc_addr(mclist, dev) {
708 eaddrs = (u16 *)mclist->dmi_addr;
709 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
710 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
711 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
712 }
713 /* Fill the unused entries with the broadcast address. */
 714 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
715 setup_frm = &de->setup_frame[15*6];
716
717 /* Fill the final entry with our physical address. */
718 eaddrs = (u16 *)dev->dev_addr;
719 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
720 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
721 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
722}
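/*
 * Setup-frame arithmetic for the perfect filter above: 16 entries of
 * 3 address shortwords, each shortword stored twice, gives 16 * 6 = 96
 * u16 words = DE_SETUP_FRAME_WORDS.  The multicast addresses fill the
 * first entries, the remaining entries up to slot 14 are padded with
 * 0xff (broadcast), and slot 15 holds the interface's own address.
 */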
723
724
725static void __de_set_rx_mode (struct net_device *dev)
726{
 727 struct de_private *de = netdev_priv(dev);
728 u32 macmode;
729 unsigned int entry;
730 u32 mapping;
731 struct de_desc *txd;
732 struct de_desc *dummy_txd = NULL;
733
734 macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
735
736 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
737 macmode |= AcceptAllMulticast | AcceptAllPhys;
738 goto out;
739 }
740
 741 if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
742 /* Too many to filter well -- accept all multicasts. */
743 macmode |= AcceptAllMulticast;
744 goto out;
745 }
746
747 /* Note that only the low-address shortword of setup_frame is valid!
748 The values are doubled for big-endian architectures. */
 749 if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
750 build_setup_frame_hash (de->setup_frame, dev);
751 else
752 build_setup_frame_perfect (de->setup_frame, dev);
753
754 /*
755 * Now add this frame to the Tx list.
756 */
757
758 entry = de->tx_head;
759
 760 /* Avoid a chip erratum by prefixing a dummy entry. */
761 if (entry != 0) {
762 de->tx_skb[entry].skb = DE_DUMMY_SKB;
763
764 dummy_txd = &de->tx_ring[entry];
765 dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
766 cpu_to_le32(RingEnd) : 0;
767 dummy_txd->addr1 = 0;
768
769 /* Must set DescOwned later to avoid race with chip */
770
771 entry = NEXT_TX(entry);
772 }
773
774 de->tx_skb[entry].skb = DE_SETUP_SKB;
775 de->tx_skb[entry].mapping = mapping =
776 pci_map_single (de->pdev, de->setup_frame,
777 sizeof (de->setup_frame), PCI_DMA_TODEVICE);
778
779 /* Put the setup frame on the Tx list. */
780 txd = &de->tx_ring[entry];
781 if (entry == (DE_TX_RING_SIZE - 1))
782 txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
783 else
784 txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
785 txd->addr1 = cpu_to_le32(mapping);
786 wmb();
787
788 txd->opts1 = cpu_to_le32(DescOwn);
789 wmb();
790
791 if (dummy_txd) {
792 dummy_txd->opts1 = cpu_to_le32(DescOwn);
793 wmb();
794 }
795
796 de->tx_head = NEXT_TX(entry);
797
798 if (TX_BUFFS_AVAIL(de) == 0)
799 netif_stop_queue(dev);
800
801 /* Trigger an immediate transmit demand. */
802 dw32(TxPoll, NormalTxPoll);
803
804out:
805 if (macmode != dr32(MacMode))
806 dw32(MacMode, macmode);
807}
808
809static void de_set_rx_mode (struct net_device *dev)
810{
811 unsigned long flags;
 812 struct de_private *de = netdev_priv(dev);
813
814 spin_lock_irqsave (&de->lock, flags);
815 __de_set_rx_mode(dev);
816 spin_unlock_irqrestore (&de->lock, flags);
817}
818
819static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
820{
821 if (unlikely(rx_missed & RxMissedOver))
822 de->net_stats.rx_missed_errors += RxMissedMask;
823 else
824 de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
825}
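/*
 * CSR8 (RxMissed) sketch, as used above and in __de_get_stats(): the low
 * sixteen bits count frames missed since the last read, bit 16
 * (RxMissedOver) flags that this counter overflowed, and the register
 * clears itself on read, so every read is folded into net_stats at once:
 *
 *   u32 v = dr32(RxMissed);
 *   de->net_stats.rx_missed_errors +=
 *           (v & RxMissedOver) ? RxMissedMask : (v & RxMissedMask);
 */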
826
827static void __de_get_stats(struct de_private *de)
828{
829 u32 tmp = dr32(RxMissed); /* self-clearing */
830
831 de_rx_missed(de, tmp);
832}
833
834static struct net_device_stats *de_get_stats(struct net_device *dev)
835{
 836 struct de_private *de = netdev_priv(dev);
837
 838 /* The chip only needs to report frames it silently dropped. */
839 spin_lock_irq(&de->lock);
840 if (netif_running(dev) && netif_device_present(dev))
841 __de_get_stats(de);
842 spin_unlock_irq(&de->lock);
843
844 return &de->net_stats;
845}
846
847static inline int de_is_running (struct de_private *de)
848{
849 return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
850}
851
852static void de_stop_rxtx (struct de_private *de)
853{
854 u32 macmode;
 855 unsigned int i = 1300/100;
856
857 macmode = dr32(MacMode);
858 if (macmode & RxTx) {
859 dw32(MacMode, macmode & ~RxTx);
860 dr32(MacMode);
861 }
862
863 /* wait until in-flight frame completes.
864 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
865 * Typically expect this loop to end in < 50 us on 100BT.
866 */
867 while (--i) {
868 if (!de_is_running(de))
869 return;
 870 udelay(100);
 871 }
 872
 873 dev_warn(&de->dev->dev, "timeout expired stopping DMA\n");
874}
875
876static inline void de_start_rxtx (struct de_private *de)
877{
878 u32 macmode;
879
880 macmode = dr32(MacMode);
881 if ((macmode & RxTx) != RxTx) {
882 dw32(MacMode, macmode | RxTx);
883 dr32(MacMode);
884 }
885}
886
887static void de_stop_hw (struct de_private *de)
888{
889
890 udelay(5);
891 dw32(IntrMask, 0);
892
893 de_stop_rxtx(de);
894
895 dw32(MacStatus, dr32(MacStatus));
896
897 udelay(10);
898
899 de->rx_tail = 0;
900 de->tx_head = de->tx_tail = 0;
901}
902
903static void de_link_up(struct de_private *de)
904{
905 if (!netif_carrier_ok(de->dev)) {
906 netif_carrier_on(de->dev);
907 if (netif_msg_link(de))
908 dev_info(&de->dev->dev, "link up, media %s\n",
909 media_name[de->media_type]);
910 }
911}
912
913static void de_link_down(struct de_private *de)
914{
915 if (netif_carrier_ok(de->dev)) {
916 netif_carrier_off(de->dev);
917 if (netif_msg_link(de))
 918 dev_info(&de->dev->dev, "link down\n");
919 }
920}
921
922static void de_set_media (struct de_private *de)
923{
924 unsigned media = de->media_type;
925 u32 macmode = dr32(MacMode);
926
 927 if (de_is_running(de))
928 dev_warn(&de->dev->dev,
929 "chip is running while changing media!\n");
930
931 if (de->de21040)
932 dw32(CSR11, FULL_DUPLEX_MAGIC);
933 dw32(CSR13, 0); /* Reset phy */
934 dw32(CSR14, de->media[media].csr14);
935 dw32(CSR15, de->media[media].csr15);
936 dw32(CSR13, de->media[media].csr13);
937
938 /* must delay 10ms before writing to other registers,
939 * especially CSR6
940 */
941 mdelay(10);
942
943 if (media == DE_MEDIA_TP_FD)
944 macmode |= FullDuplex;
945 else
946 macmode &= ~FullDuplex;
 947
 948 if (netif_msg_link(de)) {
949 dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
950 dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
951 dr32(MacMode), dr32(SIAStatus),
952 dr32(CSR13), dr32(CSR14), dr32(CSR15));
953
954 dev_info(&de->dev->dev,
955 "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
956 macmode, de->media[media].csr13,
957 de->media[media].csr14, de->media[media].csr15);
958 }
959 if (macmode != dr32(MacMode))
960 dw32(MacMode, macmode);
961}
962
963static void de_next_media (struct de_private *de, u32 *media,
964 unsigned int n_media)
965{
966 unsigned int i;
967
968 for (i = 0; i < n_media; i++) {
969 if (de_ok_to_advertise(de, media[i])) {
970 de->media_type = media[i];
971 return;
972 }
973 }
974}
975
976static void de21040_media_timer (unsigned long data)
977{
978 struct de_private *de = (struct de_private *) data;
979 struct net_device *dev = de->dev;
980 u32 status = dr32(SIAStatus);
981 unsigned int carrier;
982 unsigned long flags;
 983
 984 carrier = (status & NetCxnErr) ? 0 : 1;
 985
986 if (carrier) {
987 if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
988 goto no_link_yet;
989
990 de->media_timer.expires = jiffies + DE_TIMER_LINK;
991 add_timer(&de->media_timer);
992 if (!netif_carrier_ok(dev))
993 de_link_up(de);
994 else
995 if (netif_msg_timer(de))
996 dev_info(&dev->dev, "%s link ok, status %x\n",
997 media_name[de->media_type], status);
998 return;
999 }
1000
 1001 de_link_down(de);
1002
1003 if (de->media_lock)
1004 return;
1005
1006 if (de->media_type == DE_MEDIA_AUI) {
1007 u32 next_state = DE_MEDIA_TP;
1008 de_next_media(de, &next_state, 1);
1009 } else {
1010 u32 next_state = DE_MEDIA_AUI;
1011 de_next_media(de, &next_state, 1);
1012 }
1013
1014 spin_lock_irqsave(&de->lock, flags);
1015 de_stop_rxtx(de);
1016 spin_unlock_irqrestore(&de->lock, flags);
1017 de_set_media(de);
1018 de_start_rxtx(de);
1019
1020no_link_yet:
1021 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1022 add_timer(&de->media_timer);
1023
1024 if (netif_msg_timer(de))
1025 dev_info(&dev->dev, "no link, trying media %s, status %x\n",
1026 media_name[de->media_type], status);
1027}
1028
1029static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1030{
1031 switch (new_media) {
1032 case DE_MEDIA_TP_AUTO:
1033 if (!(de->media_advertise & ADVERTISED_Autoneg))
1034 return 0;
1035 if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1036 return 0;
1037 break;
1038 case DE_MEDIA_BNC:
1039 if (!(de->media_advertise & ADVERTISED_BNC))
1040 return 0;
1041 break;
1042 case DE_MEDIA_AUI:
1043 if (!(de->media_advertise & ADVERTISED_AUI))
1044 return 0;
1045 break;
1046 case DE_MEDIA_TP:
1047 if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1048 return 0;
1049 break;
1050 case DE_MEDIA_TP_FD:
1051 if (!(de->media_advertise & ADVERTISED_10baseT_Full))
1052 return 0;
1053 break;
1054 }
 1055
1056 return 1;
1057}
1058
1059static void de21041_media_timer (unsigned long data)
1060{
1061 struct de_private *de = (struct de_private *) data;
1062 struct net_device *dev = de->dev;
1063 u32 status = dr32(SIAStatus);
1064 unsigned int carrier;
1065 unsigned long flags;
 1066
 1067 carrier = (status & NetCxnErr) ? 0 : 1;
 1068
1069 if (carrier) {
1070 if ((de->media_type == DE_MEDIA_TP_AUTO ||
1071 de->media_type == DE_MEDIA_TP ||
1072 de->media_type == DE_MEDIA_TP_FD) &&
1073 (status & LinkFailStatus))
1074 goto no_link_yet;
1075
1076 de->media_timer.expires = jiffies + DE_TIMER_LINK;
1077 add_timer(&de->media_timer);
1078 if (!netif_carrier_ok(dev))
1079 de_link_up(de);
1080 else
1081 if (netif_msg_timer(de))
1082 dev_info(&dev->dev,
1083 "%s link ok, mode %x status %x\n",
1084 media_name[de->media_type],
1085 dr32(MacMode), status);
1086 return;
1087 }
1088
 1089 de_link_down(de);
1090
1091 /* if media type locked, don't switch media */
1092 if (de->media_lock)
1093 goto set_media;
1094
1095 /* if activity detected, use that as hint for new media type */
1096 if (status & NonselPortActive) {
1097 unsigned int have_media = 1;
1098
1099 /* if AUI/BNC selected, then activity is on TP port */
1100 if (de->media_type == DE_MEDIA_AUI ||
1101 de->media_type == DE_MEDIA_BNC) {
1102 if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
1103 de->media_type = DE_MEDIA_TP_AUTO;
1104 else
1105 have_media = 0;
1106 }
1107
1108 /* TP selected. If there is only TP and BNC, then it's BNC */
1109 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
1110 de_ok_to_advertise(de, DE_MEDIA_BNC))
1111 de->media_type = DE_MEDIA_BNC;
1112
1113 /* TP selected. If there is only TP and AUI, then it's AUI */
1114 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
1115 de_ok_to_advertise(de, DE_MEDIA_AUI))
1116 de->media_type = DE_MEDIA_AUI;
1117
1118 /* otherwise, ignore the hint */
1119 else
1120 have_media = 0;
1121
1122 if (have_media)
1123 goto set_media;
1124 }
1125
1126 /*
1127 * Absent or ambiguous activity hint, move to next advertised
1128 * media state. If de->media_type is left unchanged, this
1129 * simply resets the PHY and reloads the current media settings.
1130 */
1131 if (de->media_type == DE_MEDIA_AUI) {
1132 u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1133 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1134 } else if (de->media_type == DE_MEDIA_BNC) {
1135 u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI };
1136 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1137 } else {
1138 u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1139 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1140 }
 1141
1142set_media:
1143 spin_lock_irqsave(&de->lock, flags);
1144 de_stop_rxtx(de);
1145 spin_unlock_irqrestore(&de->lock, flags);
1146 de_set_media(de);
1147 de_start_rxtx(de);
1148
1149no_link_yet:
1150 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1151 add_timer(&de->media_timer);
1152
1153 if (netif_msg_timer(de))
1154 dev_info(&dev->dev, "no link, trying media %s, status %x\n",
1155 media_name[de->media_type], status);
1156}
1157
1158static void de_media_interrupt (struct de_private *de, u32 status)
1159{
1160 if (status & LinkPass) {
1161 de_link_up(de);
1162 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1163 return;
1164 }
 1165
 1166 BUG_ON(!(status & LinkFail));
1167
1168 if (netif_carrier_ok(de->dev)) {
1169 de_link_down(de);
1170 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1171 }
1172}
1173
1174static int de_reset_mac (struct de_private *de)
1175{
1176 u32 status, tmp;
1177
1178 /*
1179 * Reset MAC. de4x5.c and tulip.c examined for "advice"
1180 * in this area.
1181 */
1182
1183 if (dr32(BusMode) == 0xffffffff)
1184 return -EBUSY;
1185
1186 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
1187 dw32 (BusMode, CmdReset);
1188 mdelay (1);
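 /*
  * Note on the delay above: at the standard 33 MHz PCI clock, 50 bus
  * cycles are only about 1.5 us, so the 1 ms mdelay() holds CmdReset far
  * longer than the minimum the comment asks for.
  */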
1189
1190 dw32 (BusMode, de_bus_mode);
1191 mdelay (1);
1192
1193 for (tmp = 0; tmp < 5; tmp++) {
1194 dr32 (BusMode);
1195 mdelay (1);
1196 }
1197
1198 mdelay (1);
1199
1200 status = dr32(MacStatus);
1201 if (status & (RxState | TxState))
1202 return -EBUSY;
1203 if (status == 0xffffffff)
1204 return -ENODEV;
1205 return 0;
1206}
1207
1208static void de_adapter_wake (struct de_private *de)
1209{
1210 u32 pmctl;
1211
1212 if (de->de21040)
1213 return;
1214
1215 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1216 if (pmctl & PM_Mask) {
1217 pmctl &= ~PM_Mask;
1218 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1219
1220 /* de4x5.c delays, so we do too */
1221 msleep(10);
1222 }
1223}
1224
1225static void de_adapter_sleep (struct de_private *de)
1226{
1227 u32 pmctl;
1228
1229 if (de->de21040)
1230 return;
1231
1232 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1233 pmctl |= PM_Sleep;
1234 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1235}
1236
1237static int de_init_hw (struct de_private *de)
1238{
1239 struct net_device *dev = de->dev;
1240 u32 macmode;
1241 int rc;
1242
1243 de_adapter_wake(de);
 1244
1245 macmode = dr32(MacMode) & ~MacModeClear;
1246
1247 rc = de_reset_mac(de);
1248 if (rc)
1249 return rc;
1250
1251 de_set_media(de); /* reset phy */
1252
1253 dw32(RxRingAddr, de->ring_dma);
1254 dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
1255
1256 dw32(MacMode, RxTx | macmode);
1257
1258 dr32(RxMissed); /* self-clearing */
1259
1260 dw32(IntrMask, de_intr_mask);
1261
1262 de_set_rx_mode(dev);
1263
1264 return 0;
1265}
1266
1267static int de_refill_rx (struct de_private *de)
1268{
1269 unsigned i;
1270
1271 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1272 struct sk_buff *skb;
1273
1274 skb = dev_alloc_skb(de->rx_buf_sz);
1275 if (!skb)
1276 goto err_out;
1277
1278 skb->dev = de->dev;
1279
1280 de->rx_skb[i].mapping = pci_map_single(de->pdev,
 1281 skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1282 de->rx_skb[i].skb = skb;
1283
1284 de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1285 if (i == (DE_RX_RING_SIZE - 1))
1286 de->rx_ring[i].opts2 =
1287 cpu_to_le32(RingEnd | de->rx_buf_sz);
1288 else
1289 de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1290 de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1291 de->rx_ring[i].addr2 = 0;
1292 }
1293
1294 return 0;
1295
1296err_out:
1297 de_clean_rings(de);
1298 return -ENOMEM;
1299}
1300
1301static int de_init_rings (struct de_private *de)
1302{
1303 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1304 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1305
1306 de->rx_tail = 0;
1307 de->tx_head = de->tx_tail = 0;
1308
1309 return de_refill_rx (de);
1310}
1311
1312static int de_alloc_rings (struct de_private *de)
1313{
1314 de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1315 if (!de->rx_ring)
1316 return -ENOMEM;
1317 de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1318 return de_init_rings(de);
1319}
1320
1321static void de_clean_rings (struct de_private *de)
1322{
1323 unsigned i;
1324
1325 memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
1326 de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1327 wmb();
1328 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1329 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1330 wmb();
1331
1332 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1333 if (de->rx_skb[i].skb) {
1334 pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
1335 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1336 dev_kfree_skb(de->rx_skb[i].skb);
1337 }
1338 }
1339
1340 for (i = 0; i < DE_TX_RING_SIZE; i++) {
1341 struct sk_buff *skb = de->tx_skb[i].skb;
1342 if ((skb) && (skb != DE_DUMMY_SKB)) {
1343 if (skb != DE_SETUP_SKB) {
1344 de->net_stats.tx_dropped++;
1345 pci_unmap_single(de->pdev,
1346 de->tx_skb[i].mapping,
1347 skb->len, PCI_DMA_TODEVICE);
 1348 dev_kfree_skb(skb);
1349 } else {
1350 pci_unmap_single(de->pdev,
1351 de->tx_skb[i].mapping,
1352 sizeof(de->setup_frame),
1353 PCI_DMA_TODEVICE);
1354 }
1355 }
1356 }
1357
1358 memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
1359 memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
1360}
1361
1362static void de_free_rings (struct de_private *de)
1363{
1364 de_clean_rings(de);
1365 pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
1366 de->rx_ring = NULL;
1367 de->tx_ring = NULL;
1368}
1369
1370static int de_open (struct net_device *dev)
1371{
 1372 struct de_private *de = netdev_priv(dev);
 1373 int rc;
1374
1375 if (netif_msg_ifup(de))
1376 printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
1377
1378 de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1379
1380 rc = de_alloc_rings(de);
1381 if (rc) {
 1382 dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc);
1383 return rc;
1384 }
1385
 1386 dw32(IntrMask, 0);
 1387
 1388 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
 1389 if (rc) {
1390 dev_err(&dev->dev, "IRQ %d request failure, err=%d\n",
1391 dev->irq, rc);
1392 goto err_out_free;
1393 }
1394
1395 rc = de_init_hw(de);
1396 if (rc) {
 1397 dev_err(&dev->dev, "h/w init failure, err=%d\n", rc);
 1398 goto err_out_free_irq;
1399 }
1400
1401 netif_start_queue(dev);
1402 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1403
1404 return 0;
1405
1406err_out_free_irq:
1407 free_irq(dev->irq, dev);
1408err_out_free:
1409 de_free_rings(de);
1410 return rc;
1411}
1412
1413static int de_close (struct net_device *dev)
1414{
 1415 struct de_private *de = netdev_priv(dev);
1416 unsigned long flags;
1417
1418 if (netif_msg_ifdown(de))
1419 printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1420
1421 del_timer_sync(&de->media_timer);
1422
1423 spin_lock_irqsave(&de->lock, flags);
1424 de_stop_hw(de);
1425 netif_stop_queue(dev);
1426 netif_carrier_off(dev);
1427 spin_unlock_irqrestore(&de->lock, flags);
 1428
1429 free_irq(dev->irq, dev);
1430
1431 de_free_rings(de);
1432 de_adapter_sleep(de);
1433 return 0;
1434}
1435
1436static void de_tx_timeout (struct net_device *dev)
1437{
 1438 struct de_private *de = netdev_priv(dev);
1439
1440 printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1441 dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
1442 de->rx_tail, de->tx_head, de->tx_tail);
1443
1444 del_timer_sync(&de->media_timer);
1445
1446 disable_irq(dev->irq);
1447 spin_lock_irq(&de->lock);
1448
1449 de_stop_hw(de);
1450 netif_stop_queue(dev);
1451 netif_carrier_off(dev);
1452
1453 spin_unlock_irq(&de->lock);
1454 enable_irq(dev->irq);
 1455
1456 /* Update the error counts. */
1457 __de_get_stats(de);
1458
1459 synchronize_irq(dev->irq);
1460 de_clean_rings(de);
1461
1462 de_init_rings(de);
1463
 1464 de_init_hw(de);
 1465
1466 netif_wake_queue(dev);
1467}
1468
1469static void __de_get_regs(struct de_private *de, u8 *buf)
1470{
1471 int i;
1472 u32 *rbuf = (u32 *)buf;
 1473
1474 /* read all CSRs */
1475 for (i = 0; i < DE_NUM_REGS; i++)
1476 rbuf[i] = dr32(i * 8);
1477
1478 /* handle self-clearing RxMissed counter, CSR8 */
1479 de_rx_missed(de, rbuf[8]);
1480}
1481
1482static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1483{
1484 ecmd->supported = de->media_supported;
1485 ecmd->transceiver = XCVR_INTERNAL;
1486 ecmd->phy_address = 0;
1487 ecmd->advertising = de->media_advertise;
 1488
1489 switch (de->media_type) {
1490 case DE_MEDIA_AUI:
1491 ecmd->port = PORT_AUI;
1492 ecmd->speed = 5;
1493 break;
1494 case DE_MEDIA_BNC:
1495 ecmd->port = PORT_BNC;
1496 ecmd->speed = 2;
1497 break;
1498 default:
1499 ecmd->port = PORT_TP;
1500 ecmd->speed = SPEED_10;
1501 break;
1502 }
 1503
1504 if (dr32(MacMode) & FullDuplex)
1505 ecmd->duplex = DUPLEX_FULL;
1506 else
1507 ecmd->duplex = DUPLEX_HALF;
1508
1509 if (de->media_lock)
1510 ecmd->autoneg = AUTONEG_DISABLE;
1511 else
1512 ecmd->autoneg = AUTONEG_ENABLE;
1513
1514 /* ignore maxtxpkt, maxrxpkt for now */
1515
1516 return 0;
1517}
1518
1519static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1520{
1521 u32 new_media;
1522 unsigned int media_lock;
1523
1524 if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
1525 return -EINVAL;
1526 if (de->de21040 && ecmd->speed == 2)
1527 return -EINVAL;
1528 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1529 return -EINVAL;
1530 if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
1531 return -EINVAL;
1532 if (de->de21040 && ecmd->port == PORT_BNC)
1533 return -EINVAL;
1534 if (ecmd->transceiver != XCVR_INTERNAL)
1535 return -EINVAL;
1536 if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
1537 return -EINVAL;
1538 if (ecmd->advertising & ~de->media_supported)
1539 return -EINVAL;
1540 if (ecmd->autoneg == AUTONEG_ENABLE &&
1541 (!(ecmd->advertising & ADVERTISED_Autoneg)))
1542 return -EINVAL;
 1543
1544 switch (ecmd->port) {
1545 case PORT_AUI:
1546 new_media = DE_MEDIA_AUI;
1547 if (!(ecmd->advertising & ADVERTISED_AUI))
1548 return -EINVAL;
1549 break;
1550 case PORT_BNC:
1551 new_media = DE_MEDIA_BNC;
1552 if (!(ecmd->advertising & ADVERTISED_BNC))
1553 return -EINVAL;
1554 break;
1555 default:
1556 if (ecmd->autoneg == AUTONEG_ENABLE)
1557 new_media = DE_MEDIA_TP_AUTO;
1558 else if (ecmd->duplex == DUPLEX_FULL)
1559 new_media = DE_MEDIA_TP_FD;
1560 else
1561 new_media = DE_MEDIA_TP;
1562 if (!(ecmd->advertising & ADVERTISED_TP))
1563 return -EINVAL;
1564 if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
1565 return -EINVAL;
1566 break;
1567 }
 1568
 1569 media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
 1570
1571 if ((new_media == de->media_type) &&
1572 (media_lock == de->media_lock) &&
1573 (ecmd->advertising == de->media_advertise))
1574 return 0; /* nothing to change */
 1575
1576 de_link_down(de);
1577 de_stop_rxtx(de);
 1578
1579 de->media_type = new_media;
1580 de->media_lock = media_lock;
1581 de->media_advertise = ecmd->advertising;
1582 de_set_media(de);
 1583
1584 return 0;
1585}
1586
1587static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1588{
 1589 struct de_private *de = netdev_priv(dev);
1590
1591 strcpy (info->driver, DRV_NAME);
1592 strcpy (info->version, DRV_VERSION);
1593 strcpy (info->bus_info, pci_name(de->pdev));
1594 info->eedump_len = DE_EEPROM_SIZE;
1595}
1596
1597static int de_get_regs_len(struct net_device *dev)
1598{
1599 return DE_REGS_SIZE;
1600}
1601
1602static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1603{
 1604 struct de_private *de = netdev_priv(dev);
1605 int rc;
1606
1607 spin_lock_irq(&de->lock);
1608 rc = __de_get_settings(de, ecmd);
1609 spin_unlock_irq(&de->lock);
1610
1611 return rc;
1612}
1613
1614static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1615{
 1616 struct de_private *de = netdev_priv(dev);
1617 int rc;
1618
1619 spin_lock_irq(&de->lock);
1620 rc = __de_set_settings(de, ecmd);
1621 spin_unlock_irq(&de->lock);
1622
1623 return rc;
1624}
1625
1626static u32 de_get_msglevel(struct net_device *dev)
1627{
 1628 struct de_private *de = netdev_priv(dev);
1629
1630 return de->msg_enable;
1631}
1632
1633static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1634{
 1635 struct de_private *de = netdev_priv(dev);
1636
1637 de->msg_enable = msglvl;
1638}
1639
1640static int de_get_eeprom(struct net_device *dev,
1641 struct ethtool_eeprom *eeprom, u8 *data)
1642{
 1643 struct de_private *de = netdev_priv(dev);
1644
1645 if (!de->ee_data)
1646 return -EOPNOTSUPP;
1647 if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1648 (eeprom->len != DE_EEPROM_SIZE))
1649 return -EINVAL;
1650 memcpy(data, de->ee_data, eeprom->len);
1651
1652 return 0;
1653}
1654
1655static int de_nway_reset(struct net_device *dev)
1656{
 1657 struct de_private *de = netdev_priv(dev);
1658 u32 status;
1659
1660 if (de->media_type != DE_MEDIA_TP_AUTO)
1661 return -EINVAL;
1662 if (netif_carrier_ok(de->dev))
1663 de_link_down(de);
1664
1665 status = dr32(SIAStatus);
1666 dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1667 if (netif_msg_link(de))
1668 dev_info(&de->dev->dev, "link nway restart, status %x,%x\n",
1669 status, dr32(SIAStatus));
1670 return 0;
1671}
1672
1673static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1674 void *data)
1675{
 1676 struct de_private *de = netdev_priv(dev);
1677
1678 regs->version = (DE_REGS_VER << 2) | de->de21040;
1679
1680 spin_lock_irq(&de->lock);
1681 __de_get_regs(de, data);
1682 spin_unlock_irq(&de->lock);
1683}
1684
 1685static const struct ethtool_ops de_ethtool_ops = {
 1686 .get_link = ethtool_op_get_link,
1687 .get_drvinfo = de_get_drvinfo,
1688 .get_regs_len = de_get_regs_len,
1689 .get_settings = de_get_settings,
1690 .set_settings = de_set_settings,
1691 .get_msglevel = de_get_msglevel,
1692 .set_msglevel = de_set_msglevel,
1693 .get_eeprom = de_get_eeprom,
1694 .nway_reset = de_nway_reset,
1695 .get_regs = de_get_regs,
1696};
1697
 1698static void __devinit de21040_get_mac_address (struct de_private *de)
1699{
1700 unsigned i;
1701
1702 dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
 1703 udelay(5);
1704
1705 for (i = 0; i < 6; i++) {
1706 int value, boguscnt = 100000;
 1707 do {
 1708 value = dr32(ROMCmd);
 1709 } while (value < 0 && --boguscnt > 0);
1710 de->dev->dev_addr[i] = value;
1711 udelay(1);
1712 if (boguscnt <= 0)
 1713 pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i);
1714 }
1715}
1716
 1717static void __devinit de21040_get_media_info(struct de_private *de)
1718{
1719 unsigned int i;
1720
1721 de->media_type = DE_MEDIA_TP;
1722 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1723 SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1724 de->media_advertise = de->media_supported;
1725
1726 for (i = 0; i < DE_MAX_MEDIA; i++) {
1727 switch (i) {
1728 case DE_MEDIA_AUI:
1729 case DE_MEDIA_TP:
1730 case DE_MEDIA_TP_FD:
1731 de->media[i].type = i;
1732 de->media[i].csr13 = t21040_csr13[i];
1733 de->media[i].csr14 = t21040_csr14[i];
1734 de->media[i].csr15 = t21040_csr15[i];
1735 break;
1736 default:
1737 de->media[i].type = DE_MEDIA_INVALID;
1738 break;
1739 }
1740 }
1741}
1742
1743/* Note: this routine returns extra data bits for size detection. */
 1744static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
1745{
1746 int i;
1747 unsigned retval = 0;
1748 void __iomem *ee_addr = regs + ROMCmd;
1749 int read_cmd = location | (EE_READ_CMD << addr_len);
1750
1751 writel(EE_ENB & ~EE_CS, ee_addr);
1752 writel(EE_ENB, ee_addr);
1753
1754 /* Shift the read command bits out. */
1755 for (i = 4 + addr_len; i >= 0; i--) {
1756 short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1757 writel(EE_ENB | dataval, ee_addr);
1758 readl(ee_addr);
1759 writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1760 readl(ee_addr);
1761 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1762 }
1763 writel(EE_ENB, ee_addr);
1764 readl(ee_addr);
1765
1766 for (i = 16; i > 0; i--) {
1767 writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
1768 readl(ee_addr);
1769 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1770 writel(EE_ENB, ee_addr);
1771 readl(ee_addr);
1772 }
1773
1774 /* Terminate the EEPROM access. */
1775 writel(EE_ENB & ~EE_CS, ee_addr);
1776 return retval;
1777}
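/*
 * Usage sketch for the extra status bits mentioned in the note above: the
 * SROM probe in de21041_get_srom_info() issues a read with an 8-bit address
 * and tests a bit beyond the 16 data bits to pick the EEPROM address width:
 *
 *   ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
 */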
1778
 1779static void __devinit de21041_get_srom_info (struct de_private *de)
1780{
1781 unsigned i, sa_offset = 0, ofs;
1782 u8 ee_data[DE_EEPROM_SIZE + 6] = {};
1783 unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
1784 struct de_srom_info_leaf *il;
1785 void *bufp;
1786
1787 /* download entire eeprom */
1788 for (i = 0; i < DE_EEPROM_WORDS; i++)
1789 ((__le16 *)ee_data)[i] =
1790 cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));
1791
1792 /* DEC now has a specification but early board makers
1793 just put the address in the first EEPROM locations. */
1794 /* This does memcmp(eedata, eedata+16, 8) */
1795
1796#ifndef CONFIG_MIPS_COBALT
1797
1798 for (i = 0; i < 8; i ++)
1799 if (ee_data[i] != ee_data[16+i])
1800 sa_offset = 20;
1801
1802#endif
1803
1804 /* store MAC address */
1805 for (i = 0; i < 6; i ++)
1806 de->dev->dev_addr[i] = ee_data[i + sa_offset];
1807
1808 /* get offset of controller 0 info leaf. ignore 2nd byte. */
1809 ofs = ee_data[SROMC0InfoLeaf];
1810 if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
1811 goto bad_srom;
1812
1813 /* get pointer to info leaf */
1814 il = (struct de_srom_info_leaf *) &ee_data[ofs];
1815
1816 /* paranoia checks */
1817 if (il->n_blocks == 0)
1818 goto bad_srom;
1819 if ((sizeof(ee_data) - ofs) <
1820 (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
1821 goto bad_srom;
1822
1823 /* get default media type */
 1824 switch (get_unaligned(&il->default_media)) {
1825 case 0x0001: de->media_type = DE_MEDIA_BNC; break;
1826 case 0x0002: de->media_type = DE_MEDIA_AUI; break;
1827 case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
1828 default: de->media_type = DE_MEDIA_TP_AUTO; break;
1829 }
 1830
 1831 if (netif_msg_probe(de))
1832 pr_info("de%d: SROM leaf offset %u, default media %s\n",
1833 de->board_idx, ofs, media_name[de->media_type]);
1834
1835 /* init SIA register values to defaults */
1836 for (i = 0; i < DE_MAX_MEDIA; i++) {
1837 de->media[i].type = DE_MEDIA_INVALID;
1838 de->media[i].csr13 = 0xffff;
1839 de->media[i].csr14 = 0xffff;
1840 de->media[i].csr15 = 0xffff;
1841 }
1842
 1843 /* parse media blocks to see which media are supported,
1844 * and if any custom CSR values are provided
1845 */
1846 bufp = ((void *)il) + sizeof(*il);
1847 for (i = 0; i < il->n_blocks; i++) {
1848 struct de_srom_media_block *ib = bufp;
1849 unsigned idx;
1850
1851 /* index based on media type in media block */
1852 switch(ib->opts & MediaBlockMask) {
1853 case 0: /* 10baseT */
1854 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
1855 | SUPPORTED_Autoneg;
1856 idx = DE_MEDIA_TP;
1857 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1858 break;
1859 case 1: /* BNC */
1860 de->media_supported |= SUPPORTED_BNC;
1861 idx = DE_MEDIA_BNC;
1862 break;
1863 case 2: /* AUI */
1864 de->media_supported |= SUPPORTED_AUI;
1865 idx = DE_MEDIA_AUI;
1866 break;
1867 case 4: /* 10baseT-FD */
1868 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
1869 | SUPPORTED_Autoneg;
1870 idx = DE_MEDIA_TP_FD;
1871 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1872 break;
1873 default:
1874 goto bad_srom;
1875 }
1876
1877 de->media[idx].type = idx;
1878
1879 if (netif_msg_probe(de))
1880 pr_info("de%d: media block #%u: %s",
1881 de->board_idx, i,
1882 media_name[de->media[idx].type]);
1883
1884 bufp += sizeof (ib->opts);
1885
1886 if (ib->opts & MediaCustomCSRs) {
1887 de->media[idx].csr13 = get_unaligned(&ib->csr13);
1888 de->media[idx].csr14 = get_unaligned(&ib->csr14);
1889 de->media[idx].csr15 = get_unaligned(&ib->csr15);
1890 bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
1891 sizeof(ib->csr15);
1892
1893 if (netif_msg_probe(de))
1894 pr_cont(" (%x,%x,%x)\n",
1895 de->media[idx].csr13,
1896 de->media[idx].csr14,
1897 de->media[idx].csr15);
 1898
 1899 } else if (netif_msg_probe(de))
 1900 pr_cont("\n");
1901
1902 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1903 break;
1904 }
1905
1906 de->media_advertise = de->media_supported;
1907
1908fill_defaults:
1909 /* fill in defaults, for cases where custom CSRs not used */
1910 for (i = 0; i < DE_MAX_MEDIA; i++) {
1911 if (de->media[i].csr13 == 0xffff)
1912 de->media[i].csr13 = t21041_csr13[i];
1913 if (de->media[i].csr14 == 0xffff)
1914 de->media[i].csr14 = t21041_csr14[i];
1915 if (de->media[i].csr15 == 0xffff)
1916 de->media[i].csr15 = t21041_csr15[i];
1917 }
1918
 1919 de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);
1920
1921 return;
1922
1923bad_srom:
1924 /* for error cases, it's ok to assume we support all these */
1925 for (i = 0; i < DE_MAX_MEDIA; i++)
1926 de->media[i].type = i;
1927 de->media_supported =
1928 SUPPORTED_10baseT_Half |
1929 SUPPORTED_10baseT_Full |
1930 SUPPORTED_Autoneg |
1931 SUPPORTED_TP |
1932 SUPPORTED_AUI |
1933 SUPPORTED_BNC;
1934 goto fill_defaults;
1935}
1936
1937static const struct net_device_ops de_netdev_ops = {
1938 .ndo_open = de_open,
1939 .ndo_stop = de_close,
1940 .ndo_set_multicast_list = de_set_rx_mode,
1941 .ndo_start_xmit = de_start_xmit,
1942 .ndo_get_stats = de_get_stats,
1943 .ndo_tx_timeout = de_tx_timeout,
1944 .ndo_change_mtu = eth_change_mtu,
1945 .ndo_set_mac_address = eth_mac_addr,
1946 .ndo_validate_addr = eth_validate_addr,
1947};
1948
 1949static int __devinit de_init_one (struct pci_dev *pdev,
1950 const struct pci_device_id *ent)
1951{
1952 struct net_device *dev;
1953 struct de_private *de;
1954 int rc;
1955 void __iomem *regs;
 1956 unsigned long pciaddr;
1957 static int board_idx = -1;
1958
1959 board_idx++;
1960
1961#ifndef MODULE
1962 if (board_idx == 0)
1963 printk("%s", version);
1964#endif
1965
1966 /* allocate a new ethernet device structure, and fill in defaults */
1967 dev = alloc_etherdev(sizeof(struct de_private));
1968 if (!dev)
1969 return -ENOMEM;
1970
 1971 dev->netdev_ops = &de_netdev_ops;
 1972 SET_NETDEV_DEV(dev, &pdev->dev);
 1973 dev->ethtool_ops = &de_ethtool_ops;
1974 dev->watchdog_timeo = TX_TIMEOUT;
1975
 1976 de = netdev_priv(dev);
1977 de->de21040 = ent->driver_data == 0 ? 1 : 0;
1978 de->pdev = pdev;
1979 de->dev = dev;
1980 de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
1981 de->board_idx = board_idx;
1982 spin_lock_init (&de->lock);
1983 init_timer(&de->media_timer);
1984 if (de->de21040)
1985 de->media_timer.function = de21040_media_timer;
1986 else
1987 de->media_timer.function = de21041_media_timer;
1988 de->media_timer.data = (unsigned long) de;
1989
1990 netif_carrier_off(dev);
1991 netif_stop_queue(dev);
1992
1993 /* wake up device, assign resources */
1994 rc = pci_enable_device(pdev);
1995 if (rc)
1996 goto err_out_free;
1997
1998 /* reserve PCI resources to ensure driver atomicity */
1999 rc = pci_request_regions(pdev, DRV_NAME);
2000 if (rc)
2001 goto err_out_disable;
2002
2003 /* check for invalid IRQ value */
2004 if (pdev->irq < 2) {
2005 rc = -EIO;
 2006 pr_err(PFX "invalid irq (%d) for pci dev %s\n",
2007 pdev->irq, pci_name(pdev));
2008 goto err_out_res;
2009 }
2010
2011 dev->irq = pdev->irq;
2012
2013 /* obtain and check validity of PCI I/O address */
2014 pciaddr = pci_resource_start(pdev, 1);
2015 if (!pciaddr) {
2016 rc = -EIO;
 2017 pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev));
2018 goto err_out_res;
2019 }
2020 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2021 rc = -EIO;
2022 pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n",
2023 (unsigned long long)pci_resource_len(pdev, 1),
2024 pci_name(pdev));
2025 goto err_out_res;
2026 }
2027
2028 /* remap CSR registers */
2029 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
2030 if (!regs) {
2031 rc = -EIO;
2032 pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
2033 (unsigned long long)pci_resource_len(pdev, 1),
2034 pciaddr, pci_name(pdev));
2035 goto err_out_res;
2036 }
2037 dev->base_addr = (unsigned long) regs;
2038 de->regs = regs;
2039
2040 de_adapter_wake(de);
2041
2042 /* make sure hardware is not running */
2043 rc = de_reset_mac(de);
2044 if (rc) {
 2045 pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev));
2046 goto err_out_iomap;
2047 }
2048
2049 /* get MAC address, initialize default media type and
2050 * get list of supported media
2051 */
2052 if (de->de21040) {
2053 de21040_get_mac_address(de);
2054 de21040_get_media_info(de);
2055 } else {
2056 de21041_get_srom_info(de);
2057 }
2058
2059 /* register new network interface with kernel */
2060 rc = register_netdev(dev);
2061 if (rc)
2062 goto err_out_iomap;
2063
2064 /* print info about board and interface just registered */
2065 dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n",
2066 de->de21040 ? "21040" : "21041",
2067 dev->base_addr,
2068 dev->dev_addr,
2069 dev->irq);
2070
2071 pci_set_drvdata(pdev, dev);
2072
2073 /* enable busmastering */
2074 pci_set_master(pdev);
2075
2076 /* put adapter to sleep */
2077 de_adapter_sleep(de);
2078
2079 return 0;
2080
2081err_out_iomap:
 2082 kfree(de->ee_data);
2083 iounmap(regs);
2084err_out_res:
2085 pci_release_regions(pdev);
2086err_out_disable:
2087 pci_disable_device(pdev);
2088err_out_free:
2089 free_netdev(dev);
2090 return rc;
2091}
2092
 2093static void __devexit de_remove_one (struct pci_dev *pdev)
2094{
2095 struct net_device *dev = pci_get_drvdata(pdev);
 2096 struct de_private *de = netdev_priv(dev);
 2097
 2098 BUG_ON(!dev);
 2099 unregister_netdev(dev);
 2100 kfree(de->ee_data);
2101 iounmap(de->regs);
2102 pci_release_regions(pdev);
2103 pci_disable_device(pdev);
2104 pci_set_drvdata(pdev, NULL);
2105 free_netdev(dev);
2106}
2107
2108#ifdef CONFIG_PM
2109
 2110static int de_suspend (struct pci_dev *pdev, pm_message_t state)
2111{
2112 struct net_device *dev = pci_get_drvdata (pdev);
 2113 struct de_private *de = netdev_priv(dev);
2114
2115 rtnl_lock();
2116 if (netif_running (dev)) {
2117 del_timer_sync(&de->media_timer);
2118
2119 disable_irq(dev->irq);
2120 spin_lock_irq(&de->lock);
2121
2122 de_stop_hw(de);
2123 netif_stop_queue(dev);
2124 netif_device_detach(dev);
2125 netif_carrier_off(dev);
2126
2127 spin_unlock_irq(&de->lock);
2128 enable_irq(dev->irq);
 2129
2130 /* Update the error counts. */
2131 __de_get_stats(de);
2132
2133 synchronize_irq(dev->irq);
2134 de_clean_rings(de);
2135
2136 de_adapter_sleep(de);
2137 pci_disable_device(pdev);
2138 } else {
2139 netif_device_detach(dev);
2140 }
2141 rtnl_unlock();
2142 return 0;
2143}
2144
2145static int de_resume (struct pci_dev *pdev)
2146{
2147 struct net_device *dev = pci_get_drvdata (pdev);
 2148 struct de_private *de = netdev_priv(dev);
 2149 int retval = 0;
2150
2151 rtnl_lock();
2152 if (netif_device_present(dev))
2153 goto out;
2154 if (!netif_running(dev))
2155 goto out_attach;
2156 if ((retval = pci_enable_device(pdev))) {
 2157 dev_err(&dev->dev, "pci_enable_device failed in resume\n");
 2158 goto out;
 2159 }
2160 de_init_hw(de);
2161out_attach:
2162 netif_device_attach(dev);
2163out:
2164 rtnl_unlock();
2165 return 0;
2166}
2167
2168#endif /* CONFIG_PM */
2169
2170static struct pci_driver de_driver = {
2171 .name = DRV_NAME,
2172 .id_table = de_pci_tbl,
2173 .probe = de_init_one,
 2174 .remove = __devexit_p(de_remove_one),
2175#ifdef CONFIG_PM
2176 .suspend = de_suspend,
2177 .resume = de_resume,
2178#endif
2179};
2180
2181static int __init de_init (void)
2182{
2183#ifdef MODULE
2184 printk("%s", version);
2185#endif
 2186 return pci_register_driver(&de_driver);
2187}
2188
2189static void __exit de_exit (void)
2190{
2191 pci_unregister_driver (&de_driver);
2192}
2193
2194module_init(de_init);
2195module_exit(de_exit);