1 /* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
2 /*
3 Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
4
5 Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
6 Written/copyright 1994-2001 by Donald Becker. [tulip.c]
7
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL.
14
15 See the file COPYING in this distribution for more information.
16
17 TODO, in rough priority order:
18 * Support forcing media type with a module parameter,
19 like dl2k.c/sundance.c
20 * Constants (module parms?) for Rx work limit
21 * Complete reset on PciErr
22 * Jumbo frames / dev->change_mtu
23 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25 * Implement Tx software interrupt mitigation via
26 Tx descriptor bit
27
28 */
29
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32 #define DRV_NAME "de2104x"
33 #define DRV_VERSION "0.7"
34 #define DRV_RELDATE "Mar 17, 2004"
35
36 #include <linux/module.h>
37 #include <linux/kernel.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/init.h>
41 #include <linux/pci.h>
42 #include <linux/delay.h>
43 #include <linux/ethtool.h>
44 #include <linux/compiler.h>
45 #include <linux/rtnetlink.h>
46 #include <linux/crc32.h>
47 #include <linux/slab.h>
48
49 #include <asm/io.h>
50 #include <asm/irq.h>
51 #include <asm/uaccess.h>
52 #include <asm/unaligned.h>
53
54 /* These identify the driver base version and may not be removed. */
55 static char version[] =
56 "PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")";
57
58 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
59 MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
60 MODULE_LICENSE("GPL");
61 MODULE_VERSION(DRV_VERSION);
62
63 static int debug = -1;
64 module_param (debug, int, 0);
65 MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
66
67 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
68 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
69 defined(CONFIG_SPARC) || defined(__ia64__) || \
70 defined(__sh__) || defined(__mips__)
71 static int rx_copybreak = 1518;
72 #else
73 static int rx_copybreak = 100;
74 #endif
75 module_param (rx_copybreak, int, 0);
76 MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
77
78 #define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
79 NETIF_MSG_PROBE | \
80 NETIF_MSG_LINK | \
81 NETIF_MSG_IFDOWN | \
82 NETIF_MSG_IFUP | \
83 NETIF_MSG_RX_ERR | \
84 NETIF_MSG_TX_ERR)
85
86 /* Descriptor skip length in 32 bit longwords. */
87 #ifndef CONFIG_DE2104X_DSL
88 #define DSL 0
89 #else
90 #define DSL CONFIG_DE2104X_DSL
91 #endif
92
93 #define DE_RX_RING_SIZE 64
94 #define DE_TX_RING_SIZE 64
95 #define DE_RING_BYTES \
96 ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
97 (sizeof(struct de_desc) * DE_TX_RING_SIZE))
98 #define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
99 #define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
100 #define TX_BUFFS_AVAIL(CP) \
101 (((CP)->tx_tail <= (CP)->tx_head) ? \
102 (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
103 (CP)->tx_tail - (CP)->tx_head - 1)
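/* TX_BUFFS_AVAIL keeps one descriptor permanently unused so that
 * tx_head == tx_tail unambiguously means "ring empty" rather than
 * "ring full".  Example: with DE_TX_RING_SIZE == 64, an empty ring
 * reports 63 free slots and a completely full ring reports 0.
 */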
104
105 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
106 #define RX_OFFSET 2
107
108 #define DE_SETUP_SKB ((struct sk_buff *) 1)
109 #define DE_DUMMY_SKB ((struct sk_buff *) 2)
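/* DE_SETUP_SKB and DE_DUMMY_SKB are sentinel pointer values, never
 * dereferenced: they mark tx_skb[] slots that carry a setup frame or
 * the errata dummy descriptor instead of a real sk_buff (see de_tx()
 * and __de_set_rx_mode()).
 */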
110 #define DE_SETUP_FRAME_WORDS 96
111 #define DE_EEPROM_WORDS 256
112 #define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
113 #define DE_MAX_MEDIA 5
114
115 #define DE_MEDIA_TP_AUTO 0
116 #define DE_MEDIA_BNC 1
117 #define DE_MEDIA_AUI 2
118 #define DE_MEDIA_TP 3
119 #define DE_MEDIA_TP_FD 4
120 #define DE_MEDIA_INVALID DE_MAX_MEDIA
121 #define DE_MEDIA_FIRST 0
122 #define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
123 #define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
124
125 #define DE_TIMER_LINK (60 * HZ)
126 #define DE_TIMER_NO_LINK (5 * HZ)
127
128 #define DE_NUM_REGS 16
129 #define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
130 #define DE_REGS_VER 1
131
132 /* Time in jiffies before concluding the transmitter is hung. */
133 #define TX_TIMEOUT (6*HZ)
134
135 /* This is a mysterious value that can be written to CSR11 in the 21040 (only)
136 to support a pre-NWay full-duplex signaling mechanism using short frames.
137 No one knows what it should be, but if left at its default value some
138 10base2(!) packets trigger a full-duplex-request interrupt. */
139 #define FULL_DUPLEX_MAGIC 0x6969
140
141 enum {
142 /* NIC registers */
143 BusMode = 0x00,
144 TxPoll = 0x08,
145 RxPoll = 0x10,
146 RxRingAddr = 0x18,
147 TxRingAddr = 0x20,
148 MacStatus = 0x28,
149 MacMode = 0x30,
150 IntrMask = 0x38,
151 RxMissed = 0x40,
152 ROMCmd = 0x48,
153 CSR11 = 0x58,
154 SIAStatus = 0x60,
155 CSR13 = 0x68,
156 CSR14 = 0x70,
157 CSR15 = 0x78,
158 PCIPM = 0x40,
159
160 /* BusMode bits */
161 CmdReset = (1 << 0),
162 CacheAlign16 = 0x00008000,
163 BurstLen4 = 0x00000400,
164 DescSkipLen = (DSL << 2),
165
166 /* Rx/TxPoll bits */
167 NormalTxPoll = (1 << 0),
168 NormalRxPoll = (1 << 0),
169
170 /* Tx/Rx descriptor status bits */
171 DescOwn = (1 << 31),
172 RxError = (1 << 15),
173 RxErrLong = (1 << 7),
174 RxErrCRC = (1 << 1),
175 RxErrFIFO = (1 << 0),
176 RxErrRunt = (1 << 11),
177 RxErrFrame = (1 << 14),
178 RingEnd = (1 << 25),
179 FirstFrag = (1 << 29),
180 LastFrag = (1 << 30),
181 TxError = (1 << 15),
182 TxFIFOUnder = (1 << 1),
183 TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
184 TxMaxCol = (1 << 8),
185 TxOWC = (1 << 9),
186 TxJabber = (1 << 14),
187 SetupFrame = (1 << 27),
188 TxSwInt = (1 << 31),
189
190 /* MacStatus bits */
191 IntrOK = (1 << 16),
192 IntrErr = (1 << 15),
193 RxIntr = (1 << 6),
194 RxEmpty = (1 << 7),
195 TxIntr = (1 << 0),
196 TxEmpty = (1 << 2),
197 PciErr = (1 << 13),
198 TxState = (1 << 22) | (1 << 21) | (1 << 20),
199 RxState = (1 << 19) | (1 << 18) | (1 << 17),
200 LinkFail = (1 << 12),
201 LinkPass = (1 << 4),
202 RxStopped = (1 << 8),
203 TxStopped = (1 << 1),
204
205 /* MacMode bits */
206 TxEnable = (1 << 13),
207 RxEnable = (1 << 1),
208 RxTx = TxEnable | RxEnable,
209 FullDuplex = (1 << 9),
210 AcceptAllMulticast = (1 << 7),
211 AcceptAllPhys = (1 << 6),
212 BOCnt = (1 << 5),
213 MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
214 RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
215
216 /* ROMCmd bits */
217 EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */
218 EE_CS = 0x01, /* EEPROM chip select. */
219 EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
220 EE_WRITE_0 = 0x01,
221 EE_WRITE_1 = 0x05,
222 EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */
223 EE_ENB = (0x4800 | EE_CS),
224
225 	/* The EEPROM commands include the always-set leading bit. */
226 EE_READ_CMD = 6,
227
228 /* RxMissed bits */
229 RxMissedOver = (1 << 16),
230 RxMissedMask = 0xffff,
231
232 /* SROM-related bits */
233 SROMC0InfoLeaf = 27,
234 MediaBlockMask = 0x3f,
235 MediaCustomCSRs = (1 << 6),
236
237 /* PCIPM bits */
238 PM_Sleep = (1 << 31),
239 PM_Snooze = (1 << 30),
240 PM_Mask = PM_Sleep | PM_Snooze,
241
242 /* SIAStatus bits */
243 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
244 NWayRestart = (1 << 12),
245 NonselPortActive = (1 << 9),
246 SelPortActive = (1 << 8),
247 LinkFailStatus = (1 << 2),
248 NetCxnErr = (1 << 1),
249 };
250
251 static const u32 de_intr_mask =
252 IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
253 LinkPass | LinkFail | PciErr;
254
255 /*
256 * Set the programmable burst length to 4 longwords for all:
257 * DMA errors result without these values. Cache align 16 long.
258 */
259 static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
260
261 struct de_srom_media_block {
262 u8 opts;
263 u16 csr13;
264 u16 csr14;
265 u16 csr15;
266 } __packed;
267
268 struct de_srom_info_leaf {
269 u16 default_media;
270 u8 n_blocks;
271 u8 unused;
272 } __packed;
273
274 struct de_desc {
275 __le32 opts1;
276 __le32 opts2;
277 __le32 addr1;
278 __le32 addr2;
279 #if DSL
280 __le32 skip[DSL];
281 #endif
282 };
283
284 struct media_info {
285 u16 type; /* DE_MEDIA_xxx */
286 u16 csr13;
287 u16 csr14;
288 u16 csr15;
289 };
290
291 struct ring_info {
292 struct sk_buff *skb;
293 dma_addr_t mapping;
294 };
295
296 struct de_private {
297 unsigned tx_head;
298 unsigned tx_tail;
299 unsigned rx_tail;
300
301 void __iomem *regs;
302 struct net_device *dev;
303 spinlock_t lock;
304
305 struct de_desc *rx_ring;
306 struct de_desc *tx_ring;
307 struct ring_info tx_skb[DE_TX_RING_SIZE];
308 struct ring_info rx_skb[DE_RX_RING_SIZE];
309 unsigned rx_buf_sz;
310 dma_addr_t ring_dma;
311
312 u32 msg_enable;
313
314 struct net_device_stats net_stats;
315
316 struct pci_dev *pdev;
317
318 u16 setup_frame[DE_SETUP_FRAME_WORDS];
319
320 u32 media_type;
321 u32 media_supported;
322 u32 media_advertise;
323 struct media_info media[DE_MAX_MEDIA];
324 struct timer_list media_timer;
325
326 u8 *ee_data;
327 unsigned board_idx;
328 unsigned de21040 : 1;
329 unsigned media_lock : 1;
330 };
331
332
333 static void de_set_rx_mode (struct net_device *dev);
334 static void de_tx (struct de_private *de);
335 static void de_clean_rings (struct de_private *de);
336 static void de_media_interrupt (struct de_private *de, u32 status);
337 static void de21040_media_timer (unsigned long data);
338 static void de21041_media_timer (unsigned long data);
339 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
340
341
342 static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
343 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
344 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
345 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
346 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
347 { },
348 };
349 MODULE_DEVICE_TABLE(pci, de_pci_tbl);
350
351 static const char * const media_name[DE_MAX_MEDIA] = {
352 "10baseT auto",
353 "BNC",
354 "AUI",
355 "10baseT-HD",
356 "10baseT-FD"
357 };
358
359 /* 21040 transceiver register settings:
360  * TP AUTO (unused), BNC (unused), AUI, TP, TP FD */
361 static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
362 static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
363 static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
364
365 /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD */
366 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
367 static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
368 /* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
369 static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
370 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
371
372
373 #define dr32(reg) ioread32(de->regs + (reg))
374 #define dw32(reg, val) iowrite32((val), de->regs + (reg))
375
376
377 static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
378 u32 status, u32 len)
379 {
380 netif_dbg(de, rx_err, de->dev,
381 "rx err, slot %d status 0x%x len %d\n",
382 rx_tail, status, len);
383
384 if ((status & 0x38000300) != 0x0300) {
385 		/* Ignore earlier buffers. */
386 if ((status & 0xffff) != 0x7fff) {
387 netif_warn(de, rx_err, de->dev,
388 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
389 status);
390 de->net_stats.rx_length_errors++;
391 }
392 } else if (status & RxError) {
393 /* There was a fatal error. */
394 de->net_stats.rx_errors++; /* end of a packet.*/
395 if (status & 0x0890) de->net_stats.rx_length_errors++;
396 if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
397 if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
398 }
399 }
400
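/* Receive path.  Ownership of each descriptor is handed to the chip
 * by setting DescOwn in opts1; the chip clears DescOwn once it has
 * written a frame (or error status) into the buffer.  The rmb()/wmb()
 * barriers below order the status read against the rest of the
 * descriptor, and the descriptor refill against the final ownership
 * hand-back.
 */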
401 static void de_rx (struct de_private *de)
402 {
403 unsigned rx_tail = de->rx_tail;
404 unsigned rx_work = DE_RX_RING_SIZE;
405 unsigned drop = 0;
406 int rc;
407
408 while (--rx_work) {
409 u32 status, len;
410 dma_addr_t mapping;
411 struct sk_buff *skb, *copy_skb;
412 unsigned copying_skb, buflen;
413
414 skb = de->rx_skb[rx_tail].skb;
415 BUG_ON(!skb);
416 rmb();
417 status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
418 if (status & DescOwn)
419 break;
420
421 len = ((status >> 16) & 0x7ff) - 4;
422 mapping = de->rx_skb[rx_tail].mapping;
423
424 if (unlikely(drop)) {
425 de->net_stats.rx_dropped++;
426 goto rx_next;
427 }
428
429 if (unlikely((status & 0x38008300) != 0x0300)) {
430 de_rx_err_acct(de, rx_tail, status, len);
431 goto rx_next;
432 }
433
434 copying_skb = (len <= rx_copybreak);
435
436 netif_dbg(de, rx_status, de->dev,
437 "rx slot %d status 0x%x len %d copying? %d\n",
438 rx_tail, status, len, copying_skb);
439
440 buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
441 copy_skb = dev_alloc_skb (buflen);
442 if (unlikely(!copy_skb)) {
443 de->net_stats.rx_dropped++;
444 drop = 1;
445 rx_work = 100;
446 goto rx_next;
447 }
448
449 if (!copying_skb) {
450 pci_unmap_single(de->pdev, mapping,
451 buflen, PCI_DMA_FROMDEVICE);
452 skb_put(skb, len);
453
454 mapping =
455 de->rx_skb[rx_tail].mapping =
456 pci_map_single(de->pdev, copy_skb->data,
457 buflen, PCI_DMA_FROMDEVICE);
458 de->rx_skb[rx_tail].skb = copy_skb;
459 } else {
460 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
461 skb_reserve(copy_skb, RX_OFFSET);
462 skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
463 len);
464 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
465
466 /* We'll reuse the original ring buffer. */
467 skb = copy_skb;
468 }
469
470 skb->protocol = eth_type_trans (skb, de->dev);
471
472 de->net_stats.rx_packets++;
473 de->net_stats.rx_bytes += skb->len;
474 rc = netif_rx (skb);
475 if (rc == NET_RX_DROP)
476 drop = 1;
477
478 rx_next:
479 if (rx_tail == (DE_RX_RING_SIZE - 1))
480 de->rx_ring[rx_tail].opts2 =
481 cpu_to_le32(RingEnd | de->rx_buf_sz);
482 else
483 de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
484 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
485 wmb();
486 de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
487 rx_tail = NEXT_RX(rx_tail);
488 }
489
490 if (!rx_work)
491 netdev_warn(de->dev, "rx work limit reached\n");
492
493 de->rx_tail = rx_tail;
494 }
495
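/* Interrupt handler.  MacStatus (CSR5) is write-one-to-clear, so the
 * dw32(MacStatus, status) below acknowledges exactly the events that
 * were just read.  Rx is processed outside de->lock; Tx completion
 * and media changes are serialized under it.
 */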
496 static irqreturn_t de_interrupt (int irq, void *dev_instance)
497 {
498 struct net_device *dev = dev_instance;
499 struct de_private *de = netdev_priv(dev);
500 u32 status;
501
502 status = dr32(MacStatus);
503 if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
504 return IRQ_NONE;
505
506 netif_dbg(de, intr, dev, "intr, status %08x mode %08x desc %u/%u/%u\n",
507 status, dr32(MacMode),
508 de->rx_tail, de->tx_head, de->tx_tail);
509
510 dw32(MacStatus, status);
511
512 if (status & (RxIntr | RxEmpty)) {
513 de_rx(de);
514 if (status & RxEmpty)
515 dw32(RxPoll, NormalRxPoll);
516 }
517
518 spin_lock(&de->lock);
519
520 if (status & (TxIntr | TxEmpty))
521 de_tx(de);
522
523 if (status & (LinkPass | LinkFail))
524 de_media_interrupt(de, status);
525
526 spin_unlock(&de->lock);
527
528 if (status & PciErr) {
529 u16 pci_status;
530
531 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
532 pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
533 netdev_err(de->dev,
534 "PCI bus error, status=%08x, PCI status=%04x\n",
535 status, pci_status);
536 }
537
538 return IRQ_HANDLED;
539 }
540
541 static void de_tx (struct de_private *de)
542 {
543 unsigned tx_head = de->tx_head;
544 unsigned tx_tail = de->tx_tail;
545
546 while (tx_tail != tx_head) {
547 struct sk_buff *skb;
548 u32 status;
549
550 rmb();
551 status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
552 if (status & DescOwn)
553 break;
554
555 skb = de->tx_skb[tx_tail].skb;
556 BUG_ON(!skb);
557 if (unlikely(skb == DE_DUMMY_SKB))
558 goto next;
559
560 if (unlikely(skb == DE_SETUP_SKB)) {
561 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
562 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
563 goto next;
564 }
565
566 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
567 skb->len, PCI_DMA_TODEVICE);
568
569 if (status & LastFrag) {
570 if (status & TxError) {
571 netif_dbg(de, tx_err, de->dev,
572 "tx err, status 0x%x\n",
573 status);
574 de->net_stats.tx_errors++;
575 if (status & TxOWC)
576 de->net_stats.tx_window_errors++;
577 if (status & TxMaxCol)
578 de->net_stats.tx_aborted_errors++;
579 if (status & TxLinkFail)
580 de->net_stats.tx_carrier_errors++;
581 if (status & TxFIFOUnder)
582 de->net_stats.tx_fifo_errors++;
583 } else {
584 de->net_stats.tx_packets++;
585 de->net_stats.tx_bytes += skb->len;
586 netif_dbg(de, tx_done, de->dev,
587 "tx done, slot %d\n", tx_tail);
588 }
589 dev_kfree_skb_irq(skb);
590 }
591
592 next:
593 de->tx_skb[tx_tail].skb = NULL;
594
595 tx_tail = NEXT_TX(tx_tail);
596 }
597
598 de->tx_tail = tx_tail;
599
600 if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
601 netif_wake_queue(de->dev);
602 }
603
604 static netdev_tx_t de_start_xmit (struct sk_buff *skb,
605 struct net_device *dev)
606 {
607 struct de_private *de = netdev_priv(dev);
608 unsigned int entry, tx_free;
609 u32 mapping, len, flags = FirstFrag | LastFrag;
610 struct de_desc *txd;
611
612 spin_lock_irq(&de->lock);
613
614 tx_free = TX_BUFFS_AVAIL(de);
615 if (tx_free == 0) {
616 netif_stop_queue(dev);
617 spin_unlock_irq(&de->lock);
618 return NETDEV_TX_BUSY;
619 }
620 tx_free--;
621
622 entry = de->tx_head;
623
624 txd = &de->tx_ring[entry];
625
626 len = skb->len;
627 mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
628 if (entry == (DE_TX_RING_SIZE - 1))
629 flags |= RingEnd;
630 if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
631 flags |= TxSwInt;
632 flags |= len;
633 txd->opts2 = cpu_to_le32(flags);
634 txd->addr1 = cpu_to_le32(mapping);
635
636 de->tx_skb[entry].skb = skb;
637 de->tx_skb[entry].mapping = mapping;
638 wmb();
639
640 txd->opts1 = cpu_to_le32(DescOwn);
641 wmb();
642
643 de->tx_head = NEXT_TX(entry);
644 netif_dbg(de, tx_queued, dev, "tx queued, slot %d, skblen %d\n",
645 entry, skb->len);
646
647 if (tx_free == 0)
648 netif_stop_queue(dev);
649
650 spin_unlock_irq(&de->lock);
651
652 /* Trigger an immediate transmit demand. */
653 dw32(TxPoll, NormalTxPoll);
654
655 return NETDEV_TX_OK;
656 }
657
658 /* Set or clear the multicast filter for this adaptor.
659 Note that we only use exclusion around actually queueing the
660 new frame, not around filling de->setup_frame. This is non-deterministic
661 when re-entered but still correct. */
662
663 #undef set_bit_le
664 #define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
665
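/* Imperfect-filter mode: each multicast address is hashed by taking
 * the low 9 bits of its little-endian CRC-32 (ether_crc_le() & 0x1ff),
 * selecting one of 512 bins in the 64-byte hash table; bin 255 is
 * reserved for the broadcast address.
 */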
666 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
667 {
668 struct de_private *de = netdev_priv(dev);
669 u16 hash_table[32];
670 struct netdev_hw_addr *ha;
671 int i;
672 u16 *eaddrs;
673
674 memset(hash_table, 0, sizeof(hash_table));
675 set_bit_le(255, hash_table); /* Broadcast entry */
676 /* This should work on big-endian machines as well. */
677 netdev_for_each_mc_addr(ha, dev) {
678 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
679
680 set_bit_le(index, hash_table);
681 }
682
683 for (i = 0; i < 32; i++) {
684 *setup_frm++ = hash_table[i];
685 *setup_frm++ = hash_table[i];
686 }
687 setup_frm = &de->setup_frame[13*6];
688
689 /* Fill the final entry with our physical address. */
690 eaddrs = (u16 *)dev->dev_addr;
691 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
692 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
693 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
694 }
695
696 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
697 {
698 struct de_private *de = netdev_priv(dev);
699 struct netdev_hw_addr *ha;
700 u16 *eaddrs;
701
702 /* We have <= 14 addresses so we can use the wonderful
703 16 address perfect filtering of the Tulip. */
704 netdev_for_each_mc_addr(ha, dev) {
705 eaddrs = (u16 *) ha->addr;
706 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
707 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
708 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
709 }
710 /* Fill the unused entries with the broadcast address. */
711 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
712 setup_frm = &de->setup_frame[15*6];
713
714 /* Fill the final entry with our physical address. */
715 eaddrs = (u16 *)dev->dev_addr;
716 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
717 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
718 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
719 }
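/* Setup-frame note: the chip consumes the 192-byte setup frame as 48
 * longwords and uses only 16 bits of each, so every 16-bit value
 * above is written twice -- consecutive u16 pairs of setup_frame[]
 * form one longword regardless of host endianness.  Each of the 16
 * perfect-filter entries is 3 address words stored in 12 bytes.
 */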
720
721
722 static void __de_set_rx_mode (struct net_device *dev)
723 {
724 struct de_private *de = netdev_priv(dev);
725 u32 macmode;
726 unsigned int entry;
727 u32 mapping;
728 struct de_desc *txd;
729 struct de_desc *dummy_txd = NULL;
730
731 macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
732
733 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
734 macmode |= AcceptAllMulticast | AcceptAllPhys;
735 goto out;
736 }
737
738 if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
739 /* Too many to filter well -- accept all multicasts. */
740 macmode |= AcceptAllMulticast;
741 goto out;
742 }
743
744 /* Note that only the low-address shortword of setup_frame is valid!
745 The values are doubled for big-endian architectures. */
746 if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
747 build_setup_frame_hash (de->setup_frame, dev);
748 else
749 build_setup_frame_perfect (de->setup_frame, dev);
750
751 /*
752 * Now add this frame to the Tx list.
753 */
754
755 entry = de->tx_head;
756
757 	/* Avoid a chip erratum by prefixing a dummy entry. */
758 if (entry != 0) {
759 de->tx_skb[entry].skb = DE_DUMMY_SKB;
760
761 dummy_txd = &de->tx_ring[entry];
762 dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
763 cpu_to_le32(RingEnd) : 0;
764 dummy_txd->addr1 = 0;
765
766 /* Must set DescOwned later to avoid race with chip */
767
768 entry = NEXT_TX(entry);
769 }
770
771 de->tx_skb[entry].skb = DE_SETUP_SKB;
772 de->tx_skb[entry].mapping = mapping =
773 pci_map_single (de->pdev, de->setup_frame,
774 sizeof (de->setup_frame), PCI_DMA_TODEVICE);
775
776 /* Put the setup frame on the Tx list. */
777 txd = &de->tx_ring[entry];
778 if (entry == (DE_TX_RING_SIZE - 1))
779 txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
780 else
781 txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
782 txd->addr1 = cpu_to_le32(mapping);
783 wmb();
784
785 txd->opts1 = cpu_to_le32(DescOwn);
786 wmb();
787
788 if (dummy_txd) {
789 dummy_txd->opts1 = cpu_to_le32(DescOwn);
790 wmb();
791 }
792
793 de->tx_head = NEXT_TX(entry);
794
795 if (TX_BUFFS_AVAIL(de) == 0)
796 netif_stop_queue(dev);
797
798 /* Trigger an immediate transmit demand. */
799 dw32(TxPoll, NormalTxPoll);
800
801 out:
802 if (macmode != dr32(MacMode))
803 dw32(MacMode, macmode);
804 }
805
806 static void de_set_rx_mode (struct net_device *dev)
807 {
808 unsigned long flags;
809 struct de_private *de = netdev_priv(dev);
810
811 spin_lock_irqsave (&de->lock, flags);
812 __de_set_rx_mode(dev);
813 spin_unlock_irqrestore (&de->lock, flags);
814 }
815
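/* CSR8 (RxMissed) is a self-clearing 16-bit counter with a sticky
 * overflow flag in bit 16.  On overflow only the full 16-bit span can
 * be credited, so rx_missed_errors is a lower bound.
 */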
816 static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
817 {
818 if (unlikely(rx_missed & RxMissedOver))
819 de->net_stats.rx_missed_errors += RxMissedMask;
820 else
821 de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
822 }
823
824 static void __de_get_stats(struct de_private *de)
825 {
826 u32 tmp = dr32(RxMissed); /* self-clearing */
827
828 de_rx_missed(de, tmp);
829 }
830
831 static struct net_device_stats *de_get_stats(struct net_device *dev)
832 {
833 struct de_private *de = netdev_priv(dev);
834
835 	/* The chip only needs to report frames it silently dropped. */
836 spin_lock_irq(&de->lock);
837 if (netif_running(dev) && netif_device_present(dev))
838 __de_get_stats(de);
839 spin_unlock_irq(&de->lock);
840
841 return &de->net_stats;
842 }
843
844 static inline int de_is_running (struct de_private *de)
845 {
846 return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
847 }
848
849 static void de_stop_rxtx (struct de_private *de)
850 {
851 u32 macmode;
852 unsigned int i = 1300/100;
853
854 macmode = dr32(MacMode);
855 if (macmode & RxTx) {
856 dw32(MacMode, macmode & ~RxTx);
857 dr32(MacMode);
858 }
859
860 /* wait until in-flight frame completes.
861 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
862 * Typically expect this loop to end in < 50 us on 100BT.
863 */
864 while (--i) {
865 if (!de_is_running(de))
866 return;
867 udelay(100);
868 }
869
870 netdev_warn(de->dev, "timeout expired, stopping DMA\n");
871 }
872
873 static inline void de_start_rxtx (struct de_private *de)
874 {
875 u32 macmode;
876
877 macmode = dr32(MacMode);
878 if ((macmode & RxTx) != RxTx) {
879 dw32(MacMode, macmode | RxTx);
880 dr32(MacMode);
881 }
882 }
883
884 static void de_stop_hw (struct de_private *de)
885 {
886
887 udelay(5);
888 dw32(IntrMask, 0);
889
890 de_stop_rxtx(de);
891
892 dw32(MacStatus, dr32(MacStatus));
893
894 udelay(10);
895
896 de->rx_tail = 0;
897 de->tx_head = de->tx_tail = 0;
898 }
899
900 static void de_link_up(struct de_private *de)
901 {
902 if (!netif_carrier_ok(de->dev)) {
903 netif_carrier_on(de->dev);
904 netif_info(de, link, de->dev, "link up, media %s\n",
905 media_name[de->media_type]);
906 }
907 }
908
909 static void de_link_down(struct de_private *de)
910 {
911 if (netif_carrier_ok(de->dev)) {
912 netif_carrier_off(de->dev);
913 netif_info(de, link, de->dev, "link down\n");
914 }
915 }
916
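/* Program the SIA (the on-chip 10Mbps transceiver) for the chosen
 * medium: writing 0 to CSR13 resets the SIA, CSR14/CSR15 are then
 * loaded with the per-media values, and rewriting CSR13 re-enables
 * it.  CSR6 (MacMode) must not be touched for 10ms afterwards.
 */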
917 static void de_set_media (struct de_private *de)
918 {
919 unsigned media = de->media_type;
920 u32 macmode = dr32(MacMode);
921
922 if (de_is_running(de))
923 netdev_warn(de->dev, "chip is running while changing media!\n");
924
925 if (de->de21040)
926 dw32(CSR11, FULL_DUPLEX_MAGIC);
927 dw32(CSR13, 0); /* Reset phy */
928 dw32(CSR14, de->media[media].csr14);
929 dw32(CSR15, de->media[media].csr15);
930 dw32(CSR13, de->media[media].csr13);
931
932 /* must delay 10ms before writing to other registers,
933 * especially CSR6
934 */
935 mdelay(10);
936
937 if (media == DE_MEDIA_TP_FD)
938 macmode |= FullDuplex;
939 else
940 macmode &= ~FullDuplex;
941
942 netif_info(de, link, de->dev, "set link %s\n", media_name[media]);
943 netif_info(de, hw, de->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
944 dr32(MacMode), dr32(SIAStatus),
945 dr32(CSR13), dr32(CSR14), dr32(CSR15));
946 netif_info(de, hw, de->dev, "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
947 macmode, de->media[media].csr13,
948 de->media[media].csr14, de->media[media].csr15);
949 if (macmode != dr32(MacMode))
950 dw32(MacMode, macmode);
951 }
952
953 static void de_next_media (struct de_private *de, const u32 *media,
954 unsigned int n_media)
955 {
956 unsigned int i;
957
958 for (i = 0; i < n_media; i++) {
959 if (de_ok_to_advertise(de, media[i])) {
960 de->media_type = media[i];
961 return;
962 }
963 }
964 }
965
966 static void de21040_media_timer (unsigned long data)
967 {
968 struct de_private *de = (struct de_private *) data;
969 struct net_device *dev = de->dev;
970 u32 status = dr32(SIAStatus);
971 unsigned int carrier;
972 unsigned long flags;
973
974 carrier = (status & NetCxnErr) ? 0 : 1;
975
976 if (carrier) {
977 if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
978 goto no_link_yet;
979
980 de->media_timer.expires = jiffies + DE_TIMER_LINK;
981 add_timer(&de->media_timer);
982 if (!netif_carrier_ok(dev))
983 de_link_up(de);
984 else
985 netif_info(de, timer, dev, "%s link ok, status %x\n",
986 media_name[de->media_type], status);
987 return;
988 }
989
990 de_link_down(de);
991
992 if (de->media_lock)
993 return;
994
995 if (de->media_type == DE_MEDIA_AUI) {
996 static const u32 next_state = DE_MEDIA_TP;
997 de_next_media(de, &next_state, 1);
998 } else {
999 static const u32 next_state = DE_MEDIA_AUI;
1000 de_next_media(de, &next_state, 1);
1001 }
1002
1003 spin_lock_irqsave(&de->lock, flags);
1004 de_stop_rxtx(de);
1005 spin_unlock_irqrestore(&de->lock, flags);
1006 de_set_media(de);
1007 de_start_rxtx(de);
1008
1009 no_link_yet:
1010 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1011 add_timer(&de->media_timer);
1012
1013 netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
1014 media_name[de->media_type], status);
1015 }
1016
1017 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1018 {
1019 switch (new_media) {
1020 case DE_MEDIA_TP_AUTO:
1021 if (!(de->media_advertise & ADVERTISED_Autoneg))
1022 return 0;
1023 if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1024 return 0;
1025 break;
1026 case DE_MEDIA_BNC:
1027 if (!(de->media_advertise & ADVERTISED_BNC))
1028 return 0;
1029 break;
1030 case DE_MEDIA_AUI:
1031 if (!(de->media_advertise & ADVERTISED_AUI))
1032 return 0;
1033 break;
1034 case DE_MEDIA_TP:
1035 if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1036 return 0;
1037 break;
1038 case DE_MEDIA_TP_FD:
1039 if (!(de->media_advertise & ADVERTISED_10baseT_Full))
1040 return 0;
1041 break;
1042 }
1043
1044 return 1;
1045 }
1046
1047 static void de21041_media_timer (unsigned long data)
1048 {
1049 struct de_private *de = (struct de_private *) data;
1050 struct net_device *dev = de->dev;
1051 u32 status = dr32(SIAStatus);
1052 unsigned int carrier;
1053 unsigned long flags;
1054
1055 /* clear port active bits */
1056 dw32(SIAStatus, NonselPortActive | SelPortActive);
1057
1058 carrier = (status & NetCxnErr) ? 0 : 1;
1059
1060 if (carrier) {
1061 if ((de->media_type == DE_MEDIA_TP_AUTO ||
1062 de->media_type == DE_MEDIA_TP ||
1063 de->media_type == DE_MEDIA_TP_FD) &&
1064 (status & LinkFailStatus))
1065 goto no_link_yet;
1066
1067 de->media_timer.expires = jiffies + DE_TIMER_LINK;
1068 add_timer(&de->media_timer);
1069 if (!netif_carrier_ok(dev))
1070 de_link_up(de);
1071 else
1072 netif_info(de, timer, dev,
1073 "%s link ok, mode %x status %x\n",
1074 media_name[de->media_type],
1075 dr32(MacMode), status);
1076 return;
1077 }
1078
1079 de_link_down(de);
1080
1081 /* if media type locked, don't switch media */
1082 if (de->media_lock)
1083 goto set_media;
1084
1085 /* if activity detected, use that as hint for new media type */
1086 if (status & NonselPortActive) {
1087 unsigned int have_media = 1;
1088
1089 /* if AUI/BNC selected, then activity is on TP port */
1090 if (de->media_type == DE_MEDIA_AUI ||
1091 de->media_type == DE_MEDIA_BNC) {
1092 if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
1093 de->media_type = DE_MEDIA_TP_AUTO;
1094 else
1095 have_media = 0;
1096 }
1097
1098 /* TP selected. If there is only TP and BNC, then it's BNC */
1099 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
1100 de_ok_to_advertise(de, DE_MEDIA_BNC))
1101 de->media_type = DE_MEDIA_BNC;
1102
1103 /* TP selected. If there is only TP and AUI, then it's AUI */
1104 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
1105 de_ok_to_advertise(de, DE_MEDIA_AUI))
1106 de->media_type = DE_MEDIA_AUI;
1107
1108 /* otherwise, ignore the hint */
1109 else
1110 have_media = 0;
1111
1112 if (have_media)
1113 goto set_media;
1114 }
1115
1116 /*
1117 * Absent or ambiguous activity hint, move to next advertised
1118 * media state. If de->media_type is left unchanged, this
1119 * simply resets the PHY and reloads the current media settings.
1120 */
1121 if (de->media_type == DE_MEDIA_AUI) {
1122 static const u32 next_states[] = {
1123 DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
1124 };
1125 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1126 } else if (de->media_type == DE_MEDIA_BNC) {
1127 static const u32 next_states[] = {
1128 DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
1129 };
1130 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1131 } else {
1132 static const u32 next_states[] = {
1133 DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
1134 };
1135 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1136 }
1137
1138 set_media:
1139 spin_lock_irqsave(&de->lock, flags);
1140 de_stop_rxtx(de);
1141 spin_unlock_irqrestore(&de->lock, flags);
1142 de_set_media(de);
1143 de_start_rxtx(de);
1144
1145 no_link_yet:
1146 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1147 add_timer(&de->media_timer);
1148
1149 netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
1150 media_name[de->media_type], status);
1151 }
1152
1153 static void de_media_interrupt (struct de_private *de, u32 status)
1154 {
1155 if (status & LinkPass) {
1156 /* Ignore if current media is AUI or BNC and we can't use TP */
1157 if ((de->media_type == DE_MEDIA_AUI ||
1158 de->media_type == DE_MEDIA_BNC) &&
1159 (de->media_lock ||
1160 !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
1161 return;
1162 /* If current media is not TP, change it to TP */
1163 if ((de->media_type == DE_MEDIA_AUI ||
1164 de->media_type == DE_MEDIA_BNC)) {
1165 de->media_type = DE_MEDIA_TP_AUTO;
1166 de_stop_rxtx(de);
1167 de_set_media(de);
1168 de_start_rxtx(de);
1169 }
1170 de_link_up(de);
1171 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1172 return;
1173 }
1174
1175 BUG_ON(!(status & LinkFail));
1176 /* Mark the link as down only if current media is TP */
1177 if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
1178 de->media_type != DE_MEDIA_BNC) {
1179 de_link_down(de);
1180 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1181 }
1182 }
1183
1184 static int de_reset_mac (struct de_private *de)
1185 {
1186 u32 status, tmp;
1187
1188 /*
1189 * Reset MAC. de4x5.c and tulip.c examined for "advice"
1190 * in this area.
1191 */
1192
1193 if (dr32(BusMode) == 0xffffffff)
1194 return -EBUSY;
1195
1196 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
1197 dw32 (BusMode, CmdReset);
1198 mdelay (1);
1199
1200 dw32 (BusMode, de_bus_mode);
1201 mdelay (1);
1202
1203 for (tmp = 0; tmp < 5; tmp++) {
1204 dr32 (BusMode);
1205 mdelay (1);
1206 }
1207
1208 mdelay (1);
1209
1210 status = dr32(MacStatus);
1211 if (status & (RxState | TxState))
1212 return -EBUSY;
1213 if (status == 0xffffffff)
1214 return -ENODEV;
1215 return 0;
1216 }
1217
1218 static void de_adapter_wake (struct de_private *de)
1219 {
1220 u32 pmctl;
1221
1222 if (de->de21040)
1223 return;
1224
1225 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1226 if (pmctl & PM_Mask) {
1227 pmctl &= ~PM_Mask;
1228 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1229
1230 /* de4x5.c delays, so we do too */
1231 msleep(10);
1232 }
1233 }
1234
1235 static void de_adapter_sleep (struct de_private *de)
1236 {
1237 u32 pmctl;
1238
1239 if (de->de21040)
1240 return;
1241
1242 dw32(CSR13, 0); /* Reset phy */
1243 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1244 pmctl |= PM_Sleep;
1245 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1246 }
1247
1248 static int de_init_hw (struct de_private *de)
1249 {
1250 struct net_device *dev = de->dev;
1251 u32 macmode;
1252 int rc;
1253
1254 de_adapter_wake(de);
1255
1256 macmode = dr32(MacMode) & ~MacModeClear;
1257
1258 rc = de_reset_mac(de);
1259 if (rc)
1260 return rc;
1261
1262 de_set_media(de); /* reset phy */
1263
1264 dw32(RxRingAddr, de->ring_dma);
1265 dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
1266
1267 dw32(MacMode, RxTx | macmode);
1268
1269 dr32(RxMissed); /* self-clearing */
1270
1271 dw32(IntrMask, de_intr_mask);
1272
1273 de_set_rx_mode(dev);
1274
1275 return 0;
1276 }
1277
1278 static int de_refill_rx (struct de_private *de)
1279 {
1280 unsigned i;
1281
1282 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1283 struct sk_buff *skb;
1284
1285 skb = dev_alloc_skb(de->rx_buf_sz);
1286 if (!skb)
1287 goto err_out;
1288
1289 skb->dev = de->dev;
1290
1291 de->rx_skb[i].mapping = pci_map_single(de->pdev,
1292 skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1293 de->rx_skb[i].skb = skb;
1294
1295 de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1296 if (i == (DE_RX_RING_SIZE - 1))
1297 de->rx_ring[i].opts2 =
1298 cpu_to_le32(RingEnd | de->rx_buf_sz);
1299 else
1300 de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1301 de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1302 de->rx_ring[i].addr2 = 0;
1303 }
1304
1305 return 0;
1306
1307 err_out:
1308 de_clean_rings(de);
1309 return -ENOMEM;
1310 }
1311
1312 static int de_init_rings (struct de_private *de)
1313 {
1314 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1315 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1316
1317 de->rx_tail = 0;
1318 de->tx_head = de->tx_tail = 0;
1319
1320 return de_refill_rx (de);
1321 }
1322
1323 static int de_alloc_rings (struct de_private *de)
1324 {
1325 de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1326 if (!de->rx_ring)
1327 return -ENOMEM;
1328 de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1329 return de_init_rings(de);
1330 }
1331
1332 static void de_clean_rings (struct de_private *de)
1333 {
1334 unsigned i;
1335
1336 memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
1337 de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1338 wmb();
1339 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1340 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1341 wmb();
1342
1343 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1344 if (de->rx_skb[i].skb) {
1345 pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
1346 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1347 dev_kfree_skb(de->rx_skb[i].skb);
1348 }
1349 }
1350
1351 for (i = 0; i < DE_TX_RING_SIZE; i++) {
1352 struct sk_buff *skb = de->tx_skb[i].skb;
1353 if ((skb) && (skb != DE_DUMMY_SKB)) {
1354 if (skb != DE_SETUP_SKB) {
1355 de->net_stats.tx_dropped++;
1356 pci_unmap_single(de->pdev,
1357 de->tx_skb[i].mapping,
1358 skb->len, PCI_DMA_TODEVICE);
1359 dev_kfree_skb(skb);
1360 } else {
1361 pci_unmap_single(de->pdev,
1362 de->tx_skb[i].mapping,
1363 sizeof(de->setup_frame),
1364 PCI_DMA_TODEVICE);
1365 }
1366 }
1367 }
1368
1369 memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
1370 memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
1371 }
1372
1373 static void de_free_rings (struct de_private *de)
1374 {
1375 de_clean_rings(de);
1376 pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
1377 de->rx_ring = NULL;
1378 de->tx_ring = NULL;
1379 }
1380
1381 static int de_open (struct net_device *dev)
1382 {
1383 struct de_private *de = netdev_priv(dev);
1384 int rc;
1385
1386 netif_dbg(de, ifup, dev, "enabling interface\n");
1387
1388 de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1389
1390 rc = de_alloc_rings(de);
1391 if (rc) {
1392 netdev_err(dev, "ring allocation failure, err=%d\n", rc);
1393 return rc;
1394 }
1395
1396 dw32(IntrMask, 0);
1397
1398 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
1399 if (rc) {
1400 netdev_err(dev, "IRQ %d request failure, err=%d\n",
1401 dev->irq, rc);
1402 goto err_out_free;
1403 }
1404
1405 rc = de_init_hw(de);
1406 if (rc) {
1407 netdev_err(dev, "h/w init failure, err=%d\n", rc);
1408 goto err_out_free_irq;
1409 }
1410
1411 netif_start_queue(dev);
1412 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1413
1414 return 0;
1415
1416 err_out_free_irq:
1417 free_irq(dev->irq, dev);
1418 err_out_free:
1419 de_free_rings(de);
1420 return rc;
1421 }
1422
1423 static int de_close (struct net_device *dev)
1424 {
1425 struct de_private *de = netdev_priv(dev);
1426 unsigned long flags;
1427
1428 netif_dbg(de, ifdown, dev, "disabling interface\n");
1429
1430 del_timer_sync(&de->media_timer);
1431
1432 spin_lock_irqsave(&de->lock, flags);
1433 de_stop_hw(de);
1434 netif_stop_queue(dev);
1435 netif_carrier_off(dev);
1436 spin_unlock_irqrestore(&de->lock, flags);
1437
1438 free_irq(dev->irq, dev);
1439
1440 de_free_rings(de);
1441 de_adapter_sleep(de);
1442 return 0;
1443 }
1444
1445 static void de_tx_timeout (struct net_device *dev)
1446 {
1447 struct de_private *de = netdev_priv(dev);
1448
1449 netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1450 dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
1451 de->rx_tail, de->tx_head, de->tx_tail);
1452
1453 del_timer_sync(&de->media_timer);
1454
1455 disable_irq(dev->irq);
1456 spin_lock_irq(&de->lock);
1457
1458 de_stop_hw(de);
1459 netif_stop_queue(dev);
1460 netif_carrier_off(dev);
1461
1462 spin_unlock_irq(&de->lock);
1463 enable_irq(dev->irq);
1464
1465 /* Update the error counts. */
1466 __de_get_stats(de);
1467
1468 synchronize_irq(dev->irq);
1469 de_clean_rings(de);
1470
1471 de_init_rings(de);
1472
1473 de_init_hw(de);
1474
1475 netif_wake_queue(dev);
1476 }
1477
1478 static void __de_get_regs(struct de_private *de, u8 *buf)
1479 {
1480 int i;
1481 u32 *rbuf = (u32 *)buf;
1482
1483 /* read all CSRs */
1484 for (i = 0; i < DE_NUM_REGS; i++)
1485 rbuf[i] = dr32(i * 8);
1486
1487 /* handle self-clearing RxMissed counter, CSR8 */
1488 de_rx_missed(de, rbuf[8]);
1489 }
1490
1491 static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1492 {
1493 ecmd->supported = de->media_supported;
1494 ecmd->transceiver = XCVR_INTERNAL;
1495 ecmd->phy_address = 0;
1496 ecmd->advertising = de->media_advertise;
1497
1498 switch (de->media_type) {
1499 case DE_MEDIA_AUI:
1500 ecmd->port = PORT_AUI;
1501 break;
1502 case DE_MEDIA_BNC:
1503 ecmd->port = PORT_BNC;
1504 break;
1505 default:
1506 ecmd->port = PORT_TP;
1507 break;
1508 }
1509
1510 ethtool_cmd_speed_set(ecmd, 10);
1511
1512 if (dr32(MacMode) & FullDuplex)
1513 ecmd->duplex = DUPLEX_FULL;
1514 else
1515 ecmd->duplex = DUPLEX_HALF;
1516
1517 if (de->media_lock)
1518 ecmd->autoneg = AUTONEG_DISABLE;
1519 else
1520 ecmd->autoneg = AUTONEG_ENABLE;
1521
1522 /* ignore maxtxpkt, maxrxpkt for now */
1523
1524 return 0;
1525 }
1526
1527 static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1528 {
1529 u32 new_media;
1530 unsigned int media_lock;
1531
1532 if (ethtool_cmd_speed(ecmd) != 10)
1533 return -EINVAL;
1534 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1535 return -EINVAL;
1536 if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
1537 return -EINVAL;
1538 if (de->de21040 && ecmd->port == PORT_BNC)
1539 return -EINVAL;
1540 if (ecmd->transceiver != XCVR_INTERNAL)
1541 return -EINVAL;
1542 if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
1543 return -EINVAL;
1544 if (ecmd->advertising & ~de->media_supported)
1545 return -EINVAL;
1546 if (ecmd->autoneg == AUTONEG_ENABLE &&
1547 (!(ecmd->advertising & ADVERTISED_Autoneg)))
1548 return -EINVAL;
1549
1550 switch (ecmd->port) {
1551 case PORT_AUI:
1552 new_media = DE_MEDIA_AUI;
1553 if (!(ecmd->advertising & ADVERTISED_AUI))
1554 return -EINVAL;
1555 break;
1556 case PORT_BNC:
1557 new_media = DE_MEDIA_BNC;
1558 if (!(ecmd->advertising & ADVERTISED_BNC))
1559 return -EINVAL;
1560 break;
1561 default:
1562 if (ecmd->autoneg == AUTONEG_ENABLE)
1563 new_media = DE_MEDIA_TP_AUTO;
1564 else if (ecmd->duplex == DUPLEX_FULL)
1565 new_media = DE_MEDIA_TP_FD;
1566 else
1567 new_media = DE_MEDIA_TP;
1568 if (!(ecmd->advertising & ADVERTISED_TP))
1569 return -EINVAL;
1570 if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
1571 return -EINVAL;
1572 break;
1573 }
1574
1575 media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
1576
1577 if ((new_media == de->media_type) &&
1578 (media_lock == de->media_lock) &&
1579 (ecmd->advertising == de->media_advertise))
1580 return 0; /* nothing to change */
1581
1582 de_link_down(de);
1583 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1584 de_stop_rxtx(de);
1585
1586 de->media_type = new_media;
1587 de->media_lock = media_lock;
1588 de->media_advertise = ecmd->advertising;
1589 de_set_media(de);
1590 if (netif_running(de->dev))
1591 de_start_rxtx(de);
1592
1593 return 0;
1594 }
1595
1596 static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1597 {
1598 struct de_private *de = netdev_priv(dev);
1599
1600 strcpy (info->driver, DRV_NAME);
1601 strcpy (info->version, DRV_VERSION);
1602 strcpy (info->bus_info, pci_name(de->pdev));
1603 info->eedump_len = DE_EEPROM_SIZE;
1604 }
1605
1606 static int de_get_regs_len(struct net_device *dev)
1607 {
1608 return DE_REGS_SIZE;
1609 }
1610
1611 static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1612 {
1613 struct de_private *de = netdev_priv(dev);
1614 int rc;
1615
1616 spin_lock_irq(&de->lock);
1617 rc = __de_get_settings(de, ecmd);
1618 spin_unlock_irq(&de->lock);
1619
1620 return rc;
1621 }
1622
1623 static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1624 {
1625 struct de_private *de = netdev_priv(dev);
1626 int rc;
1627
1628 spin_lock_irq(&de->lock);
1629 rc = __de_set_settings(de, ecmd);
1630 spin_unlock_irq(&de->lock);
1631
1632 return rc;
1633 }
1634
1635 static u32 de_get_msglevel(struct net_device *dev)
1636 {
1637 struct de_private *de = netdev_priv(dev);
1638
1639 return de->msg_enable;
1640 }
1641
1642 static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1643 {
1644 struct de_private *de = netdev_priv(dev);
1645
1646 de->msg_enable = msglvl;
1647 }
1648
1649 static int de_get_eeprom(struct net_device *dev,
1650 struct ethtool_eeprom *eeprom, u8 *data)
1651 {
1652 struct de_private *de = netdev_priv(dev);
1653
1654 if (!de->ee_data)
1655 return -EOPNOTSUPP;
1656 if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1657 (eeprom->len != DE_EEPROM_SIZE))
1658 return -EINVAL;
1659 memcpy(data, de->ee_data, eeprom->len);
1660
1661 return 0;
1662 }
1663
1664 static int de_nway_reset(struct net_device *dev)
1665 {
1666 struct de_private *de = netdev_priv(dev);
1667 u32 status;
1668
1669 if (de->media_type != DE_MEDIA_TP_AUTO)
1670 return -EINVAL;
1671 if (netif_carrier_ok(de->dev))
1672 de_link_down(de);
1673
1674 status = dr32(SIAStatus);
1675 dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1676 netif_info(de, link, dev, "link nway restart, status %x,%x\n",
1677 status, dr32(SIAStatus));
1678 return 0;
1679 }
1680
1681 static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1682 void *data)
1683 {
1684 struct de_private *de = netdev_priv(dev);
1685
1686 regs->version = (DE_REGS_VER << 2) | de->de21040;
1687
1688 spin_lock_irq(&de->lock);
1689 __de_get_regs(de, data);
1690 spin_unlock_irq(&de->lock);
1691 }
1692
1693 static const struct ethtool_ops de_ethtool_ops = {
1694 .get_link = ethtool_op_get_link,
1695 .get_drvinfo = de_get_drvinfo,
1696 .get_regs_len = de_get_regs_len,
1697 .get_settings = de_get_settings,
1698 .set_settings = de_set_settings,
1699 .get_msglevel = de_get_msglevel,
1700 .set_msglevel = de_set_msglevel,
1701 .get_eeprom = de_get_eeprom,
1702 .nway_reset = de_nway_reset,
1703 .get_regs = de_get_regs,
1704 };
1705
1706 static void __devinit de21040_get_mac_address (struct de_private *de)
1707 {
1708 unsigned i;
1709
1710 dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
1711 udelay(5);
1712
1713 for (i = 0; i < 6; i++) {
1714 int value, boguscnt = 100000;
1715 do {
1716 value = dr32(ROMCmd);
1717 rmb();
1718 } while (value < 0 && --boguscnt > 0);
1719 de->dev->dev_addr[i] = value;
1720 udelay(1);
1721 if (boguscnt <= 0)
1722 pr_warn("timeout reading 21040 MAC address byte %u\n",
1723 i);
1724 }
1725 }
1726
1727 static void __devinit de21040_get_media_info(struct de_private *de)
1728 {
1729 unsigned int i;
1730
1731 de->media_type = DE_MEDIA_TP;
1732 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1733 SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1734 de->media_advertise = de->media_supported;
1735
1736 for (i = 0; i < DE_MAX_MEDIA; i++) {
1737 switch (i) {
1738 case DE_MEDIA_AUI:
1739 case DE_MEDIA_TP:
1740 case DE_MEDIA_TP_FD:
1741 de->media[i].type = i;
1742 de->media[i].csr13 = t21040_csr13[i];
1743 de->media[i].csr14 = t21040_csr14[i];
1744 de->media[i].csr15 = t21040_csr15[i];
1745 break;
1746 default:
1747 de->media[i].type = DE_MEDIA_INVALID;
1748 break;
1749 }
1750 }
1751 }
1752
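/* tulip_read_eeprom() bit-bangs the serial EEPROM through CSR9: the
 * read opcode and address are shifted out MSB-first on EE_DATA_WRITE,
 * then 16 data bits are clocked in on EE_DATA_READ.  addr_len is 6
 * for a 64-word 93C46-style part, 8 for larger parts; see the size
 * probe at the top of de21041_get_srom_info().
 */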
1753 /* Note: this routine returns extra data bits for size detection. */
1754 static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
1755 {
1756 int i;
1757 unsigned retval = 0;
1758 void __iomem *ee_addr = regs + ROMCmd;
1759 int read_cmd = location | (EE_READ_CMD << addr_len);
1760
1761 writel(EE_ENB & ~EE_CS, ee_addr);
1762 writel(EE_ENB, ee_addr);
1763
1764 /* Shift the read command bits out. */
1765 for (i = 4 + addr_len; i >= 0; i--) {
1766 short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1767 writel(EE_ENB | dataval, ee_addr);
1768 readl(ee_addr);
1769 writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1770 readl(ee_addr);
1771 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1772 }
1773 writel(EE_ENB, ee_addr);
1774 readl(ee_addr);
1775
1776 for (i = 16; i > 0; i--) {
1777 writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
1778 readl(ee_addr);
1779 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1780 writel(EE_ENB, ee_addr);
1781 readl(ee_addr);
1782 }
1783
1784 /* Terminate the EEPROM access. */
1785 writel(EE_ENB & ~EE_CS, ee_addr);
1786 return retval;
1787 }
1788
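/* Parse the 21041 SROM: the station address is at byte 0 (byte 20 on
 * some early boards), byte 27 points at the controller-0 info leaf,
 * and the leaf holds a default-media word followed by n_blocks media
 * blocks, each optionally carrying custom CSR13-15 values.
 */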
1789 static void __devinit de21041_get_srom_info (struct de_private *de)
1790 {
1791 unsigned i, sa_offset = 0, ofs;
1792 u8 ee_data[DE_EEPROM_SIZE + 6] = {};
1793 unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
1794 struct de_srom_info_leaf *il;
1795 void *bufp;
1796
1797 /* download entire eeprom */
1798 for (i = 0; i < DE_EEPROM_WORDS; i++)
1799 ((__le16 *)ee_data)[i] =
1800 cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));
1801
1802 /* DEC now has a specification but early board makers
1803 just put the address in the first EEPROM locations. */
1804 /* This does memcmp(eedata, eedata+16, 8) */
1805
1806 #ifndef CONFIG_MIPS_COBALT
1807
1808 for (i = 0; i < 8; i ++)
1809 if (ee_data[i] != ee_data[16+i])
1810 sa_offset = 20;
1811
1812 #endif
1813
1814 /* store MAC address */
1815 for (i = 0; i < 6; i ++)
1816 de->dev->dev_addr[i] = ee_data[i + sa_offset];
1817
1818 /* get offset of controller 0 info leaf. ignore 2nd byte. */
1819 ofs = ee_data[SROMC0InfoLeaf];
1820 if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
1821 goto bad_srom;
1822
1823 /* get pointer to info leaf */
1824 il = (struct de_srom_info_leaf *) &ee_data[ofs];
1825
1826 /* paranoia checks */
1827 if (il->n_blocks == 0)
1828 goto bad_srom;
1829 if ((sizeof(ee_data) - ofs) <
1830 (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
1831 goto bad_srom;
1832
1833 /* get default media type */
1834 switch (get_unaligned(&il->default_media)) {
1835 case 0x0001: de->media_type = DE_MEDIA_BNC; break;
1836 case 0x0002: de->media_type = DE_MEDIA_AUI; break;
1837 case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
1838 default: de->media_type = DE_MEDIA_TP_AUTO; break;
1839 }
1840
1841 if (netif_msg_probe(de))
1842 pr_info("de%d: SROM leaf offset %u, default media %s\n",
1843 de->board_idx, ofs, media_name[de->media_type]);
1844
1845 /* init SIA register values to defaults */
1846 for (i = 0; i < DE_MAX_MEDIA; i++) {
1847 de->media[i].type = DE_MEDIA_INVALID;
1848 de->media[i].csr13 = 0xffff;
1849 de->media[i].csr14 = 0xffff;
1850 de->media[i].csr15 = 0xffff;
1851 }
1852
1853 /* parse media blocks to see what medias are supported,
1854 * and if any custom CSR values are provided
1855 */
1856 bufp = ((void *)il) + sizeof(*il);
1857 for (i = 0; i < il->n_blocks; i++) {
1858 struct de_srom_media_block *ib = bufp;
1859 unsigned idx;
1860
1861 /* index based on media type in media block */
1862 switch(ib->opts & MediaBlockMask) {
1863 case 0: /* 10baseT */
1864 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
1865 | SUPPORTED_Autoneg;
1866 idx = DE_MEDIA_TP;
1867 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1868 break;
1869 case 1: /* BNC */
1870 de->media_supported |= SUPPORTED_BNC;
1871 idx = DE_MEDIA_BNC;
1872 break;
1873 case 2: /* AUI */
1874 de->media_supported |= SUPPORTED_AUI;
1875 idx = DE_MEDIA_AUI;
1876 break;
1877 case 4: /* 10baseT-FD */
1878 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
1879 | SUPPORTED_Autoneg;
1880 idx = DE_MEDIA_TP_FD;
1881 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1882 break;
1883 default:
1884 goto bad_srom;
1885 }
1886
1887 de->media[idx].type = idx;
1888
1889 if (netif_msg_probe(de))
1890 pr_info("de%d: media block #%u: %s",
1891 de->board_idx, i,
1892 media_name[de->media[idx].type]);
1893
1894 bufp += sizeof (ib->opts);
1895
1896 if (ib->opts & MediaCustomCSRs) {
1897 de->media[idx].csr13 = get_unaligned(&ib->csr13);
1898 de->media[idx].csr14 = get_unaligned(&ib->csr14);
1899 de->media[idx].csr15 = get_unaligned(&ib->csr15);
1900 bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
1901 sizeof(ib->csr15);
1902
1903 if (netif_msg_probe(de))
1904 pr_cont(" (%x,%x,%x)\n",
1905 de->media[idx].csr13,
1906 de->media[idx].csr14,
1907 de->media[idx].csr15);
1908
1909 } else {
1910 if (netif_msg_probe(de))
1911 pr_cont("\n");
1912 }
1913
1914 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1915 break;
1916 }
1917
1918 de->media_advertise = de->media_supported;
1919
1920 fill_defaults:
1921 /* fill in defaults, for cases where custom CSRs not used */
1922 for (i = 0; i < DE_MAX_MEDIA; i++) {
1923 if (de->media[i].csr13 == 0xffff)
1924 de->media[i].csr13 = t21041_csr13[i];
1925 if (de->media[i].csr14 == 0xffff) {
1926 /* autonegotiation is broken at least on some chip
1927 revisions - rev. 0x21 works, 0x11 does not */
1928 if (de->pdev->revision < 0x20)
1929 de->media[i].csr14 = t21041_csr14_brk[i];
1930 else
1931 de->media[i].csr14 = t21041_csr14[i];
1932 }
1933 if (de->media[i].csr15 == 0xffff)
1934 de->media[i].csr15 = t21041_csr15[i];
1935 }
1936
1937 de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);
1938
1939 return;
1940
1941 bad_srom:
1942 /* for error cases, it's ok to assume we support all these */
1943 for (i = 0; i < DE_MAX_MEDIA; i++)
1944 de->media[i].type = i;
1945 de->media_supported =
1946 SUPPORTED_10baseT_Half |
1947 SUPPORTED_10baseT_Full |
1948 SUPPORTED_Autoneg |
1949 SUPPORTED_TP |
1950 SUPPORTED_AUI |
1951 SUPPORTED_BNC;
1952 goto fill_defaults;
1953 }
1954
1955 static const struct net_device_ops de_netdev_ops = {
1956 .ndo_open = de_open,
1957 .ndo_stop = de_close,
1958 .ndo_set_multicast_list = de_set_rx_mode,
1959 .ndo_start_xmit = de_start_xmit,
1960 .ndo_get_stats = de_get_stats,
1961 .ndo_tx_timeout = de_tx_timeout,
1962 .ndo_change_mtu = eth_change_mtu,
1963 .ndo_set_mac_address = eth_mac_addr,
1964 .ndo_validate_addr = eth_validate_addr,
1965 };
1966
1967 static int __devinit de_init_one (struct pci_dev *pdev,
1968 const struct pci_device_id *ent)
1969 {
1970 struct net_device *dev;
1971 struct de_private *de;
1972 int rc;
1973 void __iomem *regs;
1974 unsigned long pciaddr;
1975 static int board_idx = -1;
1976
1977 board_idx++;
1978
1979 #ifndef MODULE
1980 if (board_idx == 0)
1981 pr_info("%s\n", version);
1982 #endif
1983
1984 /* allocate a new ethernet device structure, and fill in defaults */
1985 dev = alloc_etherdev(sizeof(struct de_private));
1986 if (!dev)
1987 return -ENOMEM;
1988
1989 dev->netdev_ops = &de_netdev_ops;
1990 SET_NETDEV_DEV(dev, &pdev->dev);
1991 dev->ethtool_ops = &de_ethtool_ops;
1992 dev->watchdog_timeo = TX_TIMEOUT;
1993
1994 de = netdev_priv(dev);
1995 de->de21040 = ent->driver_data == 0 ? 1 : 0;
1996 de->pdev = pdev;
1997 de->dev = dev;
1998 de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
1999 de->board_idx = board_idx;
2000 spin_lock_init (&de->lock);
2001 init_timer(&de->media_timer);
2002 if (de->de21040)
2003 de->media_timer.function = de21040_media_timer;
2004 else
2005 de->media_timer.function = de21041_media_timer;
2006 de->media_timer.data = (unsigned long) de;
2007
2008 netif_carrier_off(dev);
2009
2010 /* wake up device, assign resources */
2011 rc = pci_enable_device(pdev);
2012 if (rc)
2013 goto err_out_free;
2014
2015 /* reserve PCI resources to ensure driver atomicity */
2016 rc = pci_request_regions(pdev, DRV_NAME);
2017 if (rc)
2018 goto err_out_disable;
2019
2020 /* check for invalid IRQ value */
2021 if (pdev->irq < 2) {
2022 rc = -EIO;
2023 pr_err("invalid irq (%d) for pci dev %s\n",
2024 pdev->irq, pci_name(pdev));
2025 goto err_out_res;
2026 }
2027
2028 dev->irq = pdev->irq;
2029
2030 /* obtain and check validity of PCI I/O address */
2031 pciaddr = pci_resource_start(pdev, 1);
2032 if (!pciaddr) {
2033 rc = -EIO;
2034 pr_err("no MMIO resource for pci dev %s\n", pci_name(pdev));
2035 goto err_out_res;
2036 }
2037 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2038 rc = -EIO;
2039 pr_err("MMIO resource (%llx) too small on pci dev %s\n",
2040 (unsigned long long)pci_resource_len(pdev, 1),
2041 pci_name(pdev));
2042 goto err_out_res;
2043 }
2044
2045 /* remap CSR registers */
2046 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
2047 if (!regs) {
2048 rc = -EIO;
2049 pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
2050 (unsigned long long)pci_resource_len(pdev, 1),
2051 pciaddr, pci_name(pdev));
2052 goto err_out_res;
2053 }
2054 dev->base_addr = (unsigned long) regs;
2055 de->regs = regs;
2056
2057 de_adapter_wake(de);
2058
2059 /* make sure hardware is not running */
2060 rc = de_reset_mac(de);
2061 if (rc) {
2062 pr_err("Cannot reset MAC, pci dev %s\n", pci_name(pdev));
2063 goto err_out_iomap;
2064 }
2065
2066 /* get MAC address, initialize default media type and
2067 * get list of supported media
2068 */
2069 if (de->de21040) {
2070 de21040_get_mac_address(de);
2071 de21040_get_media_info(de);
2072 } else {
2073 de21041_get_srom_info(de);
2074 }
2075
2076 /* register new network interface with kernel */
2077 rc = register_netdev(dev);
2078 if (rc)
2079 goto err_out_iomap;
2080
2081 /* print info about board and interface just registered */
2082 netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
2083 de->de21040 ? "21040" : "21041",
2084 dev->base_addr,
2085 dev->dev_addr,
2086 dev->irq);
2087
2088 pci_set_drvdata(pdev, dev);
2089
2090 /* enable busmastering */
2091 pci_set_master(pdev);
2092
2093 /* put adapter to sleep */
2094 de_adapter_sleep(de);
2095
2096 return 0;
2097
2098 err_out_iomap:
2099 kfree(de->ee_data);
2100 iounmap(regs);
2101 err_out_res:
2102 pci_release_regions(pdev);
2103 err_out_disable:
2104 pci_disable_device(pdev);
2105 err_out_free:
2106 free_netdev(dev);
2107 return rc;
2108 }
2109
2110 static void __devexit de_remove_one (struct pci_dev *pdev)
2111 {
2112 struct net_device *dev = pci_get_drvdata(pdev);
2113 struct de_private *de = netdev_priv(dev);
2114
2115 BUG_ON(!dev);
2116 unregister_netdev(dev);
2117 kfree(de->ee_data);
2118 iounmap(de->regs);
2119 pci_release_regions(pdev);
2120 pci_disable_device(pdev);
2121 pci_set_drvdata(pdev, NULL);
2122 free_netdev(dev);
2123 }
2124
2125 #ifdef CONFIG_PM
2126
2127 static int de_suspend (struct pci_dev *pdev, pm_message_t state)
2128 {
2129 struct net_device *dev = pci_get_drvdata (pdev);
2130 struct de_private *de = netdev_priv(dev);
2131
2132 rtnl_lock();
2133 if (netif_running (dev)) {
2134 del_timer_sync(&de->media_timer);
2135
2136 disable_irq(dev->irq);
2137 spin_lock_irq(&de->lock);
2138
2139 de_stop_hw(de);
2140 netif_stop_queue(dev);
2141 netif_device_detach(dev);
2142 netif_carrier_off(dev);
2143
2144 spin_unlock_irq(&de->lock);
2145 enable_irq(dev->irq);
2146
2147 /* Update the error counts. */
2148 __de_get_stats(de);
2149
2150 synchronize_irq(dev->irq);
2151 de_clean_rings(de);
2152
2153 de_adapter_sleep(de);
2154 pci_disable_device(pdev);
2155 } else {
2156 netif_device_detach(dev);
2157 }
2158 rtnl_unlock();
2159 return 0;
2160 }
2161
2162 static int de_resume (struct pci_dev *pdev)
2163 {
2164 struct net_device *dev = pci_get_drvdata (pdev);
2165 struct de_private *de = netdev_priv(dev);
2166 int retval = 0;
2167
2168 rtnl_lock();
2169 if (netif_device_present(dev))
2170 goto out;
2171 if (!netif_running(dev))
2172 goto out_attach;
2173 if ((retval = pci_enable_device(pdev))) {
2174 netdev_err(dev, "pci_enable_device failed in resume\n");
2175 goto out;
2176 }
2177 pci_set_master(pdev);
2178 de_init_rings(de);
2179 de_init_hw(de);
2180 out_attach:
2181 netif_device_attach(dev);
2182 out:
2183 rtnl_unlock();
2184 	return retval;
2185 }
2186
2187 #endif /* CONFIG_PM */
2188
2189 static struct pci_driver de_driver = {
2190 .name = DRV_NAME,
2191 .id_table = de_pci_tbl,
2192 .probe = de_init_one,
2193 .remove = __devexit_p(de_remove_one),
2194 #ifdef CONFIG_PM
2195 .suspend = de_suspend,
2196 .resume = de_resume,
2197 #endif
2198 };
2199
2200 static int __init de_init (void)
2201 {
2202 #ifdef MODULE
2203 pr_info("%s\n", version);
2204 #endif
2205 return pci_register_driver(&de_driver);
2206 }
2207
2208 static void __exit de_exit (void)
2209 {
2210 pci_unregister_driver (&de_driver);
2211 }
2212
2213 module_init(de_init);
2214 module_exit(de_exit);