/*
 * Network device driver for the MACE ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitrev.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/macio.h>

#include "mace.h"
static int port_aaui = -1;	/* AUI port: -1: autoselect, 0: off, 1: on */

#define N_RX_RING	8
#define N_TX_RING	6
#define MAX_TX_ACTIVE	1
#define NCMDS_TX	1	/* dma commands per element in tx ring */
#define RX_BUFLEN	(ETH_FRAME_LEN + 8)
#define TX_TIMEOUT	HZ	/* 1 second */
/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80
struct mace_data {
    volatile struct mace __iomem *mace;
    volatile struct dbdma_regs __iomem *tx_dma;
    int tx_dma_intr;
    volatile struct dbdma_regs __iomem *rx_dma;
    int rx_dma_intr;
    volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
    volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
    struct sk_buff *rx_bufs[N_RX_RING];
    int rx_fill;
    int rx_empty;
    struct sk_buff *tx_bufs[N_TX_RING];
    int tx_fill;
    int tx_empty;
    unsigned char maccc;
    unsigned char tx_fullup;
    unsigned char tx_active;
    unsigned char tx_bad_runt;
    struct net_device_stats stats;
    struct timer_list tx_timeout;
    int timeout_active;
    int port_aaui;
    int chipid;
    struct macio_dev *mdev;
    spinlock_t lock;
};
/*
 * Number of bytes of private data per MACE: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct mace_data) \
	+ (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
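/*
 * Worked example of the arithmetic above (assuming the ring sizes
 * defined earlier, N_RX_RING = 8 and N_TX_RING = 6 with NCMDS_TX = 1):
 * the "+ 3" covers one branch command for each ring plus one spare
 * 16-byte command slot consumed by the DBDMA_ALIGN() round-up, so
 * (8 + 6 + 3) * sizeof(struct dbdma_cmd) = 17 * 16 = 272 bytes follow
 * the struct mace_data in the netdev private area.
 */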
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *mace_stats(struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_txdma_intr(int irq, void *dev_id);
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id);
static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(unsigned long data);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * If we can't get a skbuff when we need it, we use this area for DMA.
 */
static unsigned char *dummy_buf;
static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
    struct device_node *mace = macio_get_of_node(mdev);
    struct net_device *dev;
    struct mace_data *mp;
    const unsigned char *addr;
    int j, rev, rc = -EBUSY;

    if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
	printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n",
	       mace->full_name);
	return -ENODEV;
    }

    addr = of_get_property(mace, "mac-address", NULL);
    if (addr == NULL) {
	addr = of_get_property(mace, "local-mac-address", NULL);
	if (addr == NULL) {
	    printk(KERN_ERR "Can't get mac-address for MACE %s\n",
		   mace->full_name);
	    return -ENODEV;
	}
    }

    /*
     * lazy allocate the driver-wide dummy buffer. (Note that we
     * never have more than one MACE in the system anyway)
     */
    if (dummy_buf == NULL) {
	dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL);
	if (dummy_buf == NULL) {
	    printk(KERN_ERR "MACE: couldn't allocate dummy buffer\n");
	    return -ENOMEM;
	}
    }
    if (macio_request_resources(mdev, "mace")) {
	printk(KERN_ERR "MACE: can't request IO resources !\n");
	return -EBUSY;
    }

    dev = alloc_etherdev(PRIV_BYTES);
    if (!dev) {
	printk(KERN_ERR "MACE: can't allocate ethernet device !\n");
	rc = -ENOMEM;
	goto err_release;
    }
    SET_NETDEV_DEV(dev, &mdev->ofdev.dev);

    mp = dev->priv;
    mp->mdev = mdev;
    macio_set_drvdata(mdev, dev);

    dev->base_addr = macio_resource_start(mdev, 0);
    mp->mace = ioremap(dev->base_addr, 0x1000);
    if (mp->mace == NULL) {
	printk(KERN_ERR "MACE: can't map IO resources !\n");
	rc = -ENOMEM;
	goto err_free;
    }
    dev->irq = macio_irq(mdev, 0);

    rev = addr[0] == 0 && addr[1] == 0xA0;
    for (j = 0; j < 6; ++j) {
	dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
    }
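    /*
     * bitrev8() reverses the bit order within each byte, e.g.
     * bitrev8(0x05) == 0xa0; some older firmware stores the MAC
     * address with the bits of each octet reversed, which the
     * 00, 0xA0 prefix test above is used to detect.
     */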
    mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
		 in_8(&mp->mace->chipid_lo);

    mp = (struct mace_data *) dev->priv;
    mp->maccc = ENXMT | ENRCV;

    mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
    if (mp->tx_dma == NULL) {
	printk(KERN_ERR "MACE: can't map TX DMA resources !\n");
	rc = -ENOMEM;
	goto err_unmap_io;
    }
    mp->tx_dma_intr = macio_irq(mdev, 1);

    mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
    if (mp->rx_dma == NULL) {
	printk(KERN_ERR "MACE: can't map RX DMA resources !\n");
	rc = -ENOMEM;
	goto err_unmap_tx_dma;
    }
    mp->rx_dma_intr = macio_irq(mdev, 2);

    mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
    mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;

    memset(&mp->stats, 0, sizeof(mp->stats));
    memset((char *) mp->tx_cmds, 0,
	   (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
    init_timer(&mp->tx_timeout);
    spin_lock_init(&mp->lock);
    mp->timeout_active = 0;

    if (port_aaui >= 0)
	mp->port_aaui = port_aaui;
    else {
	/* Apple Network Server uses the AAUI port */
	if (machine_is_compatible("AAPL,ShinerESB"))
	    mp->port_aaui = 1;
	else {
#ifdef CONFIG_MACE_AAUI_PORT
	    mp->port_aaui = 1;
#else
	    mp->port_aaui = 0;
#endif
	}
    }
    dev->open = mace_open;
    dev->stop = mace_close;
    dev->hard_start_xmit = mace_xmit_start;
    dev->get_stats = mace_stats;
    dev->set_multicast_list = mace_set_multicast;
    dev->set_mac_address = mace_set_address;
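    /*
     * Note: these per-field assignments are the pre-2.6.29 way of
     * wiring up a net_device; later kernels moved them into a single
     * net_device_ops structure assigned to dev->netdev_ops.
     */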
    /*
     * Most of what is below could be moved to mace_open()
     */
    mace_reset(dev);

    rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
    if (rc) {
	printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
	goto err_unmap_rx_dma;
    }
    rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
    if (rc) {
	printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
	goto err_free_irq;
    }
    rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
    if (rc) {
	printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
	goto err_free_tx_irq;
    }

    rc = register_netdev(dev);
    if (rc) {
	printk(KERN_ERR "MACE: Cannot register net device, aborting.\n");
	goto err_free_rx_irq;
    }

    printk(KERN_INFO "%s: MACE at", dev->name);
    for (j = 0; j < 6; ++j) {
	printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
    }
    printk(", chip revision %d.%d\n", mp->chipid >> 8, mp->chipid & 0xff);

    return 0;

 err_free_rx_irq:
    free_irq(macio_irq(mdev, 2), dev);
 err_free_tx_irq:
    free_irq(macio_irq(mdev, 1), dev);
 err_free_irq:
    free_irq(macio_irq(mdev, 0), dev);
 err_unmap_rx_dma:
    iounmap(mp->rx_dma);
 err_unmap_tx_dma:
    iounmap(mp->tx_dma);
 err_unmap_io:
    iounmap(mp->mace);
 err_free:
    free_netdev(dev);
 err_release:
    macio_release_resources(mdev);

    return rc;
}
static int __devexit mace_remove(struct macio_dev *mdev)
{
    struct net_device *dev = macio_get_drvdata(mdev);
    struct mace_data *mp;

    BUG_ON(dev == NULL);

    macio_set_drvdata(mdev, NULL);

    mp = dev->priv;

    unregister_netdev(dev);

    free_irq(dev->irq, dev);
    free_irq(mp->tx_dma_intr, dev);
    free_irq(mp->rx_dma_intr, dev);

    iounmap(mp->rx_dma);
    iounmap(mp->tx_dma);
    iounmap(mp->mace);

    free_netdev(dev);

    macio_release_resources(mdev);

    return 0;
}
static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
{
    int i;

    out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);

    /*
     * Yes this looks peculiar, but apparently it needs to be this
     * way on some machines.
     */
    for (i = 200; i > 0; --i)
	if (ld_le32(&dma->control) & RUN)
	    udelay(1);
}
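/*
 * On Apple DBDMA channels the control register is written as a
 * mask/value pair: the upper 16 bits select which status bits to
 * change and the lower 16 bits give their new values.  So the write
 * above, (WAKE|FLUSH|PAUSE|RUN) << 16 with a zero low half, clears
 * all four bits at once, stopping the channel.
 */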
static void mace_reset(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    int i;

    /* soft-reset the chip */
    i = 200;
    while (--i) {
	out_8(&mb->biucc, SWRST);
	if (in_8(&mb->biucc) & SWRST) {
	    udelay(10);
	    continue;
	}
	break;
    }
    if (!i) {
	printk(KERN_ERR "mace: cannot reset chip!\n");
	return;
    }

    out_8(&mb->imr, 0xff);	/* disable all intrs for now */
    i = in_8(&mb->ir);
    out_8(&mb->maccc, 0);	/* turn off tx, rx */

    out_8(&mb->biucc, XMTSP_64);
    out_8(&mb->utr, RTRD);
    out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
    out_8(&mb->xmtfc, AUTO_PAD_XMIT); /* auto-pad short frames */
    out_8(&mb->rcvfc, 0);

    /* load up the hardware address */
    __mace_set_address(dev, dev->dev_addr);

    /* clear the multicast filter */
    if (mp->chipid == BROKEN_ADDRCHG_REV)
	out_8(&mb->iac, LOGADDR);
    else {
	out_8(&mb->iac, ADDRCHG | LOGADDR);
	while ((in_8(&mb->iac) & ADDRCHG) != 0)
	    ;
    }
    for (i = 0; i < 8; ++i)
	out_8(&mb->ladrf, 0);

    /* done changing address */
    if (mp->chipid != BROKEN_ADDRCHG_REV)
	out_8(&mb->iac, 0);

    if (mp->port_aaui)
	out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
    else
	out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}
static void __mace_set_address(struct net_device *dev, void *addr)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    unsigned char *p = addr;
    int i;

    /* load up the hardware address */
    if (mp->chipid == BROKEN_ADDRCHG_REV)
	out_8(&mb->iac, PHYADDR);
    else {
	out_8(&mb->iac, ADDRCHG | PHYADDR);
	while ((in_8(&mb->iac) & ADDRCHG) != 0)
	    ;
    }
    for (i = 0; i < 6; ++i)
	out_8(&mb->padr, dev->dev_addr[i] = p[i]);
    if (mp->chipid != BROKEN_ADDRCHG_REV)
	out_8(&mb->iac, 0);
}
static int mace_set_address(struct net_device *dev, void *addr)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);

    __mace_set_address(dev, addr);

    /* note: setting ADDRCHG clears ENRCV */
    out_8(&mb->maccc, mp->maccc);

    spin_unlock_irqrestore(&mp->lock, flags);
    return 0;
}
static inline void mace_clean_rings(struct mace_data *mp)
{
    int i;

    /* free some skb's */
    for (i = 0; i < N_RX_RING; ++i) {
	if (mp->rx_bufs[i] != 0) {
	    dev_kfree_skb(mp->rx_bufs[i]);
	    mp->rx_bufs[i] = NULL;
	}
    }
    for (i = mp->tx_empty; i != mp->tx_fill; ) {
	dev_kfree_skb(mp->tx_bufs[i]);
	if (++i >= N_TX_RING)
	    i = 0;
    }
}
static int mace_open(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int i;
    struct sk_buff *skb;
    unsigned char *data;

    /* reset the chip */
    mace_reset(dev);

    /* initialize list of sk_buffs for receiving and set up recv dma */
    mace_clean_rings(mp);
    memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
    cp = mp->rx_cmds;
    for (i = 0; i < N_RX_RING - 1; ++i) {
	skb = dev_alloc_skb(RX_BUFLEN + 2);
	if (!skb) {
	    data = dummy_buf;
	} else {
	    skb_reserve(skb, 2);	/* so IP header lands on 4-byte bdry */
	    data = skb->data;
	}
	mp->rx_bufs[i] = skb;
	st_le16(&cp->req_count, RX_BUFLEN);
	st_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
	st_le32(&cp->phy_addr, virt_to_bus(data));
	cp->xfer_status = 0;
	++cp;
    }
    mp->rx_bufs[i] = NULL;
    st_le16(&cp->command, DBDMA_STOP);
    mp->rx_fill = i;
    mp->rx_empty = 0;

    /* Put a branch back to the beginning of the receive command list */
    ++cp;
    st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
    st_le32(&cp->cmd_dep, virt_to_bus(mp->rx_cmds));
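    /*
     * At this point the receive command list forms a ring: N_RX_RING - 1
     * INPUT_LAST+INTR_ALWAYS commands (one per buffer), a DBDMA_STOP
     * slot that the rx interrupt handler rewrites as buffers are
     * recycled, and finally this NOP+BR_ALWAYS command branching back
     * to rx_cmds so the channel never runs off the end of the list.
     */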
    /* start rx dma */
    out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
    out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* put a branch at the end of the tx command list */
    cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
    st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
    st_le32(&cp->cmd_dep, virt_to_bus(mp->tx_cmds));

    /* reset tx dma */
    out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
    out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
    mp->tx_fill = 0;
    mp->tx_empty = 0;
    mp->tx_fullup = 0;
    mp->tx_active = 0;
    mp->tx_bad_runt = 0;

    /* turn it on! */
    out_8(&mb->maccc, mp->maccc);
    /* enable all interrupts except receive interrupts */
    out_8(&mb->imr, RCVINT);

    return 0;
}
static int mace_close(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;

    /* disable rx and tx */
    out_8(&mb->maccc, 0);
    out_8(&mb->imr, 0xff);	/* disable all intrs */

    /* disable rx and tx dma */
    st_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
    st_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */

    mace_clean_rings(mp);

    return 0;
}
static inline void mace_set_timeout(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;

    if (mp->timeout_active)
	del_timer(&mp->tx_timeout);
    mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
    mp->tx_timeout.function = mace_tx_timeout;
    mp->tx_timeout.data = (unsigned long) dev;
    add_timer(&mp->tx_timeout);
    mp->timeout_active = 1;
}
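/*
 * Ring bookkeeping used below, as I read the code: tx_fill is the next
 * slot the xmit path will fill, tx_empty the oldest slot not yet
 * reclaimed by the interrupt handler, and tx_active counts commands
 * handed to the DMA channel (at most MAX_TX_ACTIVE at a time).  The
 * explicit .function/.data timer setup above is the classic API that
 * predates timer_setup() in later kernels.
 */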
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp, *np;
    unsigned long flags;
    int fill, next, len;

    /* see if there's a free slot in the tx ring */
    spin_lock_irqsave(&mp->lock, flags);
    fill = mp->tx_fill;
    next = fill + 1;
    if (next >= N_TX_RING)
	next = 0;
    if (next == mp->tx_empty) {
	netif_stop_queue(dev);
	mp->tx_fullup = 1;
	spin_unlock_irqrestore(&mp->lock, flags);
	return 1;		/* can't take it at the moment */
    }
    spin_unlock_irqrestore(&mp->lock, flags);

    /* partially fill in the dma command block */
    len = skb->len;
    if (len > ETH_FRAME_LEN) {
	printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
	len = ETH_FRAME_LEN;
    }
    mp->tx_bufs[fill] = skb;
    cp = mp->tx_cmds + NCMDS_TX * fill;
    st_le16(&cp->req_count, len);
    st_le32(&cp->phy_addr, virt_to_bus(skb->data));

    np = mp->tx_cmds + NCMDS_TX * next;
    out_le16(&np->command, DBDMA_STOP);

    /* poke the tx dma channel */
    spin_lock_irqsave(&mp->lock, flags);
    mp->tx_fill = next;
    if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->command, OUTPUT_LAST);
	out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
	++mp->tx_active;
	mace_set_timeout(dev);
    }
    if (++next >= N_TX_RING)
	next = 0;
    if (next == mp->tx_empty)
	netif_stop_queue(dev);
    spin_unlock_irqrestore(&mp->lock, flags);

    return 0;
}
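/*
 * Returning 1 ("can't take it at the moment") after netif_stop_queue()
 * is the old hard_start_xmit convention; contemporary drivers spell
 * this NETDEV_TX_BUSY and return NETDEV_TX_OK on success.
 */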
static struct net_device_stats *mace_stats(struct net_device *dev)
{
    struct mace_data *p = (struct mace_data *) dev->priv;

    return &p->stats;
}
static void mace_set_multicast(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    int i, j;
    u32 crc;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);
    mp->maccc &= ~PROM;
    if (dev->flags & IFF_PROMISC) {
	mp->maccc |= PROM;
    } else {
	unsigned char multicast_filter[8];
	struct dev_mc_list *dmi = dev->mc_list;

	if (dev->flags & IFF_ALLMULTI) {
	    for (i = 0; i < 8; i++)
		multicast_filter[i] = 0xff;
	} else {
	    for (i = 0; i < 8; i++)
		multicast_filter[i] = 0;
	    for (i = 0; i < dev->mc_count; i++) {
		crc = ether_crc_le(6, dmi->dmi_addr);
		j = crc >> 26;	/* bit number in multicast_filter */
		multicast_filter[j >> 3] |= 1 << (j & 7);
		dmi = dmi->next;
	    }
	}
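	/*
	 * The filter is a 64-bit hash table: the top 6 bits of the
	 * little-endian CRC-32 of the address (crc >> 26) pick a bit
	 * number 0..63, e.g. j = 42 sets bit 2 of filter byte 5.
	 */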
624 printk("Multicast filter :");
625 for (i
= 0; i
< 8; i
++)
626 printk("%02x ", multicast_filter
[i
]);
630 if (mp
->chipid
== BROKEN_ADDRCHG_REV
)
631 out_8(&mb
->iac
, LOGADDR
);
633 out_8(&mb
->iac
, ADDRCHG
| LOGADDR
);
634 while ((in_8(&mb
->iac
) & ADDRCHG
) != 0)
637 for (i
= 0; i
< 8; ++i
)
638 out_8(&mb
->ladrf
, multicast_filter
[i
]);
639 if (mp
->chipid
!= BROKEN_ADDRCHG_REV
)
643 out_8(&mb
->maccc
, mp
->maccc
);
644 spin_unlock_irqrestore(&mp
->lock
, flags
);
static void mace_handle_misc_intrs(struct mace_data *mp, int intr)
{
    volatile struct mace __iomem *mb = mp->mace;
    static int mace_babbles, mace_jabbers;

    if (intr & MPCO)
	mp->stats.rx_missed_errors += 256;
    mp->stats.rx_missed_errors += in_8(&mb->mpc);	/* reading clears it */
    if (intr & RNTCO)
	mp->stats.rx_length_errors += 256;
    mp->stats.rx_length_errors += in_8(&mb->rntpc);	/* reading clears it */
    if (intr & CERR)
	++mp->stats.tx_heartbeat_errors;
    if (intr & BABBLE)
	if (mace_babbles++ < 4)
	    printk(KERN_DEBUG "mace: babbling transmitter\n");
    if (intr & JABBER)
	if (mace_jabbers++ < 4)
	    printk(KERN_DEBUG "mace: jabbering transceiver\n");
}
static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int intr, fs, i, stat, x;
    int xcount, dstat;
    unsigned long flags;
    /* static int mace_last_fs, mace_last_xcount; */

    spin_lock_irqsave(&mp->lock, flags);
    intr = in_8(&mb->ir);		/* read interrupt register */
    in_8(&mb->xmtrc);			/* get retries */
    mace_handle_misc_intrs(mp, intr);

    i = mp->tx_empty;
    while (in_8(&mb->pr) & XMTSV) {
	del_timer(&mp->tx_timeout);
	mp->timeout_active = 0;
	/*
	 * Clear any interrupt indication associated with this status
	 * word.  This appears to unlatch any error indication from
	 * the DMA controller.
	 */
	intr = in_8(&mb->ir);
	if (intr != 0)
	    mace_handle_misc_intrs(mp, intr);
	if (mp->tx_bad_runt) {
	    fs = in_8(&mb->xmtfs);
	    mp->tx_bad_runt = 0;
	    out_8(&mb->xmtfc, AUTO_PAD_XMIT);
	    continue;
	}
	dstat = ld_le32(&td->status);
	/* stop DMA controller */
	out_le32(&td->control, RUN << 16);
	/*
	 * xcount is the number of complete frames which have been
	 * written to the fifo but for which status has not been read.
	 */
	xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
	if (xcount == 0 || (dstat & DEAD)) {
	    /*
	     * If a packet was aborted before the DMA controller has
	     * finished transferring it, it seems that there are 2 bytes
	     * which are stuck in some buffer somewhere.  These will get
	     * transmitted as soon as we read the frame status (which
	     * reenables the transmit data transfer request).  Turning
	     * off the DMA controller and/or resetting the MACE doesn't
	     * help.  So we disable auto-padding and FCS transmission
	     * so the two bytes will only be a runt packet which should
	     * be ignored by other stations.
	     */
	    out_8(&mb->xmtfc, DXMTFCS);
	}
	fs = in_8(&mb->xmtfs);
	if ((fs & XMTSV) == 0) {
	    printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
		   fs, xcount, dstat);
	    /*
	     * XXX mace likes to hang the machine after a xmtfs error.
	     * This is hard to reproduce, reseting *may* help
	     */
	}
	cp = mp->tx_cmds + NCMDS_TX * i;
	stat = ld_le16(&cp->xfer_status);
	if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
	    /*
	     * Check whether there were in fact 2 bytes written to
	     * the transmit FIFO.
	     */
	    udelay(1);
	    x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
	    if (x != 0) {
		/* there were two bytes with an end-of-packet indication */
		mp->tx_bad_runt = 1;
		mace_set_timeout(dev);
	    } else {
		/*
		 * Either there weren't the two bytes buffered up, or they
		 * didn't have an end-of-packet indication.
		 * We flush the transmit FIFO just in case (by setting the
		 * XMTFWU bit with the transmitter disabled).
		 */
		out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
		out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
		udelay(1);
		out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
		out_8(&mb->xmtfc, AUTO_PAD_XMIT);
	    }
	}
	/* dma should have finished */
	if (i == mp->tx_fill) {
	    printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
		   fs, xcount, dstat);
	    continue;
	}
	/* Update stats */
	if (fs & (UFLO|LCOL|LCAR|RTRY)) {
	    ++mp->stats.tx_errors;
	    if (fs & LCAR)
		++mp->stats.tx_carrier_errors;
	    if (fs & (UFLO|LCOL|RTRY))
		++mp->stats.tx_aborted_errors;
	} else {
	    mp->stats.tx_bytes += mp->tx_bufs[i]->len;
	    ++mp->stats.tx_packets;
	}
	dev_kfree_skb_irq(mp->tx_bufs[i]);
	--mp->tx_active;
	if (++i >= N_TX_RING)
	    i = 0;
#if 0
	mace_last_fs = fs;
	mace_last_xcount = xcount;
#endif
    }

    if (i != mp->tx_empty) {
	mp->tx_fullup = 0;
	netif_wake_queue(dev);
    }
    mp->tx_empty = i;
    i += mp->tx_active;
    if (i >= N_TX_RING)
	i -= N_TX_RING;
    if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
	do {
	    /* set up the next one */
	    cp = mp->tx_cmds + NCMDS_TX * i;
	    out_le16(&cp->xfer_status, 0);
	    out_le16(&cp->command, OUTPUT_LAST);
	    ++mp->tx_active;
	    if (++i >= N_TX_RING)
		i = 0;
	} while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
	out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
	mace_set_timeout(dev);
    }
    spin_unlock_irqrestore(&mp->lock, flags);
    return IRQ_HANDLED;
}
static void mace_tx_timeout(unsigned long data)
{
    struct net_device *dev = (struct net_device *) data;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp;
    unsigned long flags;
    int i;

    spin_lock_irqsave(&mp->lock, flags);
    mp->timeout_active = 0;
    if (mp->tx_active == 0 && !mp->tx_bad_runt)
	goto out;

    /* update various counters */
    mace_handle_misc_intrs(mp, in_8(&mb->ir));

    cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;

    /* turn off both tx and rx and reset the chip */
    out_8(&mb->maccc, 0);
    printk(KERN_ERR "mace: transmit timeout - resetting\n");
    dbdma_reset(td);
    mace_reset(dev);

    /* restart rx dma */
    cp = bus_to_virt(ld_le32(&rd->cmdptr));
    dbdma_reset(rd);
    out_le16(&cp->xfer_status, 0);
    out_le32(&rd->cmdptr, virt_to_bus(cp));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* fix up the transmit side */
    i = mp->tx_empty;
    mp->tx_active = 0;
    ++mp->stats.tx_errors;
    if (mp->tx_bad_runt) {
	mp->tx_bad_runt = 0;
    } else if (i != mp->tx_fill) {
	dev_kfree_skb(mp->tx_bufs[i]);
	if (++i >= N_TX_RING)
	    i = 0;
	mp->tx_empty = i;
    }
    mp->tx_fullup = 0;
    netif_wake_queue(dev);
    if (i != mp->tx_fill) {
	cp = mp->tx_cmds + NCMDS_TX * i;
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->command, OUTPUT_LAST);
	out_le32(&td->cmdptr, virt_to_bus(cp));
	out_le32(&td->control, (RUN << 16) | RUN);
	++mp->tx_active;
	mace_set_timeout(dev);
    }

    /* turn it back on */
    out_8(&mb->imr, RCVINT);
    out_8(&mb->maccc, mp->maccc);

out:
    spin_unlock_irqrestore(&mp->lock, flags);
}
static irqreturn_t mace_txdma_intr(int irq, void *dev_id)
{
    return IRQ_HANDLED;
}
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp, *np;
    int i, nb, stat, next;
    struct sk_buff *skb;
    unsigned frame_status;
    static int mace_lost_status;
    unsigned char *data;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);
    for (i = mp->rx_empty; i != mp->rx_fill; ) {
	cp = mp->rx_cmds + i;
	stat = ld_le16(&cp->xfer_status);
	if ((stat & ACTIVE) == 0) {
	    next = i + 1;
	    if (next >= N_RX_RING)
		next = 0;
	    np = mp->rx_cmds + next;
	    if (next != mp->rx_fill
		&& (ld_le16(&np->xfer_status) & ACTIVE) != 0) {
		printk(KERN_DEBUG "mace: lost a status word\n");
		++mace_lost_status;
	    } else
		break;
	}
	nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count);
	out_le16(&cp->command, DBDMA_STOP);
	/* got a packet, have a look at it */
	skb = mp->rx_bufs[i];
	if (!skb) {
	    ++mp->stats.rx_dropped;
	} else if (nb > 8) {
	    data = skb->data;
	    frame_status = (data[nb-3] << 8) + data[nb-4];
	    if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
		++mp->stats.rx_errors;
		if (frame_status & RS_OFLO)
		    ++mp->stats.rx_over_errors;
		if (frame_status & RS_FRAMERR)
		    ++mp->stats.rx_frame_errors;
		if (frame_status & RS_FCSERR)
		    ++mp->stats.rx_crc_errors;
	    } else {
		/* Mace feature AUTO_STRIP_RCV is on by default, dropping the
		 * FCS on frames with 802.3 headers. This means that Ethernet
		 * frames have 8 extra octets at the end, while 802.3 frames
		 * have only 4. We need to correctly account for this. */
		if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */
		    nb -= 4;
		else	/* Ethernet header; mace includes FCS */
		    nb -= 8;
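		/*
		 * 1536 (0x600) is the usual discriminator between an
		 * 802.3 length field and an Ethernet type field in
		 * octets 12-13 of the frame.
		 */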
		skb_put(skb, nb);
		skb->protocol = eth_type_trans(skb, dev);
		mp->stats.rx_bytes += skb->len;
		netif_rx(skb);
		dev->last_rx = jiffies;
		mp->rx_bufs[i] = NULL;
		++mp->stats.rx_packets;
	    }
	} else {
	    ++mp->stats.rx_errors;
	    ++mp->stats.rx_length_errors;
	}

	/* advance to next */
	if (++i >= N_RX_RING)
	    i = 0;
    }
    mp->rx_empty = i;

    i = mp->rx_fill;
    for (;;) {
	next = i + 1;
	if (next >= N_RX_RING)
	    next = 0;
	if (next == mp->rx_empty)
	    break;
	cp = mp->rx_cmds + i;
	skb = mp->rx_bufs[i];
	if (!skb) {
	    skb = dev_alloc_skb(RX_BUFLEN + 2);
	    if (skb) {
		skb_reserve(skb, 2);
		mp->rx_bufs[i] = skb;
	    }
	}
	st_le16(&cp->req_count, RX_BUFLEN);
	data = skb? skb->data: dummy_buf;
	st_le32(&cp->phy_addr, virt_to_bus(data));
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);

	if ((ld_le32(&rd->status) & ACTIVE) != 0) {
	    out_le32(&rd->control, (PAUSE << 16) | PAUSE);
	    while ((in_le32(&rd->status) & ACTIVE) != 0)
		;
	}
	i = next;
    }
    if (i != mp->rx_fill) {
	out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
	mp->rx_fill = i;
    }
    spin_unlock_irqrestore(&mp->lock, flags);
    return IRQ_HANDLED;
}
static struct of_device_id mace_match[] =
{
    {
	.name		= "mace",
    },
    {},
};
MODULE_DEVICE_TABLE (of, mace_match);

static struct macio_driver mace_driver =
{
    .name		= "mace",
    .match_table	= mace_match,
    .probe		= mace_probe,
    .remove		= mace_remove,
};
static int __init mace_init(void)
{
    return macio_register_driver(&mace_driver);
}

static void __exit mace_cleanup(void)
{
    macio_unregister_driver(&mace_driver);

    kfree(dummy_buf);
    dummy_buf = NULL;
}
MODULE_AUTHOR("Paul Mackerras");
MODULE_DESCRIPTION("PowerMac MACE driver.");
module_param(port_aaui, int, 0);
MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");
MODULE_LICENSE("GPL");
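/*
 * Example use of the parameter above (hypothetical invocation):
 *   modprobe mace port_aaui=1    # force the AAUI transceiver port
 */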
module_init(mace_init);
module_exit(mace_cleanup);
);