/*
 * Network device driver for the MACE ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitrev.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/macio.h>

#include "mace.h"

static int port_aaui = -1;

#define N_RX_RING 8
#define N_TX_RING 6
#define MAX_TX_ACTIVE 1
#define NCMDS_TX 1 /* dma commands per element in tx ring */
#define RX_BUFLEN (ETH_FRAME_LEN + 8)
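/* The extra 8 bytes appear to leave room for the trailing FCS/status
 * octets the chip can leave in the receive buffer (see mace_rxdma_intr). */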
#define TX_TIMEOUT HZ /* 1 second */

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV 0x0941

/* Bits in transmit DMA status */
#define TX_DMA_ERR 0x80

struct mace_data {
    volatile struct mace __iomem *mace;
    volatile struct dbdma_regs __iomem *tx_dma;
    int tx_dma_intr;
    volatile struct dbdma_regs __iomem *rx_dma;
    int rx_dma_intr;
    volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */
    volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */
    struct sk_buff *rx_bufs[N_RX_RING];
    int rx_fill;
    int rx_empty;
    struct sk_buff *tx_bufs[N_TX_RING];
    int tx_fill;
    int tx_empty;
    unsigned char maccc;
    unsigned char tx_fullup;
    unsigned char tx_active;
    unsigned char tx_bad_runt;
    struct timer_list tx_timeout;
    int timeout_active;
    int port_aaui;
    int chipid;
    struct macio_dev *mdev;
    spinlock_t lock;
};

/*
 * Number of bytes of private data per MACE: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES (sizeof(struct mace_data) \
	+ (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
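/* The command lists are carved out of this same allocation: mace_probe()
 * points tx_cmds at the 16-byte-aligned area just past the mace_data
 * struct (see DBDMA_ALIGN), with rx_cmds following the tx list. */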
76 | ||
1da177e4 LT |
77 | static int mace_open(struct net_device *dev); |
78 | static int mace_close(struct net_device *dev); | |
79 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); | |
1da177e4 LT |
80 | static void mace_set_multicast(struct net_device *dev); |
81 | static void mace_reset(struct net_device *dev); | |
82 | static int mace_set_address(struct net_device *dev, void *addr); | |
7d12e780 DH |
83 | static irqreturn_t mace_interrupt(int irq, void *dev_id); |
84 | static irqreturn_t mace_txdma_intr(int irq, void *dev_id); | |
85 | static irqreturn_t mace_rxdma_intr(int irq, void *dev_id); | |
1da177e4 LT |
86 | static void mace_set_timeout(struct net_device *dev); |
87 | static void mace_tx_timeout(unsigned long data); | |
88 | static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma); | |
89 | static inline void mace_clean_rings(struct mace_data *mp); | |
90 | static void __mace_set_address(struct net_device *dev, void *addr); | |
91 | ||
92 | /* | |
93 | * If we can't get a skbuff when we need it, we use this area for DMA. | |
94 | */ | |
95 | static unsigned char *dummy_buf; | |
96 | ||
5e655772 | 97 | static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match) |
1da177e4 LT |
98 | { |
99 | struct device_node *mace = macio_get_of_node(mdev); | |
100 | struct net_device *dev; | |
101 | struct mace_data *mp; | |
1a2509c9 | 102 | const unsigned char *addr; |
1da177e4 LT |
103 | int j, rev, rc = -EBUSY; |
104 | ||
105 | if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { | |
106 | printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n", | |
107 | mace->full_name); | |
108 | return -ENODEV; | |
109 | } | |
110 | ||
40cd3a45 | 111 | addr = of_get_property(mace, "mac-address", NULL); |
1da177e4 | 112 | if (addr == NULL) { |
40cd3a45 | 113 | addr = of_get_property(mace, "local-mac-address", NULL); |
1da177e4 LT |
114 | if (addr == NULL) { |
115 | printk(KERN_ERR "Can't get mac-address for MACE %s\n", | |
116 | mace->full_name); | |
117 | return -ENODEV; | |
118 | } | |
119 | } | |
120 | ||
121 | /* | |
122 | * lazy allocate the driver-wide dummy buffer. (Note that we | |
123 | * never have more than one MACE in the system anyway) | |
124 | */ | |
125 | if (dummy_buf == NULL) { | |
126 | dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL); | |
127 | if (dummy_buf == NULL) { | |
128 | printk(KERN_ERR "MACE: couldn't allocate dummy buffer\n"); | |
129 | return -ENOMEM; | |
130 | } | |
131 | } | |
132 | ||
133 | if (macio_request_resources(mdev, "mace")) { | |
134 | printk(KERN_ERR "MACE: can't request IO resources !\n"); | |
135 | return -EBUSY; | |
136 | } | |
137 | ||
138 | dev = alloc_etherdev(PRIV_BYTES); | |
139 | if (!dev) { | |
140 | printk(KERN_ERR "MACE: can't allocate ethernet device !\n"); | |
141 | rc = -ENOMEM; | |
142 | goto err_release; | |
143 | } | |
1da177e4 LT |
144 | SET_NETDEV_DEV(dev, &mdev->ofdev.dev); |
145 | ||
146 | mp = dev->priv; | |
147 | mp->mdev = mdev; | |
148 | macio_set_drvdata(mdev, dev); | |
149 | ||
150 | dev->base_addr = macio_resource_start(mdev, 0); | |
151 | mp->mace = ioremap(dev->base_addr, 0x1000); | |
152 | if (mp->mace == NULL) { | |
153 | printk(KERN_ERR "MACE: can't map IO resources !\n"); | |
154 | rc = -ENOMEM; | |
155 | goto err_free; | |
156 | } | |
157 | dev->irq = macio_irq(mdev, 0); | |
158 | ||
    rev = addr[0] == 0 && addr[1] == 0xA0;
    for (j = 0; j < 6; ++j) {
	dev->dev_addr[j] = rev ? bitrev8(addr[j]) : addr[j];
    }
    mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
		 in_8(&mp->mace->chipid_lo);

    mp->maccc = ENXMT | ENRCV;

    mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
    if (mp->tx_dma == NULL) {
	printk(KERN_ERR "MACE: can't map TX DMA resources !\n");
	rc = -ENOMEM;
	goto err_unmap_io;
    }
    mp->tx_dma_intr = macio_irq(mdev, 1);

    mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
    if (mp->rx_dma == NULL) {
	printk(KERN_ERR "MACE: can't map RX DMA resources !\n");
	rc = -ENOMEM;
	goto err_unmap_tx_dma;
    }
    mp->rx_dma_intr = macio_irq(mdev, 2);

    mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
    mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;

    memset((char *) mp->tx_cmds, 0,
	   (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
    init_timer(&mp->tx_timeout);
    spin_lock_init(&mp->lock);
    mp->timeout_active = 0;

    if (port_aaui >= 0)
	mp->port_aaui = port_aaui;
    else {
	/* Apple Network Server uses the AAUI port */
	if (machine_is_compatible("AAPL,ShinerESB"))
	    mp->port_aaui = 1;
	else {
#ifdef CONFIG_MACE_AAUI_PORT
	    mp->port_aaui = 1;
#else
	    mp->port_aaui = 0;
#endif
	}
    }

    dev->open = mace_open;
    dev->stop = mace_close;
    dev->hard_start_xmit = mace_xmit_start;
    dev->set_multicast_list = mace_set_multicast;
    dev->set_mac_address = mace_set_address;

    /*
     * Most of what is below could be moved to mace_open()
     */
    mace_reset(dev);

    rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
    if (rc) {
	printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
	goto err_unmap_rx_dma;
    }
    rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
    if (rc) {
	printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
	goto err_free_irq;
    }
    rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
    if (rc) {
	printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
	goto err_free_tx_irq;
    }

    rc = register_netdev(dev);
    if (rc) {
	printk(KERN_ERR "MACE: Cannot register net device, aborting.\n");
	goto err_free_rx_irq;
    }

    printk(KERN_INFO "%s: MACE at %pM, chip revision %d.%d\n",
	   dev->name, dev->dev_addr,
	   mp->chipid >> 8, mp->chipid & 0xff);

    return 0;

 err_free_rx_irq:
    free_irq(macio_irq(mdev, 2), dev);
 err_free_tx_irq:
    free_irq(macio_irq(mdev, 1), dev);
 err_free_irq:
    free_irq(macio_irq(mdev, 0), dev);
 err_unmap_rx_dma:
    iounmap(mp->rx_dma);
 err_unmap_tx_dma:
    iounmap(mp->tx_dma);
 err_unmap_io:
    iounmap(mp->mace);
 err_free:
    free_netdev(dev);
 err_release:
    macio_release_resources(mdev);

    return rc;
}

static int __devexit mace_remove(struct macio_dev *mdev)
{
    struct net_device *dev = macio_get_drvdata(mdev);
    struct mace_data *mp;

    BUG_ON(dev == NULL);

    macio_set_drvdata(mdev, NULL);

    mp = dev->priv;

    unregister_netdev(dev);

    free_irq(dev->irq, dev);
    free_irq(mp->tx_dma_intr, dev);
    free_irq(mp->rx_dma_intr, dev);

    iounmap(mp->rx_dma);
    iounmap(mp->tx_dma);
    iounmap(mp->mace);

    free_netdev(dev);

    macio_release_resources(mdev);

    return 0;
}

static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
{
    int i;

    out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);

    /*
     * Yes this looks peculiar, but apparently it needs to be this
     * way on some machines.
     */
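    /* Give the channel up to ~200us to come to a stop. */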
    for (i = 200; i > 0; --i)
	if (ld_le32(&dma->control) & RUN)
	    udelay(1);
}

static void mace_reset(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    int i;

    /* soft-reset the chip */
    i = 200;
    while (--i) {
	out_8(&mb->biucc, SWRST);
	if (in_8(&mb->biucc) & SWRST) {
	    udelay(10);
	    continue;
	}
	break;
    }
    if (!i) {
	printk(KERN_ERR "mace: cannot reset chip!\n");
	return;
    }

    out_8(&mb->imr, 0xff);	/* disable all intrs for now */
    i = in_8(&mb->ir);
    out_8(&mb->maccc, 0);	/* turn off tx, rx */

    out_8(&mb->biucc, XMTSP_64);
    out_8(&mb->utr, RTRD);
    out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
    out_8(&mb->xmtfc, AUTO_PAD_XMIT); /* auto-pad short frames */
    out_8(&mb->rcvfc, 0);

    /* load up the hardware address */
    __mace_set_address(dev, dev->dev_addr);

    /* clear the multicast filter */
    if (mp->chipid == BROKEN_ADDRCHG_REV)
	out_8(&mb->iac, LOGADDR);
    else {
	out_8(&mb->iac, ADDRCHG | LOGADDR);
	while ((in_8(&mb->iac) & ADDRCHG) != 0)
	    ;
    }
    for (i = 0; i < 8; ++i)
	out_8(&mb->ladrf, 0);

    /* done changing address */
    if (mp->chipid != BROKEN_ADDRCHG_REV)
	out_8(&mb->iac, 0);

    if (mp->port_aaui)
	out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
    else
	out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}

static void __mace_set_address(struct net_device *dev, void *addr)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    unsigned char *p = addr;
    int i;

    /* load up the hardware address */
    if (mp->chipid == BROKEN_ADDRCHG_REV)
	out_8(&mb->iac, PHYADDR);
    else {
	out_8(&mb->iac, ADDRCHG | PHYADDR);
	while ((in_8(&mb->iac) & ADDRCHG) != 0)
	    ;
    }
    for (i = 0; i < 6; ++i)
	out_8(&mb->padr, dev->dev_addr[i] = p[i]);
    if (mp->chipid != BROKEN_ADDRCHG_REV)
	out_8(&mb->iac, 0);
}

static int mace_set_address(struct net_device *dev, void *addr)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);

    __mace_set_address(dev, addr);

    /* note: setting ADDRCHG clears ENRCV */
    out_8(&mb->maccc, mp->maccc);

    spin_unlock_irqrestore(&mp->lock, flags);
    return 0;
}

static inline void mace_clean_rings(struct mace_data *mp)
{
    int i;

    /* free some skb's */
    for (i = 0; i < N_RX_RING; ++i) {
	if (mp->rx_bufs[i] != NULL) {
	    dev_kfree_skb(mp->rx_bufs[i]);
	    mp->rx_bufs[i] = NULL;
	}
    }
    for (i = mp->tx_empty; i != mp->tx_fill; ) {
	dev_kfree_skb(mp->tx_bufs[i]);
	if (++i >= N_TX_RING)
	    i = 0;
    }
}

static int mace_open(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int i;
    struct sk_buff *skb;
    unsigned char *data;

    /* reset the chip */
    mace_reset(dev);

    /* initialize list of sk_buffs for receiving and set up recv dma */
    mace_clean_rings(mp);
    memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
    cp = mp->rx_cmds;
    for (i = 0; i < N_RX_RING - 1; ++i) {
	skb = dev_alloc_skb(RX_BUFLEN + 2);
	if (!skb) {
	    data = dummy_buf;
	} else {
	    skb_reserve(skb, 2);	/* so IP header lands on 4-byte bdry */
	    data = skb->data;
	}
	mp->rx_bufs[i] = skb;
	st_le16(&cp->req_count, RX_BUFLEN);
	st_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
	st_le32(&cp->phy_addr, virt_to_bus(data));
	cp->xfer_status = 0;
	++cp;
    }
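    /*
     * The last ring slot is deliberately left unfilled and holds a stop
     * command, so rx_fill can never catch up with rx_empty and make a
     * full ring look empty.
     */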
    mp->rx_bufs[i] = NULL;
    st_le16(&cp->command, DBDMA_STOP);
    mp->rx_fill = i;
    mp->rx_empty = 0;

    /* Put a branch back to the beginning of the receive command list */
    ++cp;
    st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
    st_le32(&cp->cmd_dep, virt_to_bus(mp->rx_cmds));

    /* start rx dma */
    out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
    out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* put a branch at the end of the tx command list */
    cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
    st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
    st_le32(&cp->cmd_dep, virt_to_bus(mp->tx_cmds));

    /* reset tx dma */
    out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
    out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
    mp->tx_fill = 0;
    mp->tx_empty = 0;
    mp->tx_fullup = 0;
    mp->tx_active = 0;
    mp->tx_bad_runt = 0;

    /* turn it on! */
    out_8(&mb->maccc, mp->maccc);
    /* enable all interrupts except receive interrupts */
    out_8(&mb->imr, RCVINT);

    return 0;
}

static int mace_close(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;

    /* disable rx and tx */
    out_8(&mb->maccc, 0);
    out_8(&mb->imr, 0xff);		/* disable all intrs */

    /* disable rx and tx dma */
    st_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
    st_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */

    mace_clean_rings(mp);

    return 0;
}

static inline void mace_set_timeout(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;

    if (mp->timeout_active)
	del_timer(&mp->tx_timeout);
    mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
    mp->tx_timeout.function = mace_tx_timeout;
    mp->tx_timeout.data = (unsigned long) dev;
    add_timer(&mp->tx_timeout);
    mp->timeout_active = 1;
}

static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp, *np;
    unsigned long flags;
    int fill, next, len;

    /* see if there's a free slot in the tx ring */
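    /*
     * One slot is kept unused here too: the ring counts as full when
     * advancing tx_fill would make it equal to tx_empty.
     */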
    spin_lock_irqsave(&mp->lock, flags);
    fill = mp->tx_fill;
    next = fill + 1;
    if (next >= N_TX_RING)
	next = 0;
    if (next == mp->tx_empty) {
	netif_stop_queue(dev);
	mp->tx_fullup = 1;
	spin_unlock_irqrestore(&mp->lock, flags);
	return 1;		/* can't take it at the moment */
    }
    spin_unlock_irqrestore(&mp->lock, flags);

    /* partially fill in the dma command block */
    len = skb->len;
    if (len > ETH_FRAME_LEN) {
	printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
	len = ETH_FRAME_LEN;
    }
    mp->tx_bufs[fill] = skb;
    cp = mp->tx_cmds + NCMDS_TX * fill;
    st_le16(&cp->req_count, len);
    st_le32(&cp->phy_addr, virt_to_bus(skb->data));

    np = mp->tx_cmds + NCMDS_TX * next;
    out_le16(&np->command, DBDMA_STOP);

    /* poke the tx dma channel */
    spin_lock_irqsave(&mp->lock, flags);
    mp->tx_fill = next;
    if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->command, OUTPUT_LAST);
	out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
	++mp->tx_active;
	mace_set_timeout(dev);
    }
    if (++next >= N_TX_RING)
	next = 0;
    if (next == mp->tx_empty)
	netif_stop_queue(dev);
    spin_unlock_irqrestore(&mp->lock, flags);

    return 0;
}

static void mace_set_multicast(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    int i, j;
    u32 crc;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);
    mp->maccc &= ~PROM;
    if (dev->flags & IFF_PROMISC) {
	mp->maccc |= PROM;
    } else {
	unsigned char multicast_filter[8];
	struct dev_mc_list *dmi = dev->mc_list;

	if (dev->flags & IFF_ALLMULTI) {
	    for (i = 0; i < 8; i++)
		multicast_filter[i] = 0xff;
	} else {
	    for (i = 0; i < 8; i++)
		multicast_filter[i] = 0;
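	    /*
	     * Hash each address: the top 6 bits of the little-endian
	     * CRC-32 select one of the 64 bits in the logical address
	     * filter (ladrf).
	     */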
	    for (i = 0; i < dev->mc_count; i++) {
		crc = ether_crc_le(6, dmi->dmi_addr);
		j = crc >> 26;	/* bit number in multicast_filter */
		multicast_filter[j >> 3] |= 1 << (j & 7);
		dmi = dmi->next;
	    }
	}
#if 0
	printk("Multicast filter :");
	for (i = 0; i < 8; i++)
	    printk("%02x ", multicast_filter[i]);
	printk("\n");
#endif

	if (mp->chipid == BROKEN_ADDRCHG_REV)
	    out_8(&mb->iac, LOGADDR);
	else {
	    out_8(&mb->iac, ADDRCHG | LOGADDR);
	    while ((in_8(&mb->iac) & ADDRCHG) != 0)
		;
	}
	for (i = 0; i < 8; ++i)
	    out_8(&mb->ladrf, multicast_filter[i]);
	if (mp->chipid != BROKEN_ADDRCHG_REV)
	    out_8(&mb->iac, 0);
    }
    /* reset maccc */
    out_8(&mb->maccc, mp->maccc);
    spin_unlock_irqrestore(&mp->lock, flags);
}

static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev)
{
    volatile struct mace __iomem *mb = mp->mace;
    static int mace_babbles, mace_jabbers;

    if (intr & MPCO)
	dev->stats.rx_missed_errors += 256;
    dev->stats.rx_missed_errors += in_8(&mb->mpc);	/* reading clears it */
    if (intr & RNTPCO)
	dev->stats.rx_length_errors += 256;
    dev->stats.rx_length_errors += in_8(&mb->rntpc);	/* reading clears it */
    if (intr & CERR)
	++dev->stats.tx_heartbeat_errors;
    if (intr & BABBLE)
	if (mace_babbles++ < 4)
	    printk(KERN_DEBUG "mace: babbling transmitter\n");
    if (intr & JABBER)
	if (mace_jabbers++ < 4)
	    printk(KERN_DEBUG "mace: jabbering transceiver\n");
}

static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int intr, fs, i, stat, x;
    int xcount, dstat;
    unsigned long flags;
    /* static int mace_last_fs, mace_last_xcount; */

    spin_lock_irqsave(&mp->lock, flags);
    intr = in_8(&mb->ir);		/* read interrupt register */
    in_8(&mb->xmtrc);			/* get retries */
    mace_handle_misc_intrs(mp, intr, dev);

    i = mp->tx_empty;
    while (in_8(&mb->pr) & XMTSV) {
	del_timer(&mp->tx_timeout);
	mp->timeout_active = 0;
	/*
	 * Clear any interrupt indication associated with this status
	 * word.  This appears to unlatch any error indication from
	 * the DMA controller.
	 */
	intr = in_8(&mb->ir);
	if (intr != 0)
	    mace_handle_misc_intrs(mp, intr, dev);
	if (mp->tx_bad_runt) {
	    fs = in_8(&mb->xmtfs);
	    mp->tx_bad_runt = 0;
	    out_8(&mb->xmtfc, AUTO_PAD_XMIT);
	    continue;
	}
	dstat = ld_le32(&td->status);
	/* stop DMA controller */
	out_le32(&td->control, RUN << 16);
	/*
	 * xcount is the number of complete frames which have been
	 * written to the fifo but for which status has not been read.
	 */
	xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
	if (xcount == 0 || (dstat & DEAD)) {
	    /*
	     * If a packet was aborted before the DMA controller has
	     * finished transferring it, it seems that there are 2 bytes
	     * which are stuck in some buffer somewhere.  These will get
	     * transmitted as soon as we read the frame status (which
	     * reenables the transmit data transfer request).  Turning
	     * off the DMA controller and/or resetting the MACE doesn't
	     * help.  So we disable auto-padding and FCS transmission
	     * so the two bytes will only be a runt packet which should
	     * be ignored by other stations.
	     */
	    out_8(&mb->xmtfc, DXMTFCS);
	}
	fs = in_8(&mb->xmtfs);
	if ((fs & XMTSV) == 0) {
	    printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
		   fs, xcount, dstat);
	    mace_reset(dev);
	    /*
	     * XXX mace likes to hang the machine after an xmtfs error.
	     * This is hard to reproduce; resetting *may* help.
	     */
	}
	cp = mp->tx_cmds + NCMDS_TX * i;
	stat = ld_le16(&cp->xfer_status);
	if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
	    /*
	     * Check whether there were in fact 2 bytes written to
	     * the transmit FIFO.
	     */
	    udelay(1);
	    x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
	    if (x != 0) {
		/* there were two bytes with an end-of-packet indication */
		mp->tx_bad_runt = 1;
		mace_set_timeout(dev);
	    } else {
		/*
		 * Either there weren't the two bytes buffered up, or they
		 * didn't have an end-of-packet indication.
		 * We flush the transmit FIFO just in case (by setting the
		 * XMTFWU bit with the transmitter disabled).
		 */
		out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
		out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
		udelay(1);
		out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
		out_8(&mb->xmtfc, AUTO_PAD_XMIT);
	    }
	}
	/* dma should have finished */
	if (i == mp->tx_fill) {
	    printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
		   fs, xcount, dstat);
	    continue;
	}
	/* Update stats */
	if (fs & (UFLO|LCOL|LCAR|RTRY)) {
	    ++dev->stats.tx_errors;
	    if (fs & LCAR)
		++dev->stats.tx_carrier_errors;
	    if (fs & (UFLO|LCOL|RTRY))
		++dev->stats.tx_aborted_errors;
	} else {
	    dev->stats.tx_bytes += mp->tx_bufs[i]->len;
	    ++dev->stats.tx_packets;
	}
	dev_kfree_skb_irq(mp->tx_bufs[i]);
	--mp->tx_active;
	if (++i >= N_TX_RING)
	    i = 0;
#if 0
	mace_last_fs = fs;
	mace_last_xcount = xcount;
#endif
    }

    if (i != mp->tx_empty) {
	mp->tx_fullup = 0;
	netif_wake_queue(dev);
    }
    mp->tx_empty = i;
    i += mp->tx_active;
    if (i >= N_TX_RING)
	i -= N_TX_RING;
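    /*
     * i now indexes the first descriptor that has been queued by
     * mace_xmit_start() but not yet handed to the DMA channel.
     */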
    if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
	do {
	    /* set up the next one */
	    cp = mp->tx_cmds + NCMDS_TX * i;
	    out_le16(&cp->xfer_status, 0);
	    out_le16(&cp->command, OUTPUT_LAST);
	    ++mp->tx_active;
	    if (++i >= N_TX_RING)
		i = 0;
	} while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
	out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
	mace_set_timeout(dev);
    }
    spin_unlock_irqrestore(&mp->lock, flags);
    return IRQ_HANDLED;
}

static void mace_tx_timeout(unsigned long data)
{
    struct net_device *dev = (struct net_device *) data;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp;
    unsigned long flags;
    int i;

    spin_lock_irqsave(&mp->lock, flags);
    mp->timeout_active = 0;
    if (mp->tx_active == 0 && !mp->tx_bad_runt)
	goto out;

    /* update various counters */
    mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);

    cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;

    /* turn off both tx and rx and reset the chip */
    out_8(&mb->maccc, 0);
    printk(KERN_ERR "mace: transmit timeout - resetting\n");
    dbdma_reset(td);
    mace_reset(dev);

    /* restart rx dma */
    cp = bus_to_virt(ld_le32(&rd->cmdptr));
    dbdma_reset(rd);
    out_le16(&cp->xfer_status, 0);
    out_le32(&rd->cmdptr, virt_to_bus(cp));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* fix up the transmit side */
    i = mp->tx_empty;
    mp->tx_active = 0;
    ++dev->stats.tx_errors;
    if (mp->tx_bad_runt) {
	mp->tx_bad_runt = 0;
    } else if (i != mp->tx_fill) {
	dev_kfree_skb(mp->tx_bufs[i]);
	if (++i >= N_TX_RING)
	    i = 0;
	mp->tx_empty = i;
    }
    mp->tx_fullup = 0;
    netif_wake_queue(dev);
    if (i != mp->tx_fill) {
	cp = mp->tx_cmds + NCMDS_TX * i;
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->command, OUTPUT_LAST);
	out_le32(&td->cmdptr, virt_to_bus(cp));
	out_le32(&td->control, (RUN << 16) | RUN);
	++mp->tx_active;
	mace_set_timeout(dev);
    }

    /* turn it back on */
    out_8(&mb->imr, RCVINT);
    out_8(&mb->maccc, mp->maccc);

 out:
    spin_unlock_irqrestore(&mp->lock, flags);
}

static irqreturn_t mace_txdma_intr(int irq, void *dev_id)
{
    return IRQ_HANDLED;
}

static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp, *np;
    int i, nb, stat, next;
    struct sk_buff *skb;
    unsigned frame_status;
    static int mace_lost_status;
    unsigned char *data;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);
    for (i = mp->rx_empty; i != mp->rx_fill; ) {
	cp = mp->rx_cmds + i;
	stat = ld_le16(&cp->xfer_status);
	if ((stat & ACTIVE) == 0) {
	    next = i + 1;
	    if (next >= N_RX_RING)
		next = 0;
	    np = mp->rx_cmds + next;
	    if (next != mp->rx_fill
		&& (ld_le16(&np->xfer_status) & ACTIVE) != 0) {
		printk(KERN_DEBUG "mace: lost a status word\n");
		++mace_lost_status;
	    } else
		break;
	}
	nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count);
	out_le16(&cp->command, DBDMA_STOP);
	/* got a packet, have a look at it */
	skb = mp->rx_bufs[i];
	if (!skb) {
	    ++dev->stats.rx_dropped;
	} else if (nb > 8) {
	    data = skb->data;
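	    /*
	     * The chip deposits trailing status octets after the frame
	     * data; bytes nb-4 and nb-3 hold the receive frame status.
	     */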
	    frame_status = (data[nb-3] << 8) + data[nb-4];
	    if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
		++dev->stats.rx_errors;
		if (frame_status & RS_OFLO)
		    ++dev->stats.rx_over_errors;
		if (frame_status & RS_FRAMERR)
		    ++dev->stats.rx_frame_errors;
		if (frame_status & RS_FCSERR)
		    ++dev->stats.rx_crc_errors;
	    } else {
		/* Mace feature AUTO_STRIP_RCV is on by default, dropping the
		 * FCS on frames with 802.3 headers. This means that Ethernet
		 * frames have 8 extra octets at the end, while 802.3 frames
		 * have only 4. We need to correctly account for this. */
		if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */
		    nb -= 4;
		else	/* Ethernet header; mace includes FCS */
		    nb -= 8;
		skb_put(skb, nb);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_bytes += skb->len;
		netif_rx(skb);
		mp->rx_bufs[i] = NULL;
		++dev->stats.rx_packets;
	    }
	} else {
	    ++dev->stats.rx_errors;
	    ++dev->stats.rx_length_errors;
	}

	/* advance to next */
	if (++i >= N_RX_RING)
	    i = 0;
    }
    mp->rx_empty = i;

    i = mp->rx_fill;
    for (;;) {
	next = i + 1;
	if (next >= N_RX_RING)
	    next = 0;
	if (next == mp->rx_empty)
	    break;
	cp = mp->rx_cmds + i;
	skb = mp->rx_bufs[i];
	if (!skb) {
	    skb = dev_alloc_skb(RX_BUFLEN + 2);
	    if (skb) {
		skb_reserve(skb, 2);
		mp->rx_bufs[i] = skb;
	    }
	}
	st_le16(&cp->req_count, RX_BUFLEN);
	data = skb ? skb->data : dummy_buf;
	st_le32(&cp->phy_addr, virt_to_bus(data));
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
#if 0
	if ((ld_le32(&rd->status) & ACTIVE) != 0) {
	    out_le32(&rd->control, (PAUSE << 16) | PAUSE);
	    while ((in_le32(&rd->status) & ACTIVE) != 0)
		;
	}
#endif
	i = next;
    }
    if (i != mp->rx_fill) {
	out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
	mp->rx_fill = i;
    }
    spin_unlock_irqrestore(&mp->lock, flags);
    return IRQ_HANDLED;
}

static struct of_device_id mace_match[] =
{
    {
	.name		= "mace",
    },
    {},
};
MODULE_DEVICE_TABLE(of, mace_match);

static struct macio_driver mace_driver =
{
    .name		= "mace",
    .match_table	= mace_match,
    .probe		= mace_probe,
    .remove		= mace_remove,
};

static int __init mace_init(void)
{
    return macio_register_driver(&mace_driver);
}

static void __exit mace_cleanup(void)
{
    macio_unregister_driver(&mace_driver);

    kfree(dummy_buf);
    dummy_buf = NULL;
}

MODULE_AUTHOR("Paul Mackerras");
MODULE_DESCRIPTION("PowerMac MACE driver.");
module_param(port_aaui, int, 0);
MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");
MODULE_LICENSE("GPL");

module_init(mace_init);
module_exit(mace_cleanup);