/*
 * Network device driver for the MACE ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitrev.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/macio.h>

#include "mace.h"

static int port_aaui = -1;

#define N_RX_RING	8
#define N_TX_RING	6
#define MAX_TX_ACTIVE	1
#define NCMDS_TX	1	/* dma commands per element in tx ring */
#define RX_BUFLEN	(ETH_FRAME_LEN + 8)
#define TX_TIMEOUT	HZ	/* 1 second */

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

struct mace_data {
    volatile struct mace __iomem *mace;
    volatile struct dbdma_regs __iomem *tx_dma;
    int tx_dma_intr;
    volatile struct dbdma_regs __iomem *rx_dma;
    int rx_dma_intr;
    volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
    volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
    struct sk_buff *rx_bufs[N_RX_RING];
    int rx_fill;
    int rx_empty;
    struct sk_buff *tx_bufs[N_TX_RING];
    int tx_fill;
    int tx_empty;
    unsigned char maccc;
    unsigned char tx_fullup;
    unsigned char tx_active;
    unsigned char tx_bad_runt;
    struct timer_list tx_timeout;
    int timeout_active;
    int port_aaui;
    int chipid;
    struct macio_dev *mdev;
    spinlock_t lock;
};

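/*
 * Ring-index convention used throughout (as applied by mace_xmit_start()
 * and the interrupt handlers below): *_fill is the next slot to be handed
 * to the hardware and *_empty is the oldest slot the hardware still owns;
 * the ring is treated as full when advancing the fill index would make it
 * equal to the empty index, so one slot always stays unused.
 */
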
/*
 * Number of bytes of private data per MACE: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct mace_data) \
	+ (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_txdma_intr(int irq, void *dev_id);
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id);
static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(unsigned long data);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * If we can't get a skbuff when we need it, we use this area for DMA.
 */
static unsigned char *dummy_buf;

static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
    struct device_node *mace = macio_get_of_node(mdev);
    struct net_device *dev;
    struct mace_data *mp;
    const unsigned char *addr;
    int j, rev, rc = -EBUSY;

    if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
        printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n",
               mace->full_name);
        return -ENODEV;
    }

    addr = of_get_property(mace, "mac-address", NULL);
    if (addr == NULL) {
        addr = of_get_property(mace, "local-mac-address", NULL);
        if (addr == NULL) {
            printk(KERN_ERR "Can't get mac-address for MACE %s\n",
                   mace->full_name);
            return -ENODEV;
        }
    }

    /*
     * lazy allocate the driver-wide dummy buffer. (Note that we
     * never have more than one MACE in the system anyway)
     */
    if (dummy_buf == NULL) {
        dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL);
        if (dummy_buf == NULL) {
            printk(KERN_ERR "MACE: couldn't allocate dummy buffer\n");
            return -ENOMEM;
        }
    }

    if (macio_request_resources(mdev, "mace")) {
        printk(KERN_ERR "MACE: can't request IO resources !\n");
        return -EBUSY;
    }

    dev = alloc_etherdev(PRIV_BYTES);
    if (!dev) {
        printk(KERN_ERR "MACE: can't allocate ethernet device !\n");
        rc = -ENOMEM;
        goto err_release;
    }
    SET_NETDEV_DEV(dev, &mdev->ofdev.dev);

    mp = dev->priv;
    mp->mdev = mdev;
    macio_set_drvdata(mdev, dev);

    dev->base_addr = macio_resource_start(mdev, 0);
    mp->mace = ioremap(dev->base_addr, 0x1000);
    if (mp->mace == NULL) {
        printk(KERN_ERR "MACE: can't map IO resources !\n");
        rc = -ENOMEM;
        goto err_free;
    }
    dev->irq = macio_irq(mdev, 0);

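    /*
     * Some firmware revisions appear to store the MAC address with the
     * bits of each byte reversed, showing up as an address that starts
     * with 00:A0; bit-reversing every byte (bitrev8 below) then
     * presumably recovers the conventional form, which begins with
     * Apple's usual 00:05 prefix.
     */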
    rev = addr[0] == 0 && addr[1] == 0xA0;
    for (j = 0; j < 6; ++j) {
        dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
    }
    mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
                 in_8(&mp->mace->chipid_lo);

    mp = (struct mace_data *) dev->priv;
    mp->maccc = ENXMT | ENRCV;

    mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
    if (mp->tx_dma == NULL) {
        printk(KERN_ERR "MACE: can't map TX DMA resources !\n");
        rc = -ENOMEM;
        goto err_unmap_io;
    }
    mp->tx_dma_intr = macio_irq(mdev, 1);

    mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
    if (mp->rx_dma == NULL) {
        printk(KERN_ERR "MACE: can't map RX DMA resources !\n");
        rc = -ENOMEM;
        goto err_unmap_tx_dma;
    }
    mp->rx_dma_intr = macio_irq(mdev, 2);

    mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
    mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;

    memset((char *) mp->tx_cmds, 0,
           (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
    init_timer(&mp->tx_timeout);
    spin_lock_init(&mp->lock);
    mp->timeout_active = 0;

    if (port_aaui >= 0)
        mp->port_aaui = port_aaui;
    else {
        /* Apple Network Server uses the AAUI port */
        if (machine_is_compatible("AAPL,ShinerESB"))
            mp->port_aaui = 1;
        else {
#ifdef CONFIG_MACE_AAUI_PORT
            mp->port_aaui = 1;
#else
            mp->port_aaui = 0;
#endif
        }
    }

    dev->open = mace_open;
    dev->stop = mace_close;
    dev->hard_start_xmit = mace_xmit_start;
    dev->set_multicast_list = mace_set_multicast;
    dev->set_mac_address = mace_set_address;

    /*
     * Most of what is below could be moved to mace_open()
     */
    mace_reset(dev);

    rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
    if (rc) {
        printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
        goto err_unmap_rx_dma;
    }
    rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
    if (rc) {
        printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
        goto err_free_irq;
    }
    rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
    if (rc) {
        printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
        goto err_free_tx_irq;
    }

    rc = register_netdev(dev);
    if (rc) {
        printk(KERN_ERR "MACE: Cannot register net device, aborting.\n");
        goto err_free_rx_irq;
    }

    printk(KERN_INFO "%s: MACE at", dev->name);
    for (j = 0; j < 6; ++j) {
        printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
    }
    printk(", chip revision %d.%d\n", mp->chipid >> 8, mp->chipid & 0xff);

    return 0;

 err_free_rx_irq:
    free_irq(macio_irq(mdev, 2), dev);
 err_free_tx_irq:
    free_irq(macio_irq(mdev, 1), dev);
 err_free_irq:
    free_irq(macio_irq(mdev, 0), dev);
 err_unmap_rx_dma:
    iounmap(mp->rx_dma);
 err_unmap_tx_dma:
    iounmap(mp->tx_dma);
 err_unmap_io:
    iounmap(mp->mace);
 err_free:
    free_netdev(dev);
 err_release:
    macio_release_resources(mdev);

    return rc;
}

static int __devexit mace_remove(struct macio_dev *mdev)
{
    struct net_device *dev = macio_get_drvdata(mdev);
    struct mace_data *mp;

    BUG_ON(dev == NULL);

    macio_set_drvdata(mdev, NULL);

    mp = dev->priv;

    unregister_netdev(dev);

    free_irq(dev->irq, dev);
    free_irq(mp->tx_dma_intr, dev);
    free_irq(mp->rx_dma_intr, dev);

    iounmap(mp->rx_dma);
    iounmap(mp->tx_dma);
    iounmap(mp->mace);

    free_netdev(dev);

    macio_release_resources(mdev);

    return 0;
}

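/*
 * Note on the DBDMA control-register writes used below: the upper 16 bits
 * written are a mask selecting which control bits to change and the lower
 * 16 bits are their new values, so "(bits << 16)" clears those bits while
 * "((bits << 16) | bits)" sets them; the RUN/PAUSE/FLUSH/WAKE manipulation
 * in the rest of the driver is meant to be read that way.
 */
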
static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
{
    int i;

    out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);

    /*
     * Yes this looks peculiar, but apparently it needs to be this
     * way on some machines.
     */
    for (i = 200; i > 0; --i)
        if (ld_le32(&dma->control) & RUN)
            udelay(1);
}

static void mace_reset(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    int i;

    /* soft-reset the chip */
    i = 200;
    while (--i) {
        out_8(&mb->biucc, SWRST);
        if (in_8(&mb->biucc) & SWRST) {
            udelay(10);
            continue;
        }
        break;
    }
    if (!i) {
        printk(KERN_ERR "mace: cannot reset chip!\n");
        return;
    }

    out_8(&mb->imr, 0xff);	/* disable all intrs for now */
    i = in_8(&mb->ir);
    out_8(&mb->maccc, 0);	/* turn off tx, rx */

    out_8(&mb->biucc, XMTSP_64);
    out_8(&mb->utr, RTRD);
    out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
    out_8(&mb->xmtfc, AUTO_PAD_XMIT);	/* auto-pad short frames */
    out_8(&mb->rcvfc, 0);

    /* load up the hardware address */
    __mace_set_address(dev, dev->dev_addr);

    /* clear the multicast filter */
    if (mp->chipid == BROKEN_ADDRCHG_REV)
        out_8(&mb->iac, LOGADDR);
    else {
        out_8(&mb->iac, ADDRCHG | LOGADDR);
        while ((in_8(&mb->iac) & ADDRCHG) != 0)
            ;
    }
    for (i = 0; i < 8; ++i)
        out_8(&mb->ladrf, 0);

    /* done changing address */
    if (mp->chipid != BROKEN_ADDRCHG_REV)
        out_8(&mb->iac, 0);

    if (mp->port_aaui)
        out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
    else
        out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}

static void __mace_set_address(struct net_device *dev, void *addr)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    unsigned char *p = addr;
    int i;

    /* load up the hardware address */
    if (mp->chipid == BROKEN_ADDRCHG_REV)
        out_8(&mb->iac, PHYADDR);
    else {
        out_8(&mb->iac, ADDRCHG | PHYADDR);
        while ((in_8(&mb->iac) & ADDRCHG) != 0)
            ;
    }
    for (i = 0; i < 6; ++i)
        out_8(&mb->padr, dev->dev_addr[i] = p[i]);
    if (mp->chipid != BROKEN_ADDRCHG_REV)
        out_8(&mb->iac, 0);
}

static int mace_set_address(struct net_device *dev, void *addr)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);

    __mace_set_address(dev, addr);

    /* note: setting ADDRCHG clears ENRCV */
    out_8(&mb->maccc, mp->maccc);

    spin_unlock_irqrestore(&mp->lock, flags);
    return 0;
}

static inline void mace_clean_rings(struct mace_data *mp)
{
    int i;

    /* free some skb's */
    for (i = 0; i < N_RX_RING; ++i) {
        if (mp->rx_bufs[i] != 0) {
            dev_kfree_skb(mp->rx_bufs[i]);
            mp->rx_bufs[i] = NULL;
        }
    }
    for (i = mp->tx_empty; i != mp->tx_fill; ) {
        dev_kfree_skb(mp->tx_bufs[i]);
        if (++i >= N_TX_RING)
            i = 0;
    }
}

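/*
 * Open: build the receive ring (N_RX_RING - 1 real buffers, a DBDMA STOP
 * command in the last slot and a branch command looping back to the
 * start), point the branch at the end of the transmit command list back
 * at its head, then enable the chip with its own receive interrupt
 * masked, since receive completion is signalled through the RX DBDMA
 * channel interrupt instead.
 */
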
static int mace_open(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int i;
    struct sk_buff *skb;
    unsigned char *data;

    /* reset the chip */
    mace_reset(dev);

    /* initialize list of sk_buffs for receiving and set up recv dma */
    mace_clean_rings(mp);
    memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
    cp = mp->rx_cmds;
    for (i = 0; i < N_RX_RING - 1; ++i) {
        skb = dev_alloc_skb(RX_BUFLEN + 2);
        if (skb == 0) {
            data = dummy_buf;
        } else {
            skb_reserve(skb, 2);	/* so IP header lands on 4-byte bdry */
            data = skb->data;
        }
        mp->rx_bufs[i] = skb;
        st_le16(&cp->req_count, RX_BUFLEN);
        st_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
        st_le32(&cp->phy_addr, virt_to_bus(data));
        cp->xfer_status = 0;
        ++cp;
    }
    mp->rx_bufs[i] = NULL;
    st_le16(&cp->command, DBDMA_STOP);
    mp->rx_fill = i;
    mp->rx_empty = 0;

    /* Put a branch back to the beginning of the receive command list */
    ++cp;
    st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
    st_le32(&cp->cmd_dep, virt_to_bus(mp->rx_cmds));

    /* start rx dma */
    out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16);	/* clear run bit */
    out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* put a branch at the end of the tx command list */
    cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
    st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
    st_le32(&cp->cmd_dep, virt_to_bus(mp->tx_cmds));

    /* reset tx dma */
    out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
    out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
    mp->tx_fill = 0;
    mp->tx_empty = 0;
    mp->tx_fullup = 0;
    mp->tx_active = 0;
    mp->tx_bad_runt = 0;

    /* turn it on! */
    out_8(&mb->maccc, mp->maccc);
    /* enable all interrupts except receive interrupts */
    out_8(&mb->imr, RCVINT);

    return 0;
}

static int mace_close(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;

    /* disable rx and tx */
    out_8(&mb->maccc, 0);
    out_8(&mb->imr, 0xff);	/* disable all intrs */

    /* disable rx and tx dma */
    st_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16);	/* clear run bit */
    st_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);	/* clear run bit */

    mace_clean_rings(mp);

    return 0;
}

static inline void mace_set_timeout(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;

    if (mp->timeout_active)
        del_timer(&mp->tx_timeout);
    mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
    mp->tx_timeout.function = mace_tx_timeout;
    mp->tx_timeout.data = (unsigned long) dev;
    add_timer(&mp->tx_timeout);
    mp->timeout_active = 1;
}

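/*
 * Queue a frame for transmission.  Returning nonzero with the queue
 * stopped tells the network stack to hold on to the skb and retry later;
 * otherwise the frame is placed in the next free tx ring slot and, if
 * fewer than MAX_TX_ACTIVE descriptors are already outstanding, handed
 * straight to the TX DMA channel.
 */
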
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp, *np;
    unsigned long flags;
    int fill, next, len;

    /* see if there's a free slot in the tx ring */
    spin_lock_irqsave(&mp->lock, flags);
    fill = mp->tx_fill;
    next = fill + 1;
    if (next >= N_TX_RING)
        next = 0;
    if (next == mp->tx_empty) {
        netif_stop_queue(dev);
        mp->tx_fullup = 1;
        spin_unlock_irqrestore(&mp->lock, flags);
        return 1;	/* can't take it at the moment */
    }
    spin_unlock_irqrestore(&mp->lock, flags);

    /* partially fill in the dma command block */
    len = skb->len;
    if (len > ETH_FRAME_LEN) {
        printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
        len = ETH_FRAME_LEN;
    }
    mp->tx_bufs[fill] = skb;
    cp = mp->tx_cmds + NCMDS_TX * fill;
    st_le16(&cp->req_count, len);
    st_le32(&cp->phy_addr, virt_to_bus(skb->data));

    np = mp->tx_cmds + NCMDS_TX * next;
    out_le16(&np->command, DBDMA_STOP);

    /* poke the tx dma channel */
    spin_lock_irqsave(&mp->lock, flags);
    mp->tx_fill = next;
    if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, OUTPUT_LAST);
        out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
        ++mp->tx_active;
        mace_set_timeout(dev);
    }
    if (++next >= N_TX_RING)
        next = 0;
    if (next == mp->tx_empty)
        netif_stop_queue(dev);
    spin_unlock_irqrestore(&mp->lock, flags);

    return 0;
}

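/*
 * Rebuild the 64-bit logical-address (multicast hash) filter.  Each
 * multicast address selects one filter bit: the top six bits of the
 * little-endian CRC-32 of the address give a bit number j, and bit
 * (j & 7) of multicast_filter[j >> 3] is set, exactly as the loop below
 * computes before the result is written to the chip's ladrf register.
 */
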
static void mace_set_multicast(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    int i, j;
    u32 crc;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);
    mp->maccc &= ~PROM;
    if (dev->flags & IFF_PROMISC) {
        mp->maccc |= PROM;
    } else {
        unsigned char multicast_filter[8];
        struct dev_mc_list *dmi = dev->mc_list;

        if (dev->flags & IFF_ALLMULTI) {
            for (i = 0; i < 8; i++)
                multicast_filter[i] = 0xff;
        } else {
            for (i = 0; i < 8; i++)
                multicast_filter[i] = 0;
            for (i = 0; i < dev->mc_count; i++) {
                crc = ether_crc_le(6, dmi->dmi_addr);
                j = crc >> 26;	/* bit number in multicast_filter */
                multicast_filter[j >> 3] |= 1 << (j & 7);
                dmi = dmi->next;
            }
        }
#if 0
        printk("Multicast filter :");
        for (i = 0; i < 8; i++)
            printk("%02x ", multicast_filter[i]);
        printk("\n");
#endif

        if (mp->chipid == BROKEN_ADDRCHG_REV)
            out_8(&mb->iac, LOGADDR);
        else {
            out_8(&mb->iac, ADDRCHG | LOGADDR);
            while ((in_8(&mb->iac) & ADDRCHG) != 0)
                ;
        }
        for (i = 0; i < 8; ++i)
            out_8(&mb->ladrf, multicast_filter[i]);
        if (mp->chipid != BROKEN_ADDRCHG_REV)
            out_8(&mb->iac, 0);
    }
    /* reset maccc */
    out_8(&mb->maccc, mp->maccc);
    spin_unlock_irqrestore(&mp->lock, flags);
}

static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev)
{
    volatile struct mace __iomem *mb = mp->mace;
    static int mace_babbles, mace_jabbers;

    if (intr & MPCO)
        dev->stats.rx_missed_errors += 256;
    dev->stats.rx_missed_errors += in_8(&mb->mpc);	/* reading clears it */
    if (intr & RNTPCO)
        dev->stats.rx_length_errors += 256;
    dev->stats.rx_length_errors += in_8(&mb->rntpc);	/* reading clears it */
    if (intr & CERR)
        ++dev->stats.tx_heartbeat_errors;
    if (intr & BABBLE)
        if (mace_babbles++ < 4)
            printk(KERN_DEBUG "mace: babbling transmitter\n");
    if (intr & JABBER)
        if (mace_jabbers++ < 4)
            printk(KERN_DEBUG "mace: jabbering transceiver\n");
}

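/*
 * Main chip interrupt: retire completed transmit frames one at a time by
 * reading the transmit frame status, apply the "stuck two bytes" runt
 * workaround described in the comments below when a frame was aborted,
 * update the statistics, and hand any further queued descriptors to the
 * TX DMA channel.
 */
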
static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int intr, fs, i, stat, x;
    int xcount, dstat;
    unsigned long flags;
    /* static int mace_last_fs, mace_last_xcount; */

    spin_lock_irqsave(&mp->lock, flags);
    intr = in_8(&mb->ir);	/* read interrupt register */
    in_8(&mb->xmtrc);		/* get retries */
    mace_handle_misc_intrs(mp, intr, dev);

    i = mp->tx_empty;
    while (in_8(&mb->pr) & XMTSV) {
        del_timer(&mp->tx_timeout);
        mp->timeout_active = 0;
        /*
         * Clear any interrupt indication associated with this status
         * word.  This appears to unlatch any error indication from
         * the DMA controller.
         */
        intr = in_8(&mb->ir);
        if (intr != 0)
            mace_handle_misc_intrs(mp, intr, dev);
        if (mp->tx_bad_runt) {
            fs = in_8(&mb->xmtfs);
            mp->tx_bad_runt = 0;
            out_8(&mb->xmtfc, AUTO_PAD_XMIT);
            continue;
        }
        dstat = ld_le32(&td->status);
        /* stop DMA controller */
        out_le32(&td->control, RUN << 16);
        /*
         * xcount is the number of complete frames which have been
         * written to the fifo but for which status has not been read.
         */
        xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
        if (xcount == 0 || (dstat & DEAD)) {
            /*
             * If a packet was aborted before the DMA controller has
             * finished transferring it, it seems that there are 2 bytes
             * which are stuck in some buffer somewhere.  These will get
             * transmitted as soon as we read the frame status (which
             * reenables the transmit data transfer request).  Turning
             * off the DMA controller and/or resetting the MACE doesn't
             * help.  So we disable auto-padding and FCS transmission
             * so the two bytes will only be a runt packet which should
             * be ignored by other stations.
             */
            out_8(&mb->xmtfc, DXMTFCS);
        }
        fs = in_8(&mb->xmtfs);
        if ((fs & XMTSV) == 0) {
            printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
                   fs, xcount, dstat);
            mace_reset(dev);
            /*
             * XXX mace likes to hang the machine after an xmtfs error.
             * This is hard to reproduce; resetting *may* help.
             */
        }
        cp = mp->tx_cmds + NCMDS_TX * i;
        stat = ld_le16(&cp->xfer_status);
        if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
            /*
             * Check whether there were in fact 2 bytes written to
             * the transmit FIFO.
             */
            udelay(1);
            x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
            if (x != 0) {
                /* there were two bytes with an end-of-packet indication */
                mp->tx_bad_runt = 1;
                mace_set_timeout(dev);
            } else {
                /*
                 * Either there weren't the two bytes buffered up, or they
                 * didn't have an end-of-packet indication.
                 * We flush the transmit FIFO just in case (by setting the
                 * XMTFWU bit with the transmitter disabled).
                 */
                out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
                out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
                udelay(1);
                out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
                out_8(&mb->xmtfc, AUTO_PAD_XMIT);
            }
        }
        /* dma should have finished */
        if (i == mp->tx_fill) {
            printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
                   fs, xcount, dstat);
            continue;
        }
        /* Update stats */
        if (fs & (UFLO|LCOL|LCAR|RTRY)) {
            ++dev->stats.tx_errors;
            if (fs & LCAR)
                ++dev->stats.tx_carrier_errors;
            if (fs & (UFLO|LCOL|RTRY))
                ++dev->stats.tx_aborted_errors;
        } else {
            dev->stats.tx_bytes += mp->tx_bufs[i]->len;
            ++dev->stats.tx_packets;
        }
        dev_kfree_skb_irq(mp->tx_bufs[i]);
        --mp->tx_active;
        if (++i >= N_TX_RING)
            i = 0;
#if 0
        mace_last_fs = fs;
        mace_last_xcount = xcount;
#endif
    }

    if (i != mp->tx_empty) {
        mp->tx_fullup = 0;
        netif_wake_queue(dev);
    }
    mp->tx_empty = i;
    i += mp->tx_active;
    if (i >= N_TX_RING)
        i -= N_TX_RING;
    if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
        do {
            /* set up the next one */
            cp = mp->tx_cmds + NCMDS_TX * i;
            out_le16(&cp->xfer_status, 0);
            out_le16(&cp->command, OUTPUT_LAST);
            ++mp->tx_active;
            if (++i >= N_TX_RING)
                i = 0;
        } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
        out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
        mace_set_timeout(dev);
    }
    spin_unlock_irqrestore(&mp->lock, flags);
    return IRQ_HANDLED;
}

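/*
 * Transmit watchdog: if a frame has been sitting in the hardware for
 * TX_TIMEOUT jiffies, reset the chip and the TX DMA channel, count the
 * stuck frame as an error and drop it, restart receive DMA where it left
 * off, and resubmit the next queued frame if there is one.
 */
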
static void mace_tx_timeout(unsigned long data)
{
    struct net_device *dev = (struct net_device *) data;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp;
    unsigned long flags;
    int i;

    spin_lock_irqsave(&mp->lock, flags);
    mp->timeout_active = 0;
    if (mp->tx_active == 0 && !mp->tx_bad_runt)
        goto out;

    /* update various counters */
    mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);

    cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;

    /* turn off both tx and rx and reset the chip */
    out_8(&mb->maccc, 0);
    printk(KERN_ERR "mace: transmit timeout - resetting\n");
    dbdma_reset(td);
    mace_reset(dev);

    /* restart rx dma */
    cp = bus_to_virt(ld_le32(&rd->cmdptr));
    dbdma_reset(rd);
    out_le16(&cp->xfer_status, 0);
    out_le32(&rd->cmdptr, virt_to_bus(cp));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* fix up the transmit side */
    i = mp->tx_empty;
    mp->tx_active = 0;
    ++dev->stats.tx_errors;
    if (mp->tx_bad_runt) {
        mp->tx_bad_runt = 0;
    } else if (i != mp->tx_fill) {
        dev_kfree_skb(mp->tx_bufs[i]);
        if (++i >= N_TX_RING)
            i = 0;
        mp->tx_empty = i;
    }
    mp->tx_fullup = 0;
    netif_wake_queue(dev);
    if (i != mp->tx_fill) {
        cp = mp->tx_cmds + NCMDS_TX * i;
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, OUTPUT_LAST);
        out_le32(&td->cmdptr, virt_to_bus(cp));
        out_le32(&td->control, (RUN << 16) | RUN);
        ++mp->tx_active;
        mace_set_timeout(dev);
    }

    /* turn it back on */
    out_8(&mb->imr, RCVINT);
    out_8(&mb->maccc, mp->maccc);

 out:
    spin_unlock_irqrestore(&mp->lock, flags);
}

static irqreturn_t mace_txdma_intr(int irq, void *dev_id)
{
    return IRQ_HANDLED;
}

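/*
 * RX DBDMA channel interrupt: walk the receive ring, pass completed
 * frames up the stack, then re-arm every emptied slot with a fresh
 * buffer (falling back to dummy_buf when allocation fails).  The chip
 * appends status bytes after the received data, which is why the frame
 * status is picked out of data[nb-4]/data[nb-3] and why 4 or 8 trailing
 * octets are dropped, as per the AUTO_STRIP_RCV comment below.
 */
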
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp, *np;
    int i, nb, stat, next;
    struct sk_buff *skb;
    unsigned frame_status;
    static int mace_lost_status;
    unsigned char *data;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);
    for (i = mp->rx_empty; i != mp->rx_fill; ) {
        cp = mp->rx_cmds + i;
        stat = ld_le16(&cp->xfer_status);
        if ((stat & ACTIVE) == 0) {
            next = i + 1;
            if (next >= N_RX_RING)
                next = 0;
            np = mp->rx_cmds + next;
            if (next != mp->rx_fill
                && (ld_le16(&np->xfer_status) & ACTIVE) != 0) {
                printk(KERN_DEBUG "mace: lost a status word\n");
                ++mace_lost_status;
            } else
                break;
        }
        nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count);
        out_le16(&cp->command, DBDMA_STOP);
        /* got a packet, have a look at it */
        skb = mp->rx_bufs[i];
        if (skb == 0) {
            ++dev->stats.rx_dropped;
        } else if (nb > 8) {
            data = skb->data;
            frame_status = (data[nb-3] << 8) + data[nb-4];
            if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
                ++dev->stats.rx_errors;
                if (frame_status & RS_OFLO)
                    ++dev->stats.rx_over_errors;
                if (frame_status & RS_FRAMERR)
                    ++dev->stats.rx_frame_errors;
                if (frame_status & RS_FCSERR)
                    ++dev->stats.rx_crc_errors;
            } else {
                /* Mace feature AUTO_STRIP_RCV is on by default, dropping the
                 * FCS on frames with 802.3 headers. This means that Ethernet
                 * frames have 8 extra octets at the end, while 802.3 frames
                 * have only 4. We need to correctly account for this. */
                if (*(unsigned short *)(data+12) < 1536)	/* 802.3 header */
                    nb -= 4;
                else	/* Ethernet header; mace includes FCS */
                    nb -= 8;
                skb_put(skb, nb);
                skb->protocol = eth_type_trans(skb, dev);
                dev->stats.rx_bytes += skb->len;
                netif_rx(skb);
                dev->last_rx = jiffies;
                mp->rx_bufs[i] = NULL;
                ++dev->stats.rx_packets;
            }
        } else {
            ++dev->stats.rx_errors;
            ++dev->stats.rx_length_errors;
        }

        /* advance to next */
        if (++i >= N_RX_RING)
            i = 0;
    }
    mp->rx_empty = i;

    i = mp->rx_fill;
    for (;;) {
        next = i + 1;
        if (next >= N_RX_RING)
            next = 0;
        if (next == mp->rx_empty)
            break;
        cp = mp->rx_cmds + i;
        skb = mp->rx_bufs[i];
        if (skb == 0) {
            skb = dev_alloc_skb(RX_BUFLEN + 2);
            if (skb != 0) {
                skb_reserve(skb, 2);
                mp->rx_bufs[i] = skb;
            }
        }
        st_le16(&cp->req_count, RX_BUFLEN);
        data = skb? skb->data: dummy_buf;
        st_le32(&cp->phy_addr, virt_to_bus(data));
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
#if 0
        if ((ld_le32(&rd->status) & ACTIVE) != 0) {
            out_le32(&rd->control, (PAUSE << 16) | PAUSE);
            while ((in_le32(&rd->status) & ACTIVE) != 0)
                ;
        }
#endif
        i = next;
    }
    if (i != mp->rx_fill) {
        out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
        mp->rx_fill = i;
    }
    spin_unlock_irqrestore(&mp->lock, flags);
    return IRQ_HANDLED;
}

static struct of_device_id mace_match[] =
{
    {
        .name		= "mace",
    },
    {},
};
MODULE_DEVICE_TABLE (of, mace_match);

static struct macio_driver mace_driver =
{
    .name		= "mace",
    .match_table	= mace_match,
    .probe		= mace_probe,
    .remove		= mace_remove,
};

static int __init mace_init(void)
{
    return macio_register_driver(&mace_driver);
}

static void __exit mace_cleanup(void)
{
    macio_unregister_driver(&mace_driver);

    kfree(dummy_buf);
    dummy_buf = NULL;
}

MODULE_AUTHOR("Paul Mackerras");
MODULE_DESCRIPTION("PowerMac MACE driver.");
module_param(port_aaui, int, 0);
MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");
MODULE_LICENSE("GPL");

module_init(mace_init);
module_exit(mace_cleanup);
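
/*
 * Example module load, assuming the driver is built as a module named
 * "mace": "modprobe mace port_aaui=1" would force the AAUI port, while
 * loading it without the parameter leaves the choice to the machine
 * compatibility check / CONFIG_MACE_AAUI_PORT logic in mace_probe().
 */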