]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/net/3c527.c
net: convert print_mac to %pM
[mirror_ubuntu-bionic-kernel.git] / drivers / net / 3c527.c
CommitLineData
1da177e4
LT
1/* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6.
2 *
3 * (c) Copyright 1998 Red Hat Software Inc
6aa20a22 4 * Written by Alan Cox.
1da177e4
LT
5 * Further debugging by Carl Drougge.
6 * Initial SMP support by Felipe W Damasio <felipewd@terra.com.br>
7 * Heavily modified by Richard Procter <rnp@paradise.net.nz>
8 *
9 * Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
10 * (for the MCA stuff) written by Wim Dumon.
11 *
12 * Thanks to 3Com for making this possible by providing me with the
13 * documentation.
14 *
15 * This software may be used and distributed according to the terms
16 * of the GNU General Public License, incorporated herein by reference.
17 *
18 */
19
20#define DRV_NAME "3c527"
21#define DRV_VERSION "0.7-SMP"
22#define DRV_RELDATE "2003/09/21"
23
24static const char *version =
25DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n";
26
27/**
28 * DOC: Traps for the unwary
29 *
30 * The diagram (Figure 1-1) and the POS summary disagree with the
31 * "Interrupt Level" section in the manual.
32 *
6aa20a22
JG
33 * The manual contradicts itself when describing the minimum number
34 * buffers in the 'configure lists' command.
35 * My card accepts a buffer config of 4/4.
1da177e4
LT
36 *
37 * Setting the SAV BP bit does not save bad packets, but
6aa20a22 38 * only enables RX on-card stats collection.
1da177e4
LT
39 *
40 * The documentation in places seems to miss things. In actual fact
41 * I've always eventually found everything is documented, it just
42 * requires careful study.
43 *
44 * DOC: Theory Of Operation
45 *
46 * The 3com 3c527 is a 32bit MCA bus mastering adapter with a large
47 * amount of on board intelligence that housekeeps a somewhat dumber
48 * Intel NIC. For performance we want to keep the transmit queue deep
49 * as the card can transmit packets while fetching others from main
50 * memory by bus master DMA. Transmission and reception are driven by
51 * circular buffer queues.
52 *
53 * The mailboxes can be used for controlling how the card traverses
 54 * its buffer rings, but are used only for initial setup in this
55 * implementation. The exec mailbox allows a variety of commands to
56 * be executed. Each command must complete before the next is
57 * executed. Primarily we use the exec mailbox for controlling the
58 * multicast lists. We have to do a certain amount of interesting
59 * hoop jumping as the multicast list changes can occur in interrupt
60 * state when the card has an exec command pending. We defer such
61 * events until the command completion interrupt.
62 *
63 * A copy break scheme (taken from 3c59x.c) is employed whereby
64 * received frames exceeding a configurable length are passed
 65 * directly to the higher networking layers without incurring a copy,
66 * in what amounts to a time/space trade-off.
6aa20a22 67 *
1da177e4
LT
68 * The card also keeps a large amount of statistical information
69 * on-board. In a perfect world, these could be used safely at no
70 * cost. However, lacking information to the contrary, processing
71 * them without races would involve so much extra complexity as to
72 * make it unworthwhile to do so. In the end, a hybrid SW/HW
6aa20a22 73 * implementation was made necessary --- see mc32_update_stats().
1da177e4
LT
74 *
75 * DOC: Notes
6aa20a22 76 *
1da177e4
LT
77 * It should be possible to use two or more cards, but at this stage
78 * only by loading two copies of the same module.
79 *
80 * The on-board 82586 NIC has trouble receiving multiple
81 * back-to-back frames and so is likely to drop packets from fast
82 * senders.
83**/
84
85#include <linux/module.h>
86
87#include <linux/errno.h>
88#include <linux/netdevice.h>
89#include <linux/etherdevice.h>
90#include <linux/if_ether.h>
91#include <linux/init.h>
92#include <linux/kernel.h>
93#include <linux/types.h>
94#include <linux/fcntl.h>
95#include <linux/interrupt.h>
96#include <linux/mca-legacy.h>
97#include <linux/ioport.h>
98#include <linux/in.h>
99#include <linux/skbuff.h>
100#include <linux/slab.h>
101#include <linux/string.h>
102#include <linux/wait.h>
103#include <linux/ethtool.h>
104#include <linux/completion.h>
105#include <linux/bitops.h>
6188e10d 106#include <linux/semaphore.h>
1da177e4 107
1da177e4
LT
108#include <asm/uaccess.h>
109#include <asm/system.h>
110#include <asm/io.h>
111#include <asm/dma.h>
112
113#include "3c527.h"
114
115MODULE_LICENSE("GPL");
116
117/*
118 * The name of the card. Is used for messages and in the requests for
119 * io regions, irqs and dma channels
120 */
121static const char* cardname = DRV_NAME;
122
123/* use 0 for production, 1 for verification, >2 for debug */
124#ifndef NET_DEBUG
125#define NET_DEBUG 2
126#endif
127
128#undef DEBUG_IRQ
129
130static unsigned int mc32_debug = NET_DEBUG;
131
132/* The number of low I/O ports used by the ethercard. */
133#define MC32_IO_EXTENT 8
134
6aa20a22 135/* As implemented, values must be a power-of-2 -- 4/8/16/32 */
1da177e4
LT
136#define TX_RING_LEN 32 /* Typically the card supports 37 */
137#define RX_RING_LEN 8 /* " " " */
138
6aa20a22
JG
139/* Copy break point, see above for details.
140 * Setting to > 1512 effectively disables this feature. */
1da177e4
LT
141#define RX_COPYBREAK 200 /* Value from 3c59x.c */
142
143/* Issue the 82586 workaround command - this is for "busy lans", but
6aa20a22
JG
144 * basically means for all lans now days - has a performance (latency)
145 * cost, but best set. */
1da177e4
LT
146static const int WORKAROUND_82586=1;
147
148/* Pointers to buffers and their on-card records */
/*
 * One host-side slot of a tx/rx ring: the shared-memory descriptor the
 * card walks (hence volatile) plus the sk_buff currently bound to it.
 */
struct mc32_ring_desc
{
	volatile struct skb_header *p;	/* on-card descriptor (shared memory) */
	struct sk_buff *skb;		/* host buffer attached to this slot */
};
154
155/* Information that needs to be kept for each board. */
6aa20a22 156struct mc32_local
1da177e4
LT
157{
158 int slot;
159
160 u32 base;
1da177e4
LT
161 volatile struct mc32_mailbox *rx_box;
162 volatile struct mc32_mailbox *tx_box;
163 volatile struct mc32_mailbox *exec_box;
164 volatile struct mc32_stats *stats; /* Start of on-card statistics */
165 u16 tx_chain; /* Transmit list start offset */
166 u16 rx_chain; /* Receive list start offset */
6aa20a22 167 u16 tx_len; /* Transmit list count */
1da177e4
LT
168 u16 rx_len; /* Receive list count */
169
170 u16 xceiver_desired_state; /* HALTED or RUNNING */
171 u16 cmd_nonblocking; /* Thread is uninterested in command result */
172 u16 mc_reload_wait; /* A multicast load request is pending */
173 u32 mc_list_valid; /* True when the mclist is set */
174
175 struct mc32_ring_desc tx_ring[TX_RING_LEN]; /* Host Transmit ring */
176 struct mc32_ring_desc rx_ring[RX_RING_LEN]; /* Host Receive ring */
177
178 atomic_t tx_count; /* buffers left */
179 atomic_t tx_ring_head; /* index to tx en-queue end */
180 u16 tx_ring_tail; /* index to tx de-queue end */
181
6aa20a22 182 u16 rx_ring_tail; /* index to rx de-queue end */
1da177e4
LT
183
184 struct semaphore cmd_mutex; /* Serialises issuing of execute commands */
185 struct completion execution_cmd; /* Card has completed an execute command */
186 struct completion xceiver_cmd; /* Card has completed a tx or rx command */
187};
188
189/* The station (ethernet) address prefix, used for a sanity check. */
190#define SA_ADDR0 0x02
191#define SA_ADDR1 0x60
192#define SA_ADDR2 0xAC
193
194struct mca_adapters_t {
195 unsigned int id;
196 char *name;
197};
198
199static const struct mca_adapters_t mc32_adapters[] = {
200 { 0x0041, "3COM EtherLink MC/32" },
201 { 0x8EF5, "IBM High Performance Lan Adapter" },
202 { 0x0000, NULL }
203};
204
205
6aa20a22 206/* Macros for ring index manipulations */
1da177e4
LT
207static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); };
208static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); };
209
210static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); };
211
212
213/* Index to functions, as function prototypes. */
214static int mc32_probe1(struct net_device *dev, int ioaddr);
215static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len);
216static int mc32_open(struct net_device *dev);
217static void mc32_timeout(struct net_device *dev);
218static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev);
7d12e780 219static irqreturn_t mc32_interrupt(int irq, void *dev_id);
1da177e4
LT
220static int mc32_close(struct net_device *dev);
221static struct net_device_stats *mc32_get_stats(struct net_device *dev);
222static void mc32_set_multicast_list(struct net_device *dev);
223static void mc32_reset_multicast_list(struct net_device *dev);
7282d491 224static const struct ethtool_ops netdev_ethtool_ops;
1da177e4
LT
225
226static void cleanup_card(struct net_device *dev)
227{
228 struct mc32_local *lp = netdev_priv(dev);
229 unsigned slot = lp->slot;
230 mca_mark_as_unused(slot);
231 mca_set_adapter_name(slot, NULL);
232 free_irq(dev->irq, dev);
233 release_region(dev->base_addr, MC32_IO_EXTENT);
234}
235
236/**
237 * mc32_probe - Search for supported boards
238 * @unit: interface number to use
239 *
240 * Because MCA bus is a real bus and we can scan for cards we could do a
241 * single scan for all boards here. Right now we use the passed in device
242 * structure and scan for only one board. This needs fixing for modules
243 * in particular.
244 */
245
246struct net_device *__init mc32_probe(int unit)
247{
248 struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local));
249 static int current_mca_slot = -1;
250 int i;
251 int err;
252
253 if (!dev)
254 return ERR_PTR(-ENOMEM);
255
256 if (unit >= 0)
257 sprintf(dev->name, "eth%d", unit);
258
6aa20a22 259 /* Do not check any supplied i/o locations.
1da177e4
LT
260 POS registers usually don't fail :) */
261
6aa20a22
JG
262 /* MCA cards have POS registers.
263 Autodetecting MCA cards is extremely simple.
1da177e4
LT
264 Just search for the card. */
265
266 for(i = 0; (mc32_adapters[i].name != NULL); i++) {
6aa20a22 267 current_mca_slot =
1da177e4
LT
268 mca_find_unused_adapter(mc32_adapters[i].id, 0);
269
270 if(current_mca_slot != MCA_NOTFOUND) {
271 if(!mc32_probe1(dev, current_mca_slot))
272 {
6aa20a22 273 mca_set_adapter_name(current_mca_slot,
1da177e4
LT
274 mc32_adapters[i].name);
275 mca_mark_as_used(current_mca_slot);
276 err = register_netdev(dev);
277 if (err) {
278 cleanup_card(dev);
279 free_netdev(dev);
280 dev = ERR_PTR(err);
281 }
282 return dev;
283 }
6aa20a22 284
1da177e4
LT
285 }
286 }
287 free_netdev(dev);
288 return ERR_PTR(-ENODEV);
289}
290
291/**
292 * mc32_probe1 - Check a given slot for a board and test the card
293 * @dev: Device structure to fill in
294 * @slot: The MCA bus slot being used by this card
295 *
296 * Decode the slot data and configure the card structures. Having done this we
297 * can reset the card and configure it. The card does a full self test cycle
6aa20a22 298 * in firmware so we have to wait for it to return and post us either a
1da177e4
LT
299 * failure case or some addresses we use to find the board internals.
300 */
301
302static int __init mc32_probe1(struct net_device *dev, int slot)
303{
304 static unsigned version_printed;
305 int i, err;
306 u8 POS;
307 u32 base;
308 struct mc32_local *lp = netdev_priv(dev);
309 static u16 mca_io_bases[]={
310 0x7280,0x7290,
311 0x7680,0x7690,
312 0x7A80,0x7A90,
313 0x7E80,0x7E90
314 };
315 static u32 mca_mem_bases[]={
316 0x00C0000,
317 0x00C4000,
318 0x00C8000,
319 0x00CC000,
320 0x00D0000,
321 0x00D4000,
322 0x00D8000,
323 0x00DC000
324 };
325 static char *failures[]={
326 "Processor instruction",
327 "Processor data bus",
328 "Processor data bus",
329 "Processor data bus",
330 "Adapter bus",
331 "ROM checksum",
332 "Base RAM",
333 "Extended RAM",
334 "82586 internal loopback",
335 "82586 initialisation failure",
336 "Adapter list configuration error"
337 };
338
339 /* Time to play MCA games */
340
341 if (mc32_debug && version_printed++ == 0)
342 printk(KERN_DEBUG "%s", version);
343
344 printk(KERN_INFO "%s: %s found in slot %d:", dev->name, cardname, slot);
345
346 POS = mca_read_stored_pos(slot, 2);
6aa20a22 347
1da177e4
LT
348 if(!(POS&1))
349 {
350 printk(" disabled.\n");
351 return -ENODEV;
352 }
353
354 /* Fill in the 'dev' fields. */
355 dev->base_addr = mca_io_bases[(POS>>1)&7];
356 dev->mem_start = mca_mem_bases[(POS>>4)&7];
6aa20a22 357
1da177e4
LT
358 POS = mca_read_stored_pos(slot, 4);
359 if(!(POS&1))
360 {
361 printk("memory window disabled.\n");
362 return -ENODEV;
363 }
364
365 POS = mca_read_stored_pos(slot, 5);
6aa20a22 366
1da177e4
LT
367 i=(POS>>4)&3;
368 if(i==3)
369 {
370 printk("invalid memory window.\n");
371 return -ENODEV;
372 }
6aa20a22 373
1da177e4
LT
374 i*=16384;
375 i+=16384;
6aa20a22 376
1da177e4 377 dev->mem_end=dev->mem_start + i;
6aa20a22 378
1da177e4 379 dev->irq = ((POS>>2)&3)+9;
6aa20a22 380
1da177e4
LT
381 if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
382 {
383 printk("io 0x%3lX, which is busy.\n", dev->base_addr);
384 return -EBUSY;
385 }
386
387 printk("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
388 dev->base_addr, dev->irq, dev->mem_start, i/1024);
6aa20a22
JG
389
390
1da177e4 391 /* We ought to set the cache line size here.. */
6aa20a22
JG
392
393
1da177e4
LT
394 /*
395 * Go PROM browsing
396 */
6aa20a22 397
1da177e4
LT
398 /* Retrieve and print the ethernet address. */
399 for (i = 0; i < 6; i++)
400 {
401 mca_write_pos(slot, 6, i+12);
402 mca_write_pos(slot, 7, 0);
6aa20a22 403
0795af57 404 dev->dev_addr[i] = mca_read_pos(slot,3);
1da177e4
LT
405 }
406
e174961c 407 printk("%s: Address %pM", dev->name, dev->dev_addr);
0795af57 408
1da177e4
LT
409 mca_write_pos(slot, 6, 0);
410 mca_write_pos(slot, 7, 0);
411
412 POS = mca_read_stored_pos(slot, 4);
6aa20a22 413
1da177e4
LT
414 if(POS&2)
415 printk(" : BNC port selected.\n");
6aa20a22 416 else
1da177e4 417 printk(" : AUI port selected.\n");
6aa20a22 418
1da177e4
LT
419 POS=inb(dev->base_addr+HOST_CTRL);
420 POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
421 POS&=~HOST_CTRL_INTE;
422 outb(POS, dev->base_addr+HOST_CTRL);
423 /* Reset adapter */
424 udelay(100);
425 /* Reset off */
426 POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
427 outb(POS, dev->base_addr+HOST_CTRL);
6aa20a22 428
1da177e4 429 udelay(300);
6aa20a22 430
1da177e4
LT
431 /*
432 * Grab the IRQ
433 */
434
1fb9df5d 435 err = request_irq(dev->irq, &mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev);
1da177e4
LT
436 if (err) {
437 release_region(dev->base_addr, MC32_IO_EXTENT);
438 printk(KERN_ERR "%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
439 goto err_exit_ports;
440 }
441
442 memset(lp, 0, sizeof(struct mc32_local));
443 lp->slot = slot;
444
445 i=0;
446
447 base = inb(dev->base_addr);
6aa20a22 448
1da177e4
LT
449 while(base == 0xFF)
450 {
451 i++;
452 if(i == 1000)
453 {
454 printk(KERN_ERR "%s: failed to boot adapter.\n", dev->name);
6aa20a22 455 err = -ENODEV;
1da177e4
LT
456 goto err_exit_irq;
457 }
458 udelay(1000);
459 if(inb(dev->base_addr+2)&(1<<5))
460 base = inb(dev->base_addr);
461 }
462
463 if(base>0)
464 {
465 if(base < 0x0C)
466 printk(KERN_ERR "%s: %s%s.\n", dev->name, failures[base-1],
467 base<0x0A?" test failure":"");
468 else
469 printk(KERN_ERR "%s: unknown failure %d.\n", dev->name, base);
6aa20a22 470 err = -ENODEV;
1da177e4
LT
471 goto err_exit_irq;
472 }
6aa20a22 473
1da177e4
LT
474 base=0;
475 for(i=0;i<4;i++)
476 {
477 int n=0;
6aa20a22 478
1da177e4
LT
479 while(!(inb(dev->base_addr+2)&(1<<5)))
480 {
481 n++;
482 udelay(50);
483 if(n>100)
484 {
485 printk(KERN_ERR "%s: mailbox read fail (%d).\n", dev->name, i);
486 err = -ENODEV;
487 goto err_exit_irq;
488 }
489 }
490
491 base|=(inb(dev->base_addr)<<(8*i));
492 }
6aa20a22 493
1da177e4 494 lp->exec_box=isa_bus_to_virt(dev->mem_start+base);
6aa20a22
JG
495
496 base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];
497
1da177e4 498 lp->base = dev->mem_start+base;
6aa20a22
JG
499
500 lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
1da177e4 501 lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);
6aa20a22 502
1da177e4
LT
503 lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);
504
505 /*
506 * Descriptor chains (card relative)
507 */
6aa20a22 508
1da177e4
LT
509 lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
510 lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
6aa20a22 511 lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
1da177e4
LT
512 lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
513
514 init_MUTEX_LOCKED(&lp->cmd_mutex);
515 init_completion(&lp->execution_cmd);
516 init_completion(&lp->xceiver_cmd);
6aa20a22 517
1da177e4
LT
518 printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
519 dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
520
521 dev->open = mc32_open;
522 dev->stop = mc32_close;
523 dev->hard_start_xmit = mc32_send_packet;
524 dev->get_stats = mc32_get_stats;
525 dev->set_multicast_list = mc32_set_multicast_list;
526 dev->tx_timeout = mc32_timeout;
527 dev->watchdog_timeo = HZ*5; /* Board does all the work */
528 dev->ethtool_ops = &netdev_ethtool_ops;
529
530 return 0;
531
532err_exit_irq:
533 free_irq(dev->irq, dev);
534err_exit_ports:
535 release_region(dev->base_addr, MC32_IO_EXTENT);
536 return err;
537}
538
539
540/**
541 * mc32_ready_poll - wait until we can feed it a command
542 * @dev: The device to wait for
6aa20a22 543 *
1da177e4
LT
544 * Wait until the card becomes ready to accept a command via the
545 * command register. This tells us nothing about the completion
546 * status of any pending commands and takes very little time at all.
547 */
6aa20a22 548
1da177e4
LT
549static inline void mc32_ready_poll(struct net_device *dev)
550{
551 int ioaddr = dev->base_addr;
552 while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
553}
554
555
556/**
557 * mc32_command_nowait - send a command non blocking
558 * @dev: The 3c527 to issue the command to
559 * @cmd: The command word to write to the mailbox
560 * @data: A data block if the command expects one
561 * @len: Length of the data block
562 *
563 * Send a command from interrupt state. If there is a command
564 * currently being executed then we return an error of -1. It
565 * simply isn't viable to wait around as commands may be
566 * slow. This can theoretically be starved on SMP, but it's hard
567 * to see a realistic situation. We do not wait for the command
568 * to complete --- we rely on the interrupt handler to tidy up
569 * after us.
570 */
571
572static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
573{
574 struct mc32_local *lp = netdev_priv(dev);
575 int ioaddr = dev->base_addr;
576 int ret = -1;
577
578 if (down_trylock(&lp->cmd_mutex) == 0)
579 {
580 lp->cmd_nonblocking=1;
581 lp->exec_box->mbox=0;
582 lp->exec_box->mbox=cmd;
583 memcpy((void *)lp->exec_box->data, data, len);
584 barrier(); /* the memcpy forgot the volatile so be sure */
585
586 /* Send the command */
587 mc32_ready_poll(dev);
588 outb(1<<6, ioaddr+HOST_CMD);
589
590 ret = 0;
591
592 /* Interrupt handler will signal mutex on completion */
593 }
594
595 return ret;
596}
597
598
599/**
600 * mc32_command - send a command and sleep until completion
601 * @dev: The 3c527 card to issue the command to
602 * @cmd: The command word to write to the mailbox
603 * @data: A data block if the command expects one
604 * @len: Length of the data block
605 *
606 * Sends exec commands in a user context. This permits us to wait around
607 * for the replies and also to wait for the command buffer to complete
6aa20a22 608 * from a previous command before we execute our command. After our
1da177e4
LT
609 * command completes we will attempt any pending multicast reload
610 * we blocked off by hogging the exec buffer.
611 *
6aa20a22 612 * You feed the card a command, you wait, it interrupts you get a
1da177e4
LT
613 * reply. All well and good. The complication arises because you use
614 * commands for filter list changes which come in at bh level from things
615 * like IPV6 group stuff.
616 */
6aa20a22 617
1da177e4
LT
618static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
619{
620 struct mc32_local *lp = netdev_priv(dev);
621 int ioaddr = dev->base_addr;
622 int ret = 0;
6aa20a22 623
1da177e4
LT
624 down(&lp->cmd_mutex);
625
626 /*
627 * My Turn
628 */
629
630 lp->cmd_nonblocking=0;
631 lp->exec_box->mbox=0;
632 lp->exec_box->mbox=cmd;
633 memcpy((void *)lp->exec_box->data, data, len);
634 barrier(); /* the memcpy forgot the volatile so be sure */
635
636 mc32_ready_poll(dev);
637 outb(1<<6, ioaddr+HOST_CMD);
638
639 wait_for_completion(&lp->execution_cmd);
6aa20a22 640
1da177e4
LT
641 if(lp->exec_box->mbox&(1<<13))
642 ret = -1;
643
644 up(&lp->cmd_mutex);
645
646 /*
647 * A multicast set got blocked - try it now
648 */
649
650 if(lp->mc_reload_wait)
651 {
652 mc32_reset_multicast_list(dev);
653 }
654
655 return ret;
656}
657
658
659/**
660 * mc32_start_transceiver - tell board to restart tx/rx
661 * @dev: The 3c527 card to issue the command to
662 *
663 * This may be called from the interrupt state, where it is used
6aa20a22
JG
664 * to restart the rx ring if the card runs out of rx buffers.
665 *
1da177e4
LT
666 * We must first check if it's ok to (re)start the transceiver. See
667 * mc32_close for details.
668 */
669
670static void mc32_start_transceiver(struct net_device *dev) {
671
672 struct mc32_local *lp = netdev_priv(dev);
673 int ioaddr = dev->base_addr;
674
6aa20a22 675 /* Ignore RX overflow on device closure */
1da177e4 676 if (lp->xceiver_desired_state==HALTED)
6aa20a22 677 return;
1da177e4
LT
678
679 /* Give the card the offset to the post-EOL-bit RX descriptor */
6aa20a22 680 mc32_ready_poll(dev);
1da177e4 681 lp->rx_box->mbox=0;
6aa20a22
JG
682 lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
683 outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);
1da177e4 684
6aa20a22 685 mc32_ready_poll(dev);
1da177e4 686 lp->tx_box->mbox=0;
6aa20a22
JG
687 outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD); /* card ignores this on RX restart */
688
689 /* We are not interrupted on start completion */
1da177e4
LT
690}
691
692
693/**
694 * mc32_halt_transceiver - tell board to stop tx/rx
695 * @dev: The 3c527 card to issue the command to
696 *
697 * We issue the commands to halt the card's transceiver. In fact,
698 * after some experimenting we now simply tell the card to
699 * suspend. When issuing aborts occasionally odd things happened.
700 *
701 * We then sleep until the card has notified us that both rx and
702 * tx have been suspended.
6aa20a22 703 */
1da177e4 704
6aa20a22 705static void mc32_halt_transceiver(struct net_device *dev)
1da177e4
LT
706{
707 struct mc32_local *lp = netdev_priv(dev);
708 int ioaddr = dev->base_addr;
709
6aa20a22 710 mc32_ready_poll(dev);
1da177e4 711 lp->rx_box->mbox=0;
6aa20a22 712 outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
1da177e4
LT
713 wait_for_completion(&lp->xceiver_cmd);
714
6aa20a22 715 mc32_ready_poll(dev);
1da177e4 716 lp->tx_box->mbox=0;
6aa20a22 717 outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
1da177e4
LT
718 wait_for_completion(&lp->xceiver_cmd);
719}
720
721
722/**
723 * mc32_load_rx_ring - load the ring of receive buffers
724 * @dev: 3c527 to build the ring for
725 *
726 * This initalises the on-card and driver datastructures to
727 * the point where mc32_start_transceiver() can be called.
728 *
729 * The card sets up the receive ring for us. We are required to use the
730 * ring it provides, although the size of the ring is configurable.
731 *
732 * We allocate an sk_buff for each ring entry in turn and
733 * initalise its house-keeping info. At the same time, we read
734 * each 'next' pointer in our rx_ring array. This reduces slow
735 * shared-memory reads and makes it easy to access predecessor
736 * descriptors.
737 *
738 * We then set the end-of-list bit for the last entry so that the
739 * card will know when it has run out of buffers.
740 */
6aa20a22 741
1da177e4
LT
742static int mc32_load_rx_ring(struct net_device *dev)
743{
744 struct mc32_local *lp = netdev_priv(dev);
745 int i;
746 u16 rx_base;
747 volatile struct skb_header *p;
6aa20a22 748
1da177e4
LT
749 rx_base=lp->rx_chain;
750
751 for(i=0; i<RX_RING_LEN; i++) {
752 lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
753 if (lp->rx_ring[i].skb==NULL) {
754 for (;i>=0;i--)
755 kfree_skb(lp->rx_ring[i].skb);
756 return -ENOBUFS;
757 }
758 skb_reserve(lp->rx_ring[i].skb, 18);
759
760 p=isa_bus_to_virt(lp->base+rx_base);
6aa20a22 761
1da177e4
LT
762 p->control=0;
763 p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
764 p->status=0;
765 p->length=1532;
6aa20a22
JG
766
767 lp->rx_ring[i].p=p;
768 rx_base=p->next;
1da177e4
LT
769 }
770
771 lp->rx_ring[i-1].p->control |= CONTROL_EOL;
772
773 lp->rx_ring_tail=0;
774
775 return 0;
6aa20a22 776}
1da177e4
LT
777
778
779/**
780 * mc32_flush_rx_ring - free the ring of receive buffers
781 * @lp: Local data of 3c527 to flush the rx ring of
782 *
6aa20a22 783 * Free the buffer for each ring slot. This may be called
1da177e4
LT
784 * before mc32_load_rx_ring(), eg. on error in mc32_open().
785 * Requires rx skb pointers to point to a valid skb, or NULL.
786 */
787
788static void mc32_flush_rx_ring(struct net_device *dev)
789{
790 struct mc32_local *lp = netdev_priv(dev);
6aa20a22 791 int i;
1da177e4 792
6aa20a22
JG
793 for(i=0; i < RX_RING_LEN; i++)
794 {
1da177e4
LT
795 if (lp->rx_ring[i].skb) {
796 dev_kfree_skb(lp->rx_ring[i].skb);
797 lp->rx_ring[i].skb = NULL;
798 }
6aa20a22
JG
799 lp->rx_ring[i].p=NULL;
800 }
1da177e4
LT
801}
802
803
804/**
805 * mc32_load_tx_ring - load transmit ring
806 * @dev: The 3c527 card to issue the command to
807 *
6aa20a22 808 * This sets up the host transmit data-structures.
1da177e4
LT
809 *
 810 * First, we obtain from the card its current position in the tx
811 * ring, so that we will know where to begin transmitting
812 * packets.
6aa20a22 813 *
1da177e4
LT
814 * Then, we read the 'next' pointers from the on-card tx ring into
815 * our tx_ring array to reduce slow shared-mem reads. Finally, we
816 * intitalise the tx house keeping variables.
6aa20a22
JG
817 *
818 */
1da177e4
LT
819
820static void mc32_load_tx_ring(struct net_device *dev)
6aa20a22 821{
1da177e4
LT
822 struct mc32_local *lp = netdev_priv(dev);
823 volatile struct skb_header *p;
6aa20a22 824 int i;
1da177e4
LT
825 u16 tx_base;
826
6aa20a22 827 tx_base=lp->tx_box->data[0];
1da177e4
LT
828
829 for(i=0 ; i<TX_RING_LEN ; i++)
830 {
831 p=isa_bus_to_virt(lp->base+tx_base);
6aa20a22 832 lp->tx_ring[i].p=p;
1da177e4
LT
833 lp->tx_ring[i].skb=NULL;
834
835 tx_base=p->next;
836 }
837
838 /* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
839 /* see mc32_tx_ring */
840
6aa20a22
JG
841 atomic_set(&lp->tx_count, TX_RING_LEN-1);
842 atomic_set(&lp->tx_ring_head, 0);
843 lp->tx_ring_tail=0;
844}
1da177e4
LT
845
846
847/**
848 * mc32_flush_tx_ring - free transmit ring
849 * @lp: Local data of 3c527 to flush the tx ring of
850 *
851 * If the ring is non-empty, zip over the it, freeing any
852 * allocated skb_buffs. The tx ring house-keeping variables are
853 * then reset. Requires rx skb pointers to point to a valid skb,
854 * or NULL.
855 */
856
857static void mc32_flush_tx_ring(struct net_device *dev)
858{
859 struct mc32_local *lp = netdev_priv(dev);
860 int i;
861
862 for (i=0; i < TX_RING_LEN; i++)
863 {
864 if (lp->tx_ring[i].skb)
865 {
866 dev_kfree_skb(lp->tx_ring[i].skb);
867 lp->tx_ring[i].skb = NULL;
868 }
869 }
870
6aa20a22
JG
871 atomic_set(&lp->tx_count, 0);
872 atomic_set(&lp->tx_ring_head, 0);
1da177e4
LT
873 lp->tx_ring_tail=0;
874}
6aa20a22 875
1da177e4
LT
876
877/**
878 * mc32_open - handle 'up' of card
879 * @dev: device to open
880 *
881 * The user is trying to bring the card into ready state. This requires
882 * a brief dialogue with the card. Firstly we enable interrupts and then
883 * 'indications'. Without these enabled the card doesn't bother telling
884 * us what it has done. This had me puzzled for a week.
885 *
886 * We configure the number of card descriptors, then load the network
887 * address and multicast filters. Turn on the workaround mode. This
888 * works around a bug in the 82586 - it asks the firmware to do
889 * so. It has a performance (latency) hit but is needed on busy
890 * [read most] lans. We load the ring with buffers then we kick it
891 * all off.
892 */
893
894static int mc32_open(struct net_device *dev)
895{
896 int ioaddr = dev->base_addr;
897 struct mc32_local *lp = netdev_priv(dev);
898 u8 one=1;
899 u8 regs;
900 u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};
901
902 /*
903 * Interrupts enabled
904 */
905
906 regs=inb(ioaddr+HOST_CTRL);
907 regs|=HOST_CTRL_INTE;
908 outb(regs, ioaddr+HOST_CTRL);
6aa20a22 909
1da177e4
LT
910 /*
911 * Allow ourselves to issue commands
912 */
913
914 up(&lp->cmd_mutex);
915
916
917 /*
918 * Send the indications on command
919 */
920
921 mc32_command(dev, 4, &one, 2);
922
923 /*
6aa20a22 924 * Poke it to make sure it's really dead.
1da177e4
LT
925 */
926
6aa20a22
JG
927 mc32_halt_transceiver(dev);
928 mc32_flush_tx_ring(dev);
1da177e4 929
6aa20a22
JG
930 /*
931 * Ask card to set up on-card descriptors to our spec
932 */
1da177e4 933
6aa20a22 934 if(mc32_command(dev, 8, descnumbuffs, 4)) {
1da177e4
LT
935 printk("%s: %s rejected our buffer configuration!\n",
936 dev->name, cardname);
6aa20a22
JG
937 mc32_close(dev);
938 return -ENOBUFS;
1da177e4 939 }
6aa20a22
JG
940
941 /* Report new configuration */
942 mc32_command(dev, 6, NULL, 0);
1da177e4
LT
943
944 lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
945 lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
6aa20a22 946 lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
1da177e4 947 lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
6aa20a22 948
1da177e4
LT
949 /* Set Network Address */
950 mc32_command(dev, 1, dev->dev_addr, 6);
6aa20a22 951
1da177e4
LT
952 /* Set the filters */
953 mc32_set_multicast_list(dev);
6aa20a22
JG
954
955 if (WORKAROUND_82586) {
1da177e4
LT
956 u16 zero_word=0;
957 mc32_command(dev, 0x0D, &zero_word, 2); /* 82586 bug workaround on */
958 }
959
960 mc32_load_tx_ring(dev);
6aa20a22
JG
961
962 if(mc32_load_rx_ring(dev))
1da177e4
LT
963 {
964 mc32_close(dev);
965 return -ENOBUFS;
966 }
967
968 lp->xceiver_desired_state = RUNNING;
6aa20a22 969
1da177e4
LT
970 /* And finally, set the ball rolling... */
971 mc32_start_transceiver(dev);
972
973 netif_start_queue(dev);
974
975 return 0;
976}
977
978
979/**
980 * mc32_timeout - handle a timeout from the network layer
981 * @dev: 3c527 that timed out
982 *
983 * Handle a timeout on transmit from the 3c527. This normally means
984 * bad things as the hardware handles cable timeouts and mess for
985 * us.
986 *
987 */
988
989static void mc32_timeout(struct net_device *dev)
990{
991 printk(KERN_WARNING "%s: transmit timed out?\n", dev->name);
992 /* Try to restart the adaptor. */
993 netif_wake_queue(dev);
994}
995
996
997/**
998 * mc32_send_packet - queue a frame for transmit
999 * @skb: buffer to transmit
1000 * @dev: 3c527 to send it out of
1001 *
1002 * Transmit a buffer. This normally means throwing the buffer onto
1003 * the transmit queue as the queue is quite large. If the queue is
1004 * full then we set tx_busy and return. Once the interrupt handler
1005 * gets messages telling it to reclaim transmit queue entries, we will
1006 * clear tx_busy and the kernel will start calling this again.
1007 *
1008 * We do not disable interrupts or acquire any locks; this can
1009 * run concurrently with mc32_tx_ring(), and the function itself
1010 * is serialised at a higher layer. However, similarly for the
1011 * card itself, we must ensure that we update tx_ring_head only
1012 * after we've established a valid packet on the tx ring (and
1013 * before we let the card "see" it, to prevent it racing with the
1014 * irq handler).
6aa20a22 1015 *
1da177e4
LT
1016 */
1017
static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	u32 head = atomic_read(&lp->tx_ring_head);

	/* Descriptors are shared with the bus-mastering card: volatile */
	volatile struct skb_header *p, *np;

	netif_stop_queue(dev);

	/* Ring full: tell the stack to requeue this skb (pre-NETDEV_TX
	   convention: non-zero return means "try again later") */
	if(atomic_read(&lp->tx_count)==0) {
		return 1;
	}

	/* Pad runt frames up to the Ethernet minimum; skb_padto frees
	   the skb itself on failure, so we just report success */
	if (skb_padto(skb, ETH_ZLEN)) {
		netif_wake_queue(dev);
		return 0;
	}

	atomic_dec(&lp->tx_count);

	/* P is the last sending/sent buffer as a pointer */
	p=lp->tx_ring[head].p;

	head = next_tx(head);

	/* NP is the buffer we will be loading */
	np=lp->tx_ring[head].p;

	/* We will need this to flush the buffer out */
	lp->tx_ring[head].skb=skb;

	/* Fill the new descriptor; EOL keeps the card from running past
	   it until the previous descriptor's EOL is cleared below */
	np->length = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
	np->data = isa_virt_to_bus(skb->data);
	np->status = 0;
	np->control = CONTROL_EOP | CONTROL_EOL;
	/* Ensure the descriptor writes above are visible before we
	   publish the new head / open the ring to the card */
	wmb();

	/*
	 * The new frame has been setup; we can now
	 * let the interrupt handler and card "see" it
	 */

	atomic_set(&lp->tx_ring_head, head);
	/* Clearing EOL on the previous descriptor hands the new one to
	   the card; must happen only after the frame is fully set up */
	p->control &= ~CONTROL_EOL;

	netif_wake_queue(dev);
	return 0;
}
1066
1067
1068/**
1069 * mc32_update_stats - pull off the on board statistics
1070 * @dev: 3c527 to service
1071 *
6aa20a22 1072 *
1da177e4
LT
1073 * Query and reset the on-card stats. There's the small possibility
1074 * of a race here, which would result in an underestimation of
1075 * actual errors. As such, we'd prefer to keep all our stats
1076 * collection in software. As a rule, we do. However it can't be
1077 * used for rx errors and collisions as, by default, the card discards
6aa20a22 1078 * bad rx packets.
1da177e4
LT
1079 *
1080 * Setting the SAV BP in the rx filter command supposedly
1081 * stops this behaviour. However, testing shows that it only seems to
1082 * enable the collation of on-card rx statistics --- the driver
1083 * never sees an RX descriptor with an error status set.
1084 *
1085 */
1086
1087static void mc32_update_stats(struct net_device *dev)
1088{
1089 struct mc32_local *lp = netdev_priv(dev);
6aa20a22 1090 volatile struct mc32_stats *st = lp->stats;
1da177e4 1091
6aa20a22
JG
1092 u32 rx_errors=0;
1093
4711c841 1094 rx_errors+=dev->stats.rx_crc_errors +=st->rx_crc_errors;
1da177e4 1095 st->rx_crc_errors=0;
4711c841 1096 rx_errors+=dev->stats.rx_fifo_errors +=st->rx_overrun_errors;
6aa20a22 1097 st->rx_overrun_errors=0;
4711c841 1098 rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors;
1da177e4 1099 st->rx_alignment_errors=0;
4711c841 1100 rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors;
1da177e4 1101 st->rx_tooshort_errors=0;
4711c841 1102 rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors;
6aa20a22 1103 st->rx_outofresource_errors=0;
4711c841 1104 dev->stats.rx_errors=rx_errors;
6aa20a22 1105
1da177e4 1106 /* Number of packets which saw one collision */
4711c841 1107 dev->stats.collisions+=st->dataC[10];
6aa20a22 1108 st->dataC[10]=0;
1da177e4 1109
6aa20a22 1110 /* Number of packets which saw 2--15 collisions */
4711c841 1111 dev->stats.collisions+=st->dataC[11];
6aa20a22
JG
1112 st->dataC[11]=0;
1113}
1da177e4
LT
1114
1115
1116/**
1117 * mc32_rx_ring - process the receive ring
1118 * @dev: 3c527 that needs its receive ring processing
1119 *
1120 *
1121 * We have received one or more indications from the card that a
1122 * receive has completed. The buffer ring thus contains dirty
1123 * entries. We walk the ring by iterating over the circular rx_ring
1124 * array, starting at the next dirty buffer (which happens to be the
1125 * one we finished up at last time around).
1126 *
1127 * For each completed packet, we will either copy it and pass it up
1128 * the stack or, if the packet is near MTU sized, we allocate
1129 * another buffer and flip the old one up the stack.
6aa20a22 1130 *
1da177e4
LT
1131 * We must succeed in keeping a buffer on the ring. If necessary we
1132 * will toss a received packet rather than lose a ring entry. Once
1133 * the first uncompleted descriptor is found, we move the
1134 * End-Of-List bit to include the buffers just processed.
1135 *
1136 */
1137
static void mc32_rx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *p;
	u16 rx_ring_tail;
	u16 rx_old_tail;
	int x=0;		/* bounds the walk to one full lap (48 entries) */

	rx_old_tail = rx_ring_tail = lp->rx_ring_tail;

	do
	{
		p=lp->rx_ring[rx_ring_tail].p;

		if(!(p->status & (1<<7))) {	/* Not COMPLETED */
			break;
		}
		if(p->status & (1<<6))		/* COMPLETED_OK */
		{

			u16 length=p->length;
			struct sk_buff *skb;
			struct sk_buff *newskb;

			/* Try to save time by avoiding a copy on big frames */

			if ((length > RX_COPYBREAK)
			    && ((newskb=dev_alloc_skb(1532)) != NULL))
			{
				/* Flip the filled buffer up the stack and
				   put a fresh one on the ring in its place */
				skb=lp->rx_ring[rx_ring_tail].skb;
				skb_put(skb, length);

				skb_reserve(newskb,18);
				lp->rx_ring[rx_ring_tail].skb=newskb;
				p->data=isa_virt_to_bus(newskb->data);
			}
			else
			{
				/* Small frame: copy out, keep the ring buffer */
				skb=dev_alloc_skb(length+2);

				if(skb==NULL) {
					/* Never lose a ring entry: drop the
					   packet and recycle the descriptor */
					dev->stats.rx_dropped++;
					goto dropped;
				}

				skb_reserve(skb,2);	/* align IP header */
				memcpy(skb_put(skb, length),
				       lp->rx_ring[rx_ring_tail].skb->data, length);
			}

			skb->protocol=eth_type_trans(skb,dev);
			dev->last_rx = jiffies;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += length;
			netif_rx(skb);
		}

	dropped:
		/* Re-arm the descriptor for the card */
		p->length = 1532;
		p->status = 0;

		rx_ring_tail=next_rx(rx_ring_tail);
	}
	while(x++<48);

	/* If there was actually a frame to be processed, place the EOL bit */
	/* at the descriptor prior to the one to be filled next */

	if (rx_ring_tail != rx_old_tail)
	{
		lp->rx_ring[prev_rx(rx_ring_tail)].p->control |= CONTROL_EOL;
		lp->rx_ring[prev_rx(rx_old_tail)].p->control &= ~CONTROL_EOL;

		lp->rx_ring_tail=rx_ring_tail;
	}
}
1214
1215
1216/**
1217 * mc32_tx_ring - process completed transmits
1218 * @dev: 3c527 that needs its transmit ring processing
1219 *
1220 *
1221 * This operates in a similar fashion to mc32_rx_ring. We iterate
1222 * over the transmit ring. For each descriptor which has been
1223 * processed by the card, we free its associated buffer and note
1224 * any errors. This continues until the transmit ring is emptied
1225 * or we reach a descriptor that hasn't yet been processed by the
1226 * card.
6aa20a22 1227 *
1da177e4
LT
1228 */
1229
static void mc32_tx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *np;

	/*
	 * We rely on head==tail to mean 'queue empty'.
	 * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
	 * tx_ring_head wrapping to tail and confusing a 'queue empty'
	 * condition with 'queue full'
	 */

	while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
	{
		u16 t;

		t=next_tx(lp->tx_ring_tail);
		np=lp->tx_ring[t].p;

		if(!(np->status & (1<<7)))
		{
			/* Not COMPLETED: the card is still working on it */
			break;
		}
		dev->stats.tx_packets++;
		if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
		{
			dev->stats.tx_errors++;

			/* Low nibble of status encodes the failure reason */
			switch(np->status&0x0F)
			{
				case 1:
					dev->stats.tx_aborted_errors++;
					break; /* Max collisions */
				case 2:
					dev->stats.tx_fifo_errors++;
					break;
				case 3:
					dev->stats.tx_carrier_errors++;
					break;
				case 4:
					dev->stats.tx_window_errors++;
					break; /* CTS Lost */
				case 5:
					dev->stats.tx_aborted_errors++;
					break; /* Transmit timeout */
			}
		}
		/* Packets are sent in order - this is
		   basically a FIFO queue of buffers matching
		   the card ring */
		dev->stats.tx_bytes+=lp->tx_ring[t].skb->len;
		dev_kfree_skb_irq(lp->tx_ring[t].skb);
		lp->tx_ring[t].skb=NULL;
		atomic_inc(&lp->tx_count);
		netif_wake_queue(dev);

		/* Advance the tail only after the entry is fully reclaimed */
		lp->tx_ring_tail=t;
	}

}
1da177e4
LT
1291
1292
1293/**
1294 * mc32_interrupt - handle an interrupt from a 3c527
1295 * @irq: Interrupt number
1296 * @dev_id: 3c527 that requires servicing
1297 * @regs: Registers (unused)
1298 *
1299 *
1300 * An interrupt is raised whenever the 3c527 writes to the command
1301 * register. This register contains the message it wishes to send us
1302 * packed into a single byte field. We keep reading status entries
1303 * until we have processed all the control items, but simply count
1304 * transmit and receive reports. When all reports are in we empty the
1305 * transceiver rings as appropriate. This saves the overhead of
1306 * multiple command requests.
1307 *
1308 * Because MCA is level-triggered, we shouldn't miss indications.
1309 * Therefore, we needn't ask the card to suspend interrupts within
1310 * this handler. The card receives an implicit acknowledgment of the
1311 * current interrupt when we read the command register.
1312 *
1313 */
1314
static irqreturn_t mc32_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mc32_local *lp;
	int ioaddr, status, boguscount = 0;
	int rx_event = 0;
	int tx_event = 0;

	ioaddr = dev->base_addr;
	lp = netdev_priv(dev);

	/* See whats cooking */

	/* boguscount bounds the loop in case the card misbehaves */
	while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
	{
		/* Reading the command register also acknowledges the
		   current interrupt to the card */
		status=inb(ioaddr+HOST_CMD);

#ifdef DEBUG_IRQ
		printk("Status TX%d RX%d EX%d OV%d BC%d\n",
			(status&7), (status>>3)&7, (status>>6)&1,
			(status>>7)&1, boguscount);
#endif

		/* Bits 0-2: transmit-side indication */
		switch(status&7)
		{
			case 0:
				break;
			case 6: /* TX fail */
			case 2:	/* TX ok */
				tx_event = 1;
				break;
			case 3: /* Halt */
			case 4: /* Abort */
				complete(&lp->xceiver_cmd);
				break;
			default:
				printk("%s: strange tx ack %d\n", dev->name, status&7);
		}
		status>>=3;
		/* Bits 3-5: receive-side indication */
		switch(status&7)
		{
			case 0:
				break;
			case 2:	/* RX */
				rx_event=1;
				break;
			case 3: /* Halt */
			case 4: /* Abort */
				complete(&lp->xceiver_cmd);
				break;
			case 6:
				/* Out of RX buffers stat */
				/* Must restart rx */
				dev->stats.rx_dropped++;
				mc32_rx_ring(dev);
				mc32_start_transceiver(dev);
				break;
			default:
				printk("%s: strange rx ack %d\n",
					dev->name, status&7);
		}
		status>>=3;
		/* Bit 6: execution command completed */
		if(status&1)
		{
			/*
			 * No thread is waiting: we need to tidy
			 * up ourself.
			 */

			if (lp->cmd_nonblocking) {
				up(&lp->cmd_mutex);
				/* A multicast reload was deferred while the
				   command engine was busy; run it now */
				if (lp->mc_reload_wait)
					mc32_reset_multicast_list(dev);
			}
			else complete(&lp->execution_cmd);
		}
		/* Bit 7: on-card statistics counter near overflow */
		if(status&2)
		{
			/*
			 * We get interrupted once per
			 * counter that is about to overflow.
			 */

			mc32_update_stats(dev);
		}
	}


	/*
	 * Process the transmit and receive rings
	 */

	if(tx_event)
		mc32_tx_ring(dev);

	if(rx_event)
		mc32_rx_ring(dev);

	return IRQ_HANDLED;
}
1415
1416
1417/**
1418 * mc32_close - user configuring the 3c527 down
1419 * @dev: 3c527 card to shut down
1420 *
1421 * The 3c527 is a bus mastering device. We must be careful how we
1422 * shut it down. It may also be running shared interrupt so we have
1423 * to be sure to silence it properly
1424 *
1425 * We indicate that the card is closing to the rest of the
1426 * driver. Otherwise, it is possible that the card may run out
1427 * of receive buffers and restart the transceiver while we're
1428 * trying to close it.
6aa20a22 1429 *
1da177e4
LT
1430 * We abort any receive and transmits going on and then wait until
1431 * any pending exec commands have completed in other code threads.
1432 * In theory we can't get here while that is true, in practice I am
1433 * paranoid
1434 *
1435 * We turn off the interrupt enable for the board to be sure it can't
1436 * intefere with other devices.
1437 */
1438
static int mc32_close(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	u8 regs;
	u16 one=1;

	/* Stop the driver restarting the transceiver if the card runs
	   out of receive buffers while we are shutting down */
	lp->xceiver_desired_state = HALTED;
	netif_stop_queue(dev);

	/*
	 * Send the indications on command (handy debug check)
	 */

	mc32_command(dev, 4, &one, 2);

	/* Shut down the transceiver */

	mc32_halt_transceiver(dev);

	/* Ensure we issue no more commands beyond this point */

	down(&lp->cmd_mutex);

	/* Ok the card is now stopping */

	/* Disable board interrupts so it cannot interfere with a
	   shared-interrupt line */
	regs=inb(ioaddr+HOST_CTRL);
	regs&=~HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	/* Release all ring buffers now that the card is quiet */
	mc32_flush_rx_ring(dev);
	mc32_flush_tx_ring(dev);

	/* Pull the final on-card counters before the card goes away */
	mc32_update_stats(dev);

	return 0;
}
1477
1478
1479/**
1480 * mc32_get_stats - hand back stats to network layer
1481 * @dev: The 3c527 card to handle
1482 *
1483 * We've collected all the stats we can in software already. Now
6aa20a22
JG
1484 * it's time to update those kept on-card and return the lot.
1485 *
1da177e4
LT
1486 */
1487
1488static struct net_device_stats *mc32_get_stats(struct net_device *dev)
1489{
6aa20a22 1490 mc32_update_stats(dev);
4711c841 1491 return &dev->stats;
1da177e4
LT
1492}
1493
1494
1495/**
1496 * do_mc32_set_multicast_list - attempt to update multicasts
1497 * @dev: 3c527 device to load the list on
6aa20a22 1498 * @retry: indicates this is not the first call.
1da177e4
LT
1499 *
1500 *
1501 * Actually set or clear the multicast filter for this adaptor. The
1502 * locking issues are handled by this routine. We have to track
1503 * state as it may take multiple calls to get the command sequence
1504 * completed. We just keep trying to schedule the loads until we
1505 * manage to process them all.
6aa20a22 1506 *
1da177e4 1507 * num_addrs == -1 Promiscuous mode, receive all packets
6aa20a22 1508 *
1da177e4 1509 * num_addrs == 0 Normal mode, clear multicast list
1da177e4 1510 *
6aa20a22
JG
1511 * num_addrs > 0 Multicast mode, receive normal and MC packets,
1512 * and do best-effort filtering.
1513 *
1514 * See mc32_update_stats() regards setting the SAV BP bit.
1da177e4
LT
1515 *
1516 */
1517
1518static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
1519{
1520 struct mc32_local *lp = netdev_priv(dev);
6aa20a22 1521 u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */
1da177e4 1522
c16d1185
WC
1523 if ((dev->flags&IFF_PROMISC) ||
1524 (dev->flags&IFF_ALLMULTI) ||
1525 dev->mc_count > 10)
1da177e4
LT
1526 /* Enable promiscuous mode */
1527 filt |= 1;
1da177e4
LT
1528 else if(dev->mc_count)
1529 {
1530 unsigned char block[62];
1531 unsigned char *bp;
1532 struct dev_mc_list *dmc=dev->mc_list;
6aa20a22 1533
1da177e4 1534 int i;
6aa20a22 1535
1da177e4
LT
1536 if(retry==0)
1537 lp->mc_list_valid = 0;
1538 if(!lp->mc_list_valid)
1539 {
1540 block[1]=0;
1541 block[0]=dev->mc_count;
1542 bp=block+2;
6aa20a22 1543
1da177e4
LT
1544 for(i=0;i<dev->mc_count;i++)
1545 {
1546 memcpy(bp, dmc->dmi_addr, 6);
1547 bp+=6;
1548 dmc=dmc->next;
1549 }
1550 if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1)
1551 {
1552 lp->mc_reload_wait = 1;
1553 return;
1554 }
1555 lp->mc_list_valid=1;
1556 }
1557 }
6aa20a22
JG
1558
1559 if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
1da177e4
LT
1560 {
1561 lp->mc_reload_wait = 1;
6aa20a22
JG
1562 }
1563 else {
1da177e4
LT
1564 lp->mc_reload_wait = 0;
1565 }
1566}
1567
1568
1569/**
1570 * mc32_set_multicast_list - queue multicast list update
1571 * @dev: The 3c527 to use
1572 *
1573 * Commence loading the multicast list. This is called when the kernel
1574 * changes the lists. It will override any pending list we are trying to
1575 * load.
1576 */
1577
static void mc32_set_multicast_list(struct net_device *dev)
{
	/* First attempt (retry == 0): invalidate any cached list and
	   start loading the new one. */
	do_mc32_set_multicast_list(dev, 0);
}
1582
1583
1584/**
1585 * mc32_reset_multicast_list - reset multicast list
1586 * @dev: The 3c527 to use
1587 *
1588 * Attempt the next step in loading the multicast lists. If this attempt
1589 * fails to complete then it will be scheduled and this function called
1590 * again later from elsewhere.
1591 */
1592
static void mc32_reset_multicast_list(struct net_device *dev)
{
	/* Retry path (retry == 1): continue a previously-deferred load
	   without invalidating the cached list. */
	do_mc32_set_multicast_list(dev, 1);
}
1597
1598static void netdev_get_drvinfo(struct net_device *dev,
1599 struct ethtool_drvinfo *info)
1600{
1601 strcpy(info->driver, DRV_NAME);
1602 strcpy(info->version, DRV_VERSION);
1603 sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
1604}
1605
1606static u32 netdev_get_msglevel(struct net_device *dev)
1607{
1608 return mc32_debug;
1609}
1610
1611static void netdev_set_msglevel(struct net_device *dev, u32 level)
1612{
1613 mc32_debug = level;
1614}
1615
/* ethtool operations supported by this driver: identification and
   message-level get/set only. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};
1621
1622#ifdef MODULE
1623
1624static struct net_device *this_device;
1625
1626/**
1627 * init_module - entry point
1628 *
1629 * Probe and locate a 3c527 card. This really should probe and locate
1630 * all the 3c527 cards in the machine not just one of them. Yes you can
1631 * insmod multiple modules for now but it's a hack.
1632 */
1633
96e672c7 1634int __init init_module(void)
1da177e4
LT
1635{
1636 this_device = mc32_probe(-1);
1637 if (IS_ERR(this_device))
1638 return PTR_ERR(this_device);
1639 return 0;
1640}
1641
1642/**
1643 * cleanup_module - free resources for an unload
1644 *
1645 * Unloading time. We release the MCA bus resources and the interrupt
1646 * at which point everything is ready to unload. The card must be stopped
1647 * at this point or we would not have been called. When we unload we
1648 * leave the card stopped but not totally shut down. When the card is
1649 * initialized it must be rebooted or the rings reloaded before any
1650 * transmit operations are allowed to start scribbling into memory.
1651 */
1652
void __exit cleanup_module(void)
{
	/* Teardown order is significant: detach from the network layer
	   first, then release the card's MCA/irq resources, and only
	   then free the net_device structure itself. */
	unregister_netdev(this_device);
	cleanup_card(this_device);
	free_netdev(this_device);
}
1659
1660#endif /* MODULE */