]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/net/tokenring/olympic.c
net: remove use of ndo_set_multicast_list in drivers
[mirror_ubuntu-zesty-kernel.git] / drivers / net / tokenring / olympic.c
CommitLineData
1da177e4
LT
1/*
2 * olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
3 * 1999/2000 Mike Phillips (mikep@linuxtr.net)
4 *
5 * Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
6 * chipset.
7 *
8 * Base Driver Skeleton:
9 * Written 1993-94 by Donald Becker.
10 *
11 * Copyright 1993 United States Government as represented by the
12 * Director, National Security Agency.
13 *
14 * Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their
15 * assistance and perserverance with the testing of this driver.
16 *
17 * This software may be used and distributed according to the terms
18 * of the GNU General Public License, incorporated herein by reference.
19 *
20 * 4/27/99 - Alpha Release 0.1.0
21 * First release to the public
22 *
23 * 6/8/99 - Official Release 0.2.0
24 * Merged into the kernel code
25 * 8/18/99 - Updated driver for 2.3.13 kernel to use new pci
26 * resource. Driver also reports the card name returned by
27 * the pci resource.
28 * 1/11/00 - Added spinlocks for smp
29 * 2/23/00 - Updated to dev_kfree_irq
30 * 3/10/00 - Fixed FDX enable which triggered other bugs also
31 * squashed.
32 * 5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes.
33 * The odd thing about the changes is that the fix for
34 * endian issues with the big-endian data in the arb, asb...
35 * was to always swab() the bytes, no matter what CPU.
36 * That's because the read[wl]() functions always swap the
37 * bytes on the way in on PPC.
38 * Fixing the hardware descriptors was another matter,
39 * because they weren't going through read[wl](), there all
40 * the results had to be in memory in le32 values. kdaaker
41 *
42 * 12/23/00 - Added minimal Cardbus support (Thanks Donald).
43 *
44 * 03/09/01 - Add new pci api, dev_base_lock, general clean up.
45 *
46 * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
47 * Change proc_fs behaviour, now one entry per adapter.
48 *
49 * 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the
50 * adapter when live does not take the system down with it.
51 *
52 * 06/02/01 - Clean up, copy skb for small packets
53 *
54 * 06/22/01 - Add EISR error handling routines
55 *
56 * 07/19/01 - Improve bad LAA reporting, strip out freemem
57 * into a separate function, its called from 3
58 * different places now.
59 * 02/09/02 - Replaced sleep_on.
60 * 03/01/02 - Replace access to several registers from 32 bit to
61 * 16 bit. Fixes alignment errors on PPC 64 bit machines.
62 * Thanks to Al Trautman for this one.
63 * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
64 * silently ignored until the error checking code
65 * went into version 1.0.0
66 * 06/04/02 - Add correct start up sequence for the cardbus adapters.
67 * Required for strict compliance with pci power mgmt specs.
68 * To Do:
69 *
70 * Wake on lan
71 *
72 * If Problems do Occur
73 * Most problems can be rectified by either closing and opening the interface
74 * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
75 * if compiled into the kernel).
76 */
77
78/* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */
79
80#define OLYMPIC_DEBUG 0
81
82
1da177e4
LT
83#include <linux/module.h>
84#include <linux/kernel.h>
85#include <linux/errno.h>
86#include <linux/timer.h>
87#include <linux/in.h>
88#include <linux/ioport.h>
48e20467 89#include <linux/seq_file.h>
1da177e4
LT
90#include <linux/string.h>
91#include <linux/proc_fs.h>
92#include <linux/ptrace.h>
93#include <linux/skbuff.h>
94#include <linux/interrupt.h>
95#include <linux/delay.h>
96#include <linux/netdevice.h>
97#include <linux/trdevice.h>
98#include <linux/stddef.h>
99#include <linux/init.h>
100#include <linux/pci.h>
101#include <linux/spinlock.h>
102#include <linux/bitops.h>
ff5688ae 103#include <linux/jiffies.h>
1da177e4
LT
104
105#include <net/checksum.h>
457c4cbc 106#include <net/net_namespace.h>
1da177e4
LT
107
108#include <asm/io.h>
109#include <asm/system.h>
110
111#include "olympic.h"
112
113/* I've got to put some intelligence into the version number so that Peter and I know
114 * which version of the code somebody has got.
115 * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
116 * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
117 *
118 * Official releases will only have an a.b.c version number format.
119 */
120
e28e3a61 121static char version[] =
1da177e4
LT
122"Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ;
123
124static char *open_maj_error[] = {"No error", "Lobe Media Test", "Physical Insertion",
125 "Address Verification", "Neighbor Notification (Ring Poll)",
126 "Request Parameters","FDX Registration Request",
127 "FDX Duplicate Address Check", "Station registration Query Wait",
128 "Unknown stage"};
129
130static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
131 "Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing",
132 "Duplicate Node Address","Request Parameters","Remove Received",
133 "Reserved", "Reserved", "No Monitor Detected for RPL",
134 "Monitor Contention failer for RPL", "FDX Protocol Error"};
135
98a1708d 136/* Module parameters */
1da177e4
LT
137
138MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
139MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;
140
141/* Ring Speed 0,4,16,100
142 * 0 = Autosense
143 * 4,16 = Selected speed only, no autosense
144 * This allows the card to be the first on the ring
145 * and become the active monitor.
146 * 100 = Nothing at present, 100mbps is autodetected
147 * if FDX is turned on. May be implemented in the future to
148 * fail if 100mpbs is not detected.
149 *
150 * WARNING: Some hubs will allow you to insert
151 * at the wrong speed
152 */
153
154static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ;
155module_param_array(ringspeed, int, NULL, 0);
156
157/* Packet buffer size */
158
159static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ;
160module_param_array(pkt_buf_sz, int, NULL, 0) ;
161
162/* Message Level */
163
164static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ;
165module_param_array(message_level, int, NULL, 0) ;
166
167/* Change network_monitor to receive mac frames through the arb channel.
168 * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
169 * device, i.e. tr0, tr1 etc.
170 * Intended to be used to create a ring-error reporting network module
171 * i.e. it will give you the source address of beaconers on the ring
172 */
173static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
174module_param_array(network_monitor, int, NULL, 0);
175
a3aa1884 176static DEFINE_PCI_DEVICE_TABLE(olympic_pci_tbl) = {
1da177e4
LT
177 {PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
178 { } /* Terminating Entry */
179};
180MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ;
181
182
/* Forward declarations for the driver entry points wired up below and in
 * olympic_probe(). */
static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static int olympic_init(struct net_device *dev);
static int olympic_open(struct net_device *dev);
static netdev_tx_t olympic_xmit(struct sk_buff *skb,
				struct net_device *dev);
static int olympic_close(struct net_device *dev);
static void olympic_set_rx_mode(struct net_device *dev);
static void olympic_freemem(struct net_device *dev) ;
static irqreturn_t olympic_interrupt(int irq, void *dev_id);
static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
static void olympic_arb_cmd(struct net_device *dev);
static int olympic_change_mtu(struct net_device *dev, int mtu);
static void olympic_srb_bh(struct net_device *dev) ;
static void olympic_asb_bh(struct net_device *dev) ;
/* /proc/net/olympic_tr%d file operations (only registered when the
 * network_monitor module parameter is set for the adapter). */
static const struct file_operations olympic_proc_ops;

/* net_device callback table installed on each adapter in olympic_probe(). */
static const struct net_device_ops olympic_netdev_ops = {
	.ndo_open		= olympic_open,
	.ndo_stop		= olympic_close,
	.ndo_start_xmit		= olympic_xmit,
	.ndo_change_mtu		= olympic_change_mtu,
	.ndo_set_rx_mode	= olympic_set_rx_mode,
	.ndo_set_mac_address	= olympic_set_mac_address,
};
207
1da177e4
LT
208static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
209{
210 struct net_device *dev ;
211 struct olympic_private *olympic_priv;
212 static int card_no = -1 ;
213 int i ;
214
215 card_no++ ;
216
217 if ((i = pci_enable_device(pdev))) {
218 return i ;
219 }
220
221 pci_set_master(pdev);
222
223 if ((i = pci_request_regions(pdev,"olympic"))) {
224 goto op_disable_dev;
225 }
226
227 dev = alloc_trdev(sizeof(struct olympic_private)) ;
228 if (!dev) {
229 i = -ENOMEM;
6d56ab93 230 goto op_release_dev;
1da177e4
LT
231 }
232
eda10531 233 olympic_priv = netdev_priv(dev) ;
1da177e4
LT
234
235 spin_lock_init(&olympic_priv->olympic_lock) ;
236
237 init_waitqueue_head(&olympic_priv->srb_wait);
238 init_waitqueue_head(&olympic_priv->trb_wait);
239#if OLYMPIC_DEBUG
eda10531 240 printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, netdev_priv(dev));
1da177e4
LT
241#endif
242 dev->irq=pdev->irq;
243 dev->base_addr=pci_resource_start(pdev, 0);
244 olympic_priv->olympic_card_name = pci_name(pdev);
245 olympic_priv->pdev = pdev;
246 olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256);
247 olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048);
248 if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) {
249 goto op_free_iomap;
250 }
251
252 if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
253 olympic_priv->pkt_buf_sz = PKT_BUF_SZ ;
254 else
255 olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
256
257 dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ;
258 olympic_priv->olympic_ring_speed = ringspeed[card_no] ;
259 olympic_priv->olympic_message_level = message_level[card_no] ;
260 olympic_priv->olympic_network_monitor = network_monitor[card_no];
261
262 if ((i = olympic_init(dev))) {
263 goto op_free_iomap;
264 }
265
efda0723 266 dev->netdev_ops = &olympic_netdev_ops;
1da177e4
LT
267 SET_NETDEV_DEV(dev, &pdev->dev);
268
269 pci_set_drvdata(pdev,dev) ;
270 register_netdev(dev) ;
271 printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name);
272 if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */
273 char proc_name[20] ;
457c4cbc 274 strcpy(proc_name,"olympic_") ;
1da177e4 275 strcat(proc_name,dev->name) ;
48e20467 276 proc_create_data(proc_name, 0, init_net.proc_net, &olympic_proc_ops, dev);
1da177e4
LT
277 printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
278 }
279 return 0 ;
280
281op_free_iomap:
282 if (olympic_priv->olympic_mmio)
283 iounmap(olympic_priv->olympic_mmio);
284 if (olympic_priv->olympic_lap)
285 iounmap(olympic_priv->olympic_lap);
286
1da177e4 287 free_netdev(dev);
6d56ab93 288op_release_dev:
1da177e4
LT
289 pci_release_regions(pdev);
290
291op_disable_dev:
292 pci_disable_device(pdev);
293 return i;
294}
295
e28e3a61 296static int olympic_init(struct net_device *dev)
1da177e4
LT
297{
298 struct olympic_private *olympic_priv;
299 u8 __iomem *olympic_mmio, *init_srb,*adapter_addr;
300 unsigned long t;
301 unsigned int uaa_addr;
302
eda10531 303 olympic_priv=netdev_priv(dev);
1da177e4
LT
304 olympic_mmio=olympic_priv->olympic_mmio;
305
014e4668 306 printk("%s\n", version);
1da177e4
LT
307 printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
308
309 writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
310 t=jiffies;
311 while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
312 schedule();
ff5688ae 313 if(time_after(jiffies, t + 40*HZ)) {
1da177e4
LT
314 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
315 return -ENODEV;
316 }
317 }
318
319
320 /* Needed for cardbus */
321 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
322 writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
323 }
324
325#if OLYMPIC_DEBUG
326 printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
327 printk("GPR: %x\n",readw(olympic_mmio+GPR));
328 printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK));
329#endif
330 /* Aaaahhh, You have got to be real careful setting GPR, the card
331 holds the previous values from flash memory, including autosense
332 and ring speed */
333
334 writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL);
335
336 if (olympic_priv->olympic_ring_speed == 0) { /* Autosense */
337 writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR);
338 if (olympic_priv->olympic_message_level)
339 printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name);
340 } else if (olympic_priv->olympic_ring_speed == 16) {
341 if (olympic_priv->olympic_message_level)
342 printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
343 writew(GPR_16MBPS, olympic_mmio+GPR);
344 } else if (olympic_priv->olympic_ring_speed == 4) {
345 if (olympic_priv->olympic_message_level)
346 printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ;
347 writew(0, olympic_mmio+GPR);
348 }
349
350 writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR);
351
352#if OLYMPIC_DEBUG
353 printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ;
354#endif
355 /* Solo has been paused to meet the Cardbus power
356 * specs if the adapter is cardbus. Check to
357 * see its been paused and then restart solo. The
358 * adapter should set the pause bit within 1 second.
359 */
360
361 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
362 t=jiffies;
b710b43c 363 while (!(readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE)) {
1da177e4 364 schedule() ;
ff5688ae 365 if(time_after(jiffies, t + 2*HZ)) {
1da177e4
LT
366 printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ;
367 return -ENODEV;
368 }
369 }
370 writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ;
371 }
372
373 /* start solo init */
374 writel((1<<15),olympic_mmio+SISR_MASK_SUM);
375
376 t=jiffies;
377 while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
378 schedule();
ff5688ae 379 if(time_after(jiffies, t + 15*HZ)) {
1da177e4
LT
380 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
381 return -ENODEV;
382 }
383 }
384
385 writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
386
387#if OLYMPIC_DEBUG
388 printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
389#endif
390
391 init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
392
393#if OLYMPIC_DEBUG
394{
395 int i;
396 printk("init_srb(%p): ",init_srb);
397 for(i=0;i<20;i++)
398 printk("%x ",readb(init_srb+i));
399 printk("\n");
400}
401#endif
402 if(readw(init_srb+6)) {
403 printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",readw(init_srb+6));
404 return -ENODEV;
405 }
406
407 if (olympic_priv->olympic_message_level) {
408 if ( readb(init_srb +2) & 0x40) {
409 printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ;
410 } else {
411 printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
412 }
413 }
414
415 uaa_addr=swab16(readw(init_srb+8));
416
417#if OLYMPIC_DEBUG
418 printk("UAA resides at %x\n",uaa_addr);
419#endif
420
421 writel(uaa_addr,olympic_mmio+LAPA);
422 adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800));
423
0795af57
JP
424 memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);
425
1da177e4 426#if OLYMPIC_DEBUG
e174961c 427 printk("adapter address: %pM\n", dev->dev_addr);
1da177e4
LT
428#endif
429
1da177e4
LT
430 olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
431 olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14));
432
433 return 0;
434
435}
436
/*
 * olympic_open - ndo_open callback: insert the adapter into the ring and
 * set up the receive and transmit DMA rings.
 *
 * Re-runs olympic_init(), grabs the (shared) IRQ, then issues an
 * SRB_OPEN_ADAPTER command and sleeps on srb_wait until the interrupt
 * handler clears srb_queued.  On an autosense ring-speed mismatch
 * (response 0x07 on the first pass) the open is retried once.  On success
 * the SRB reply supplies the asb/srb/arb/trb channel offsets and the ring
 * speed actually negotiated.
 *
 * Returns 0 on success, -EAGAIN if the IRQ cannot be obtained, -EIO on any
 * open failure (IRQ is freed again on the "out" path).
 */
static int olympic_open(struct net_device *dev)
{
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
	unsigned long flags, t;
	int i, open_finished = 1 ;
	u8 resp, err;

	DECLARE_WAITQUEUE(wait,current) ;

	olympic_init(dev);

	if (request_irq(dev->irq, olympic_interrupt, IRQF_SHARED , "olympic",
			dev))
		return -EAGAIN;

#if OLYMPIC_DEBUG
	printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
	printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR));
#endif

	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);

	writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */

	writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */

	/* adapter is closed, so SRB is pointed to by LAPWWO */

	writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
	init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));

#if OLYMPIC_DEBUG
	printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
	printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
	printk("Before the open command\n");
#endif
	do {
		/* Build the open-adapter SRB in the LAP window */
		memset_io(init_srb,0,SRB_COMMAND_SIZE);

		writeb(SRB_OPEN_ADAPTER,init_srb) ;	/* open */
		writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2);

		/* If Network Monitor, instruct card to copy MAC frames through the ARB */
		if (olympic_priv->olympic_network_monitor)
			writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
		else
			writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);

		/* Test OR of first 3 bytes as its totally possible for
		 * someone to set the first 2 bytes to be zero, although this
		 * is an error, the first byte must have bit 6 set to 1  */

		if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
			writeb(olympic_priv->olympic_laa[0],init_srb+12);
			writeb(olympic_priv->olympic_laa[1],init_srb+13);
			writeb(olympic_priv->olympic_laa[2],init_srb+14);
			writeb(olympic_priv->olympic_laa[3],init_srb+15);
			writeb(olympic_priv->olympic_laa[4],init_srb+16);
			writeb(olympic_priv->olympic_laa[5],init_srb+17);
			memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ;
		}
		writeb(1,init_srb+30);

		spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
		olympic_priv->srb_queued=1;

		/* Kick the command; the ISR wakes srb_wait and clears srb_queued */
		writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);

		t = jiffies ;

		add_wait_queue(&olympic_priv->srb_wait,&wait) ;
		set_current_state(TASK_INTERRUPTIBLE) ;

		/* Wait (interruptibly, 10s timeout) for the SRB reply interrupt */
		while(olympic_priv->srb_queued) {
			schedule() ;
			if(signal_pending(current))	{
				printk(KERN_WARNING "%s: Signal received in open.\n",
					dev->name);
				printk(KERN_WARNING "SISR=%x LISR=%x\n",
					readl(olympic_mmio+SISR),
					readl(olympic_mmio+LISR));
				olympic_priv->srb_queued=0;
				break;
			}
			if (time_after(jiffies, t + 10*HZ)) {
				printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
				olympic_priv->srb_queued=0;
				break ;
			}
			set_current_state(TASK_INTERRUPTIBLE) ;
		}
		remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
		set_current_state(TASK_RUNNING) ;
		olympic_priv->srb_queued = 0 ;
#if OLYMPIC_DEBUG
		printk("init_srb(%p): ",init_srb);
		for(i=0;i<20;i++)
			printk("%02x ",readb(init_srb+i));
		printk("\n");
#endif

		/* If we get the same return response as we set, the interrupt wasn't raised and the open
		 * timed out.
		 */

		switch (resp = readb(init_srb+2)) {
		case OLYMPIC_CLEAR_RET_CODE:
			printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ;
			goto out;
		case 0:
			open_finished = 1;
			break;
		case 0x07:	/* ring speed mismatch */
			if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
				printk(KERN_WARNING "%s: Retrying at different ring speed\n", dev->name);
				open_finished = 0 ;
				continue;
			}

			err = readb(init_srb+7);

			if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
				printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
				printk(KERN_WARNING "%s: Please try again with a specified ring speed\n",dev->name);
			} else {
				/* Decode the error byte via the stage/reason tables */
				printk(KERN_WARNING "%s: %s - %s\n", dev->name,
					open_maj_error[(err & 0xf0) >> 4],
					open_min_error[(err & 0x0f)]);
			}
			goto out;

		case 0x32:	/* locally administered address rejected */
			printk(KERN_WARNING "%s: Invalid LAA: %pM\n",
			       dev->name, olympic_priv->olympic_laa);
			goto out;

		default:
			printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name, resp);
			goto out;

		}
	} while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */

	if (readb(init_srb+18) & (1<<3))
		if (olympic_priv->olympic_message_level)
			printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);

	/* Bits 1/0 of byte 18 report the speed actually joined at */
	if (readb(init_srb+18) & (1<<1))
		olympic_priv->olympic_ring_speed = 100 ;
	else if (readb(init_srb+18) & 1)
		olympic_priv->olympic_ring_speed = 16 ;
	else
		olympic_priv->olympic_ring_speed = 4 ;

	if (olympic_priv->olympic_message_level)
		printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);

	/* Channel offsets returned by the adapter (big-endian on the card) */
	olympic_priv->asb = swab16(readw(init_srb+8));
	olympic_priv->srb = swab16(readw(init_srb+10));
	olympic_priv->arb = swab16(readw(init_srb+12));
	olympic_priv->trb = swab16(readw(init_srb+16));

	olympic_priv->olympic_receive_options = 0x01 ;
	olympic_priv->olympic_copy_all_options = 0 ;

	/* setup rx ring */

	writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */

	writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this enables the RX channel */

	/* Allocate and DMA-map one skb per rx descriptor; a partial ring of
	 * i buffers is acceptable, zero buffers is fatal (checked below). */
	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {

		struct sk_buff *skb;

		skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
		if(skb == NULL)
			break;

		skb->dev = dev;

		olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
							  skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ;
		olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
		olympic_priv->rx_ring_skb[i]=skb;
	}

	if (i==0) {
		printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
		goto out;
	}

	olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring,
					 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
	writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
	writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
	writew(i, olympic_mmio+RXDESCQCNT);

	olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
						sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
	writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
	writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);

	olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1;	/* last processed rx status */
	olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;

	writew(i, olympic_mmio+RXSTATQCNT);

#if OLYMPIC_DEBUG
	printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
	printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
	printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
	printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
	printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) );

	printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
	printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
		olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ;
#endif

	writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);

#if OLYMPIC_DEBUG
	printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
	printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
	printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
#endif

	writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);

	/* setup tx ring */

	writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
	for(i=0;i<OLYMPIC_TX_RING_SIZE;i++)
		olympic_priv->olympic_tx_ring[i].buffer=cpu_to_le32(0xdeadbeef);

	olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
	olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
					 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
	writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
	writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
	writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);

	olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
						sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
	writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
	writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
	writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);

	olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
	olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */

	writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
	writel(0,olympic_mmio+EISR) ;
	writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
	writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);

#if OLYMPIC_DEBUG
	printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
	printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
#endif

	if (olympic_priv->olympic_network_monitor) {
		u8 __iomem *oat;
		u8 __iomem *opt;
		u8 addr[6];
		oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr);
		opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr);

		for (i = 0; i < 6; i++)
			addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+i);
		printk("%s: Node Address: %pM\n", dev->name, addr);
		printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));

		for (i = 0; i < 6; i++)
			addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+i);
		printk("%s: NAUN Address: %pM\n", dev->name, addr);
	}

	netif_start_queue(dev);
	return 0;

out:
	free_irq(dev->irq, dev);
	return -EIO;
}
729
730/*
731 * When we enter the rx routine we do not know how many frames have been
732 * queued on the rx channel. Therefore we start at the next rx status
733 * position and travel around the receive ring until we have completed
734 * all the frames.
735 *
736 * This means that we may process the frame before we receive the end
737 * of frame interrupt. This is why we always test the status instead
738 * of blindly processing the next frame.
739 *
740 * We also remove the last 4 bytes from the packet as well, these are
741 * just token ring trailer info and upset protocols that don't check
742 * their own length, i.e. SNA.
743 *
744 */
745static void olympic_rx(struct net_device *dev)
746{
eda10531 747 struct olympic_private *olympic_priv=netdev_priv(dev);
1da177e4
LT
748 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
749 struct olympic_rx_status *rx_status;
750 struct olympic_rx_desc *rx_desc ;
751 int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
752 struct sk_buff *skb, *skb2;
753 int i;
754
755 rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;
756
757 while (rx_status->status_buffercnt) {
758 u32 l_status_buffercnt;
759
760 olympic_priv->rx_status_last_received++ ;
761 olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
762#if OLYMPIC_DEBUG
014e4668 763 printk("rx status: %x rx len: %x\n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
1da177e4
LT
764#endif
765 length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
766 buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
767 i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */
768 frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;
769
770#if OLYMPIC_DEBUG
771 printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
772#endif
773 l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
774 if(l_status_buffercnt & 0xC0000000) {
775 if (l_status_buffercnt & 0x3B000000) {
776 if (olympic_priv->olympic_message_level) {
777 if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */
014e4668 778 printk(KERN_WARNING "%s: Rx Frame Truncated\n",dev->name);
1da177e4 779 if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
014e4668 780 printk(KERN_WARNING "%s: Rx Frame Receive overrun\n",dev->name);
1da177e4 781 if (l_status_buffercnt & (1<<27)) /* No receive buffers */
014e4668 782 printk(KERN_WARNING "%s: No receive buffers\n",dev->name);
1da177e4 783 if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
014e4668 784 printk(KERN_WARNING "%s: Receive frame error detect\n",dev->name);
1da177e4 785 if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
014e4668 786 printk(KERN_WARNING "%s: Received Error Detect\n",dev->name);
1da177e4
LT
787 }
788 olympic_priv->rx_ring_last_received += i ;
789 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
dcc59a97 790 dev->stats.rx_errors++;
1da177e4
LT
791 } else {
792
793 if (buffer_cnt == 1) {
794 skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ;
795 } else {
796 skb = dev_alloc_skb(length) ;
797 }
798
799 if (skb == NULL) {
014e4668 800 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n",dev->name) ;
dcc59a97 801 dev->stats.rx_dropped++;
1da177e4
LT
802 /* Update counters even though we don't transfer the frame */
803 olympic_priv->rx_ring_last_received += i ;
804 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
805 } else {
1da177e4
LT
806 /* Optimise based upon number of buffers used.
807 If only one buffer is used we can simply swap the buffers around.
808 If more than one then we must use the new buffer and copy the information
809 first. Ideally all frames would be in a single buffer, this can be tuned by
810 altering the buffer size. If the length of the packet is less than
811 1500 bytes we're going to copy it over anyway to stop packets getting
812 dropped from sockets with buffers smaller than our pkt_buf_sz. */
813
814 if (buffer_cnt==1) {
815 olympic_priv->rx_ring_last_received++ ;
816 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
817 rx_ring_last_received = olympic_priv->rx_ring_last_received ;
818 if (length > 1500) {
819 skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
820 /* unmap buffer */
821 pci_unmap_single(olympic_priv->pdev,
822 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
823 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
824 skb_put(skb2,length-4);
825 skb2->protocol = tr_type_trans(skb2,dev);
826 olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
827 cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
828 olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
829 olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
830 cpu_to_le32(olympic_priv->pkt_buf_sz);
831 olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
832 netif_rx(skb2) ;
833 } else {
834 pci_dma_sync_single_for_cpu(olympic_priv->pdev,
835 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
836 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
d626f62b
ACM
837 skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
838 skb_put(skb,length - 4),
839 length - 4);
1da177e4
LT
840 pci_dma_sync_single_for_device(olympic_priv->pdev,
841 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
842 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
843 skb->protocol = tr_type_trans(skb,dev) ;
844 netif_rx(skb) ;
845 }
846 } else {
847 do { /* Walk the buffers */
848 olympic_priv->rx_ring_last_received++ ;
849 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
850 rx_ring_last_received = olympic_priv->rx_ring_last_received ;
851 pci_dma_sync_single_for_cpu(olympic_priv->pdev,
852 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
853 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
854 rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
855 cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
d626f62b
ACM
856 skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
857 skb_put(skb, cpy_length),
858 cpy_length);
1da177e4
LT
859 pci_dma_sync_single_for_device(olympic_priv->pdev,
860 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
861 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
862 } while (--i) ;
863 skb_trim(skb,skb->len-4) ;
864 skb->protocol = tr_type_trans(skb,dev);
865 netif_rx(skb) ;
866 }
dcc59a97
SH
867 dev->stats.rx_packets++ ;
868 dev->stats.rx_bytes += length ;
1da177e4
LT
869 } /* if skb == null */
870 } /* If status & 0x3b */
871
872 } else { /*if buffercnt & 0xC */
873 olympic_priv->rx_ring_last_received += i ;
874 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
875 }
876
877 rx_status->fragmentcnt_framelen = 0 ;
878 rx_status->status_buffercnt = 0 ;
879 rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);
880
881 writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ);
882 } /* while */
883
884}
885
/*
 * Release all RX-ring skbs and tear down every streaming DMA mapping
 * (per-buffer RX mappings plus the four ring/status ring mappings).
 * Called from olympic_close(); uses dev_kfree_skb_irq so it is safe
 * even if we get here with interrupts disabled.
 */
static void olympic_freemem(struct net_device *dev)
{
	struct olympic_private *olympic_priv=netdev_priv(dev);
	int i;

	/* Walk the RX ring starting from rx_status_last_received, freeing
	 * each posted skb and unmapping its DMA buffer. */
	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
		if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
			dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
			olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
		}
		/* 0xdeadbeef is the driver's sentinel for "no DMA mapping in
		 * this descriptor" — only unmap real mappings. */
		if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != cpu_to_le32(0xdeadbeef)) {
			pci_unmap_single(olympic_priv->pdev,
				le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
				olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
		}
		olympic_priv->rx_status_last_received++;
		olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
	}
	/* unmap rings */
	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
		sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
		sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);

	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
		sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
		sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);

	return ;
}
917
/*
 * Interrupt handler.  Decodes the SISR summary register and dispatches
 * to the SRB/TX/RX/ARB/ASB/TRB sub-handlers.  Returns IRQ_NONE when the
 * interrupt was not raised by this adapter (shared IRQ line) or on
 * hot-unplug, IRQ_HANDLED otherwise.
 */
static irqreturn_t olympic_interrupt(int irq, void *dev_id)
{
	struct net_device *dev= (struct net_device *)dev_id;
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	u32 sisr;
	u8 __iomem *adapter_check_area ;

	/*
	 *  Read sisr but don't reset it yet.
	 *  The indication bit may have been set but the interrupt latch
	 *  bit may not be set, so we'd lose the interrupt later.
	 */
	sisr=readl(olympic_mmio+SISR) ;
	if (!(sisr & SISR_MI)) /* Interrupt isn't for us */
		return IRQ_NONE;
	sisr=readl(olympic_mmio+SISR_RR) ;  /* Read & Reset sisr */

	spin_lock(&olympic_priv->olympic_lock);

	/* Hotswap gives us this on removal */
	if (sisr == 0xffffffff) {
		printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
		spin_unlock(&olympic_priv->olympic_lock) ;
		return IRQ_NONE;
	}

	if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
			SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {

		/* If we ever get this the adapter is seriously dead. Only a reset is going to
		 * bring it back to life. We're talking pci bus errors and such like :( */
		if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
			printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ;
			printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
			printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
			printk(KERN_ERR "or the linux-tr mailing list.\n") ;
			/* Release any sleeper waiting on an SRB reply so close/open
			 * paths don't hang on a dead adapter. */
			wake_up_interruptible(&olympic_priv->srb_wait);
			spin_unlock(&olympic_priv->olympic_lock) ;
			return IRQ_HANDLED;
		} /* SISR_ERR */

		/* SRB reply: srb_queued==1 means a task is sleeping on srb_wait,
		 * srb_queued==2 means the reply is handled here via the bh. */
		if(sisr & SISR_SRB_REPLY) {
			if(olympic_priv->srb_queued==1) {
				wake_up_interruptible(&olympic_priv->srb_wait);
			} else if (olympic_priv->srb_queued==2) {
				olympic_srb_bh(dev) ;
			}
			olympic_priv->srb_queued=0;
		} /* SISR_SRB_REPLY */

		/* We shouldn't ever miss the Tx interrupt, but the you never know, hence the loop to ensure
		we get all tx completions. */
		if (sisr & SISR_TX1_EOF) {
			while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
				olympic_priv->tx_ring_last_status++;
				olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
				olympic_priv->free_tx_ring_entries++;
				dev->stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
				dev->stats.tx_packets++ ;
				pci_unmap_single(olympic_priv->pdev,
					le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
					olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
				/* Mark the descriptor unmapped (sentinel checked in olympic_freemem). */
				olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=cpu_to_le32(0xdeadbeef);
				olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
			}
			netif_wake_queue(dev);
		} /* SISR_TX1_EOF */

		if (sisr & SISR_RX_STATUS) {
			olympic_rx(dev);
		} /* SISR_RX_STATUS */

		/* Fatal adapter check: dump the 8 diagnostic bytes the adapter
		 * exposes through its LAP window and leave the queue stopped. */
		if (sisr & SISR_ADAPTER_CHECK) {
			netif_stop_queue(dev);
			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
			writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
			adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
			printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
			spin_unlock(&olympic_priv->olympic_lock) ;
			return IRQ_HANDLED;
		} /* SISR_ADAPTER_CHECK */

		if (sisr & SISR_ASB_FREE) {
			/* Wake up anything that is waiting for the asb response */
			if (olympic_priv->asb_queued) {
				olympic_asb_bh(dev) ;
			}
		} /* SISR_ASB_FREE */

		if (sisr & SISR_ARB_CMD) {
			olympic_arb_cmd(dev) ;
		} /* SISR_ARB_CMD */

		if (sisr & SISR_TRB_REPLY) {
			/* Wake up anything that is waiting for the trb response */
			if (olympic_priv->trb_queued) {
				wake_up_interruptible(&olympic_priv->trb_wait);
			}
			olympic_priv->trb_queued = 0 ;
		} /* SISR_TRB_REPLY */

		if (sisr & SISR_RX_NOBUF) {
			/* According to the documentation, we don't have to do anything, but trapping it keeps it out of
			/var/log/messages. */
		} /* SISR_RX_NOBUF */
	} else {
		printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
		printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
	} /* One if the interrupts we want */
	/* Re-arm the master interrupt before dropping the lock. */
	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);

	spin_unlock(&olympic_priv->olympic_lock) ;
	return IRQ_HANDLED;
}
1034
61a84108
SH
1035static netdev_tx_t olympic_xmit(struct sk_buff *skb,
1036 struct net_device *dev)
1da177e4 1037{
eda10531 1038 struct olympic_private *olympic_priv=netdev_priv(dev);
1da177e4
LT
1039 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
1040 unsigned long flags ;
1041
1042 spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
1043
1044 netif_stop_queue(dev);
1045
1046 if(olympic_priv->free_tx_ring_entries) {
1047 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer =
1048 cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
1049 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
1050 olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
1051 olympic_priv->free_tx_ring_entries--;
1052
1053 olympic_priv->tx_ring_free++;
1054 olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
1055 writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
1056 netif_wake_queue(dev);
1057 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
6ed10654 1058 return NETDEV_TX_OK;
1da177e4
LT
1059 } else {
1060 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
5b548140 1061 return NETDEV_TX_BUSY;
1da177e4
LT
1062 }
1063
1064}
1065
1066
/*
 * ndo_stop handler: issue SRB_CLOSE_ADAPTER, wait (interruptibly, up to
 * 60s) for the SRB reply interrupt, then free ring memory, reset the
 * FIFOs/busmaster logic and release the IRQ.  Always returns 0.
 */
static int olympic_close(struct net_device *dev)
{
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb;
	unsigned long t,flags;

	DECLARE_WAITQUEUE(wait,current) ;

	netif_stop_queue(dev);

	/* Point the LAP window at the SRB and build the close command. */
	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));

	writeb(SRB_CLOSE_ADAPTER,srb+0);
	writeb(0,srb+1);
	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);

	/* Register on the wait queue before firing the command so the
	 * SRB-reply interrupt cannot race past us. */
	add_wait_queue(&olympic_priv->srb_wait,&wait) ;
	set_current_state(TASK_INTERRUPTIBLE) ;

	spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
	olympic_priv->srb_queued=1;

	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
	spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);

	/* srb_queued is cleared by the interrupt handler on reply; the
	 * unconditional clear at the bottom means we give up after one
	 * 60s timeout rather than looping forever. */
	while(olympic_priv->srb_queued) {

		t = schedule_timeout_interruptible(60*HZ);

		if(signal_pending(current)) {
			printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
			printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
			olympic_priv->srb_queued=0;
			break;
		}

		if (t == 0) {
			printk(KERN_WARNING "%s: SRB timed out. May not be fatal.\n",dev->name);
		}
		olympic_priv->srb_queued=0;
	}
	remove_wait_queue(&olympic_priv->srb_wait,&wait) ;

	olympic_priv->rx_status_last_received++;
	olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;

	olympic_freemem(dev) ;

	/* reset tx/rx fifo's and busmaster logic */

	writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
	udelay(1);
	writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);

#if OLYMPIC_DEBUG
	{
	int i ;
	printk("srb(%p): ",srb);
	for(i=0;i<4;i++)
		printk("%x ",readb(srb+i));
	printk("\n");
	}
#endif
	free_irq(dev->irq,dev);

	return 0;

}
1136
/*
 * ndo_set_rx_mode handler.  Two-phase: if the promiscuous option bits
 * need to change, issue SRB_MODIFY_RECEIVE_OPTIONS and return (the SRB
 * reply arrives via olympic_srb_bh).  Otherwise OR all multicast
 * addresses into a 4-byte functional-address mask and issue
 * SRB_SET_FUNC_ADDRESS.  Runs in atomic context, hence srb_queued=2
 * ("can't sleep, use srb_bh").
 */
static void olympic_set_rx_mode(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
	u8 options = 0;
	u8 __iomem *srb;
	struct netdev_hw_addr *ha;
	unsigned char dev_mc_address[4] ;

	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
	options = olympic_priv->olympic_copy_all_options;

	/* 0x61: copy-all option bits used for promiscuous mode — exact bit
	 * meanings are per the chipset docs; confirm there if changing. */
	if (dev->flags&IFF_PROMISC)
		options |= 0x61 ;
	else
		options &= ~0x61 ;

	/* Only issue the srb if there is a change in options */

	if ((options ^ olympic_priv->olympic_copy_all_options)) {

		/* Now to issue the srb command to alter the copy.all.options */

		writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb);
		writeb(0,srb+1);
		writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
		writeb(0,srb+3);
		writeb(olympic_priv->olympic_receive_options,srb+4);
		writeb(options,srb+5);

		olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

		writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		olympic_priv->olympic_copy_all_options = options ;

		return ;
	}

	/* Set the functional addresses we need for multicast */

	dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;

	/* Fold bytes 2..5 of every multicast address into the mask. */
	netdev_for_each_mc_addr(ha, dev) {
		dev_mc_address[0] |= ha->addr[2];
		dev_mc_address[1] |= ha->addr[3];
		dev_mc_address[2] |= ha->addr[4];
		dev_mc_address[3] |= ha->addr[5];
	}

	writeb(SRB_SET_FUNC_ADDRESS,srb+0);
	writeb(0,srb+1);
	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
	writeb(0,srb+3);
	writeb(0,srb+4);
	writeb(0,srb+5);
	writeb(dev_mc_address[0],srb+6);
	writeb(dev_mc_address[1],srb+7);
	writeb(dev_mc_address[2],srb+8);
	writeb(dev_mc_address[3],srb+9);

	olympic_priv->srb_queued = 2 ;
	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

}
1203
/*
 * SRB reply bottom-half, called from the interrupt handler when
 * srb_queued==2 (the command was issued from atomic context).  Reads
 * the command byte at srb+0 and its return code at srb+2 and logs the
 * outcome; no state is changed beyond diagnostics.
 */
static void olympic_srb_bh(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
	u8 __iomem *srb;

	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));

	/* Common return codes: 0x01 = unrecognized command,
	 * 0x04 = adapter not open, 0x00/0xff = success. */
	switch (readb(srb)) {

		/* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
		 * At some point we should do something if we get an error, such as
		 * resetting the IFF_PROMISC flag in dev
		 */

		case SRB_MODIFY_RECEIVE_OPTIONS:
			switch (readb(srb+2)) {
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
					break ;
				default:
					if (olympic_priv->olympic_message_level)
						printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ;
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_SET_GROUP_ADDRESS - Multicast group setting
		 */

		case SRB_SET_GROUP_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
					break ;
				case 0x3c:
					printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ;
					break ;
				case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
					printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ;
					break ;
				case 0x55:
					printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
		 */

		case SRB_RESET_GROUP_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				case 0x39: /* Must deal with this if individual multicast addresses used */
					printk(KERN_INFO "%s: Group address not found\n",dev->name);
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;


		/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
		 */

		case SRB_SET_FUNC_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Functional Address Mask Set\n",dev->name);
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_READ_LOG - Read and reset the adapter error counters
		 */

		case SRB_READ_LOG:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;

			} /* switch srb[2] */
			break ;

		/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */

		case SRB_READ_SR_COUNTERS:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		default:
			printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name);
			break ;
	} /* switch srb[0] */

}
1348
1da177e4
LT
1349static int olympic_set_mac_address (struct net_device *dev, void *addr)
1350{
1351 struct sockaddr *saddr = addr ;
eda10531 1352 struct olympic_private *olympic_priv = netdev_priv(dev);
1da177e4
LT
1353
1354 if (netif_running(dev)) {
1355 printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
1356 return -EIO ;
1357 }
1358
1359 memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ;
1360
1361 if (olympic_priv->olympic_message_level) {
1362 printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0],
1363 olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
1364 olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
1365 olympic_priv->olympic_laa[5]);
1366 }
1367
1368 return 0 ;
1369}
1370
/*
 * Handle an ARB command posted by the adapter.  Called from the
 * interrupt handler on SISR_ARB_CMD.  Two commands are understood:
 * ARB_RECEIVE_DATA (an incoming MAC frame to copy out of adapter
 * memory and push up the stack) and ARB_LAN_CHANGE_STATUS (ring status
 * change — log it and, on counter overflows, issue follow-up SRB
 * commands).  Anything else is logged as unknown.
 */
static void olympic_arb_cmd(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	u8 __iomem *arb_block, *asb_block, *srb  ;
	u8 header_len ;
	u16 frame_len, buffer_len ;
	struct sk_buff *mac_frame ;
	u8 __iomem *buf_ptr ;
	u8 __iomem *frame_data ;
	u16 buff_off ;
	u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */
	u8 fdx_prot_error ;
	u16 next_ptr;

	arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
	asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
	srb = (olympic_priv->olympic_lap + olympic_priv->srb) ;

	if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */

		/* NOTE(review): header_len is read but never used below. */
		header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */
		frame_len = swab16(readw(arb_block + 10)) ;

		buff_off = swab16(readw(arb_block + 6)) ;

		buf_ptr = olympic_priv->olympic_lap + buff_off ;

#if OLYMPIC_DEBUG
{
	int i;
	frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;

	for (i=0 ;  i < 14 ; i++) {
		printk("Loc %d = %02x\n",i,readb(frame_data + i));
	}

	printk("next %04x, fs %02x, len %04x\n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
}
#endif
		mac_frame = dev_alloc_skb(frame_len) ;
		if (!mac_frame) {
			printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
			goto drop_frame;
		}

		/* Walk the buffer chain, creating the frame */

		do {
			frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
			buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
			memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ;
			next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next));
		} while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + swab16(next_ptr)));

		mac_frame->protocol = tr_type_trans(mac_frame, dev);

		if (olympic_priv->olympic_network_monitor) {
			struct trh_hdr *mac_hdr;
			printk(KERN_WARNING "%s: Received MAC Frame, details:\n",dev->name);
			mac_hdr = tr_hdr(mac_frame);
			printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n",
			       dev->name, mac_hdr->daddr);
			printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %pM\n",
			       dev->name, mac_hdr->saddr);
		}
		netif_rx(mac_frame);

drop_frame:
		/* Now tell the card we have dealt with the received frame */

		/* Set LISR Bit 1 */
		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM);

		/* Is the ASB free ? */

		if (readb(asb_block + 2) != 0xff) {
			/* ASB busy: ask for a free notification; olympic_asb_bh
			 * completes the acknowledgement (asb_queued==1). */
			olympic_priv->asb_queued = 1 ;
			writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
			return ;
			/* Drop out and wait for the bottom half to be run */
		}

		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);

		olympic_priv->asb_queued = 2 ;

		return ;

	} else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
		lan_status = swab16(readw(arb_block+6));
		fdx_prot_error = readb(arb_block+8) ;

		/* Issue ARB Free */
		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM);

		/* Only act on bits that changed since the last report. */
		lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ;

		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
			if (lan_status_diff & LSC_LWF)
					printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
			if (lan_status_diff & LSC_ARW)
					printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
			if (lan_status_diff & LSC_FPE)
					printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
			if (lan_status_diff & LSC_RR)
					printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);

			/* Adapter has been closed by the hardware */

			/* reset tx/rx fifo's and busmaster logic */

			writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
			udelay(1);
			writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
			netif_stop_queue(dev);
			olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
			printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
		} /* If serious error */

		if (olympic_priv->olympic_message_level) {
			if (lan_status_diff & LSC_SIG_LOSS)
					printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
			if (lan_status_diff & LSC_HARD_ERR)
					printk(KERN_INFO "%s: Beaconing\n",dev->name);
			if (lan_status_diff & LSC_SOFT_ERR)
					printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
			if (lan_status_diff & LSC_TRAN_BCN)
					printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
			if (lan_status_diff & LSC_SS)
					printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
			if (lan_status_diff & LSC_RING_REC)
					printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
			if (lan_status_diff & LSC_FDX_MODE)
					printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
		}

		if (lan_status_diff & LSC_CO) {

				if (olympic_priv->olympic_message_level)
					printk(KERN_INFO "%s: Counter Overflow\n", dev->name);

				/* Issue READ.LOG command */

				writeb(SRB_READ_LOG, srb);
				writeb(0,srb+1);
				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
				writeb(0,srb+3);
				writeb(0,srb+4);
				writeb(0,srb+5);

				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		}

		if (lan_status_diff & LSC_SR_CO) {

				if (olympic_priv->olympic_message_level)
					printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);

				/* Issue a READ.SR.COUNTERS */

				writeb(SRB_READ_SR_COUNTERS,srb);
				writeb(0,srb+1);
				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
				writeb(0,srb+3);

				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		}

		olympic_priv->olympic_lan_status = lan_status ;

	} /* Lan.change.status */
	else
		printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
}
1557
1558static void olympic_asb_bh(struct net_device *dev)
1559{
eda10531 1560 struct olympic_private *olympic_priv = netdev_priv(dev);
1da177e4
LT
1561 u8 __iomem *arb_block, *asb_block ;
1562
1563 arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
1564 asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
1565
1566 if (olympic_priv->asb_queued == 1) { /* Dropped through the first time */
1567
1568 writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
1569 writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
1570 writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
1571 writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
1572
1573 writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1574 olympic_priv->asb_queued = 2 ;
1575
1576 return ;
1577 }
1578
1579 if (olympic_priv->asb_queued == 2) {
1580 switch (readb(asb_block+2)) {
1581 case 0x01:
014e4668 1582 printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
1da177e4
LT
1583 break ;
1584 case 0x26:
014e4668 1585 printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
1da177e4
LT
1586 break ;
1587 case 0xFF:
1588 /* Valid response, everything should be ok again */
1589 break ;
1590 default:
1591 printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name);
1592 break ;
1593 }
1594 }
1595 olympic_priv->asb_queued = 0 ;
1596}
1597
1598static int olympic_change_mtu(struct net_device *dev, int mtu)
1599{
eda10531 1600 struct olympic_private *olympic_priv = netdev_priv(dev);
1da177e4
LT
1601 u16 max_mtu ;
1602
1603 if (olympic_priv->olympic_ring_speed == 4)
1604 max_mtu = 4500 ;
1605 else
1606 max_mtu = 18000 ;
1607
1608 if (mtu > max_mtu)
1609 return -EINVAL ;
1610 if (mtu < 100)
1611 return -EINVAL ;
1612
1613 dev->mtu = mtu ;
1614 olympic_priv->pkt_buf_sz = mtu + TR_HLEN ;
1615
1616 return 0 ;
1617}
1618
/*
 * olympic_proc_show - dump the adapter's on-card status tables to
 * /proc/net/olympic_<ifname>.
 *
 * The adapter address table (oat) and parameter table (opt) live in
 * adapter memory mapped through the LAP window, so every field must be
 * fetched with readb()/readw().  The 16-bit fields are big-endian card
 * data, hence the swab16() on each readw() result (readw() itself
 * already swaps on big-endian hosts; see the endian notes in the
 * changelog at the top of this file).
 *
 * m->private carries the struct net_device installed when the proc
 * entry was created.  Always returns 0, per the single_open() show
 * convention.
 */
static int olympic_proc_show(struct seq_file *m, void *v)
{
	struct net_device *dev = m->private;
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
	u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
	u8 addr[6];	/* scratch copy of a MAC-sized field; %pM cannot read __iomem directly */
	u8 addr2[6];
	int i;

	seq_printf(m,
		   "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
	seq_printf(m, "\n%6s: Adapter Address : Node Address : Functional Addr\n",
		   dev->name);

	/* Stage the node address into RAM before formatting it. */
	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i);

	/* The functional address field is 4 bytes, printed byte by byte. */
	seq_printf(m, "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
		   dev->name,
		   dev->dev_addr, addr,
		   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
		   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
		   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
		   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));

	seq_printf(m, "\n%6s: Token Ring Parameters Table:\n", dev->name);

	seq_printf(m, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n",
		   dev->name) ;

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr) + i);
	for (i = 0 ; i < 6 ; i++)
		addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i);

	/* Physical (ring) address is a 4-byte field. */
	seq_printf(m, "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n",
		   dev->name,
		   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
		   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
		   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
		   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
		   addr, addr2,
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));

	seq_printf(m, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
		   dev->name) ;

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i);
	seq_printf(m, "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n",
		   dev->name, addr,
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));

	seq_printf(m, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
		   dev->name) ;

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i);
	/* beacon_phys is a 4-byte field, printed byte by byte. */
	seq_printf(m, "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n",
		   dev->name,
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
		   addr,
		   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
		   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
		   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
		   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));

	return 0;
}
1697
48e20467
AD
1698static int olympic_proc_open(struct inode *inode, struct file *file)
1699{
1700 return single_open(file, olympic_proc_show, PDE(inode)->data);
1701}
1702
/* seq_file plumbing for the per-device /proc/net/olympic_<ifname> entry. */
static const struct file_operations olympic_proc_ops = {
	.open		= olympic_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1709
1da177e4
LT
1710static void __devexit olympic_remove_one(struct pci_dev *pdev)
1711{
1712 struct net_device *dev = pci_get_drvdata(pdev) ;
eda10531 1713 struct olympic_private *olympic_priv=netdev_priv(dev);
1da177e4
LT
1714
1715 if (olympic_priv->olympic_network_monitor) {
1716 char proc_name[20] ;
457c4cbc 1717 strcpy(proc_name,"olympic_") ;
1da177e4 1718 strcat(proc_name,dev->name) ;
457c4cbc 1719 remove_proc_entry(proc_name,init_net.proc_net);
1da177e4
LT
1720 }
1721 unregister_netdev(dev) ;
1722 iounmap(olympic_priv->olympic_mmio) ;
1723 iounmap(olympic_priv->olympic_lap) ;
1724 pci_release_regions(pdev) ;
1725 pci_set_drvdata(pdev,NULL) ;
1726 free_netdev(dev) ;
1727}
1728
/* PCI glue: device-match table plus probe/remove entry points. */
static struct pci_driver olympic_driver = {
	.name		= "olympic",
	.id_table	= olympic_pci_tbl,
	.probe		= olympic_probe,
	.remove		= __devexit_p(olympic_remove_one),
};
1735
1736static int __init olympic_pci_init(void)
1737{
83717cf0 1738 return pci_register_driver(&olympic_driver) ;
1da177e4
LT
1739}
1740
/* Module exit: unregister from the PCI core; remove() runs per device. */
static void __exit olympic_pci_cleanup(void)
{
	pci_unregister_driver(&olympic_driver) ;
}
1745
1746
/* Module entry/exit points and license (GPL required for the symbols used). */
module_init(olympic_pci_init) ;
module_exit(olympic_pci_cleanup) ;

MODULE_LICENSE("GPL");