1 /*
2 * acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card
3 * and other Tigon based cards.
4 *
5 * Copyright 1998-2002 by Jes Sorensen, <jes@trained-monkey.org>.
6 *
7 * Thanks to Alteon and 3Com for providing hardware and documentation
8 * enabling me to write this driver.
9 *
10 * A mailing list for discussing the use of this driver has been
11  * set up; please subscribe to the list if you have any questions
12 * about the driver. Send mail to linux-acenic-help@sunsite.auc.dk to
13 * see how to subscribe.
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * Additional credits:
21 * Pete Wyckoff <wyckoff@ca.sandia.gov>: Initial Linux/Alpha and trace
22 * dump support. The trace dump support has not been
23 * integrated yet however.
24 * Troy Benjegerdes: Big Endian (PPC) patches.
25 * Nate Stahl: Better out of memory handling and stats support.
26 * Aman Singla: Nasty race between interrupt handler and tx code dealing
27 * with 'testing the tx_ret_csm and setting tx_full'
28 * David S. Miller <davem@redhat.com>: conversion to new PCI dma mapping
29 * infrastructure and Sparc support
30 * Pierrick Pinasseau (CERN): For lending me an Ultra 5 to test the
31 * driver under Linux/Sparc64
32 * Matt Domsch <Matt_Domsch@dell.com>: Detect Alteon 1000baseT cards
33 * ETHTOOL_GDRVINFO support
34 * Chip Salzenberg <chip@valinux.com>: Fix race condition between tx
35 * handler and close() cleanup.
36 * Ken Aaker <kdaaker@rchland.vnet.ibm.com>: Correct check for whether
37 * memory mapped IO is enabled to
38 * make the driver work on RS/6000.
39 * Takayoshi Kouchi <kouchi@hpc.bs1.fc.nec.co.jp>: Identifying problem
40 * where the driver would disable
41 * bus master mode if it had to disable
42 * write and invalidate.
43 * Stephen Hack <stephen_hack@hp.com>: Fixed ace_set_mac_addr for little
44 * endian systems.
45 * Val Henson <vhenson@esscom.com>: Reset Jumbo skb producer and
46 * rx producer index when
47 * flushing the Jumbo ring.
48 * Hans Grobler <grobh@sun.ac.za>: Memory leak fixes in the
49 * driver init path.
50 * Grant Grundler <grundler@cup.hp.com>: PCI write posting fixes.
51 */
52
53 #include <linux/module.h>
54 #include <linux/moduleparam.h>
55 #include <linux/types.h>
56 #include <linux/errno.h>
57 #include <linux/ioport.h>
58 #include <linux/pci.h>
59 #include <linux/dma-mapping.h>
60 #include <linux/kernel.h>
61 #include <linux/netdevice.h>
62 #include <linux/etherdevice.h>
63 #include <linux/skbuff.h>
64 #include <linux/delay.h>
65 #include <linux/mm.h>
66 #include <linux/highmem.h>
67 #include <linux/sockios.h>
68 #include <linux/firmware.h>
69 #include <linux/slab.h>
70 #include <linux/prefetch.h>
71 #include <linux/if_vlan.h>
72
73 #ifdef SIOCETHTOOL
74 #include <linux/ethtool.h>
75 #endif
76
77 #include <net/sock.h>
78 #include <net/ip.h>
79
80 #include <asm/io.h>
81 #include <asm/irq.h>
82 #include <asm/byteorder.h>
83 #include <asm/uaccess.h>
84
85
86 #define DRV_NAME "acenic"
87
88 #undef INDEX_DEBUG
89
90 #ifdef CONFIG_ACENIC_OMIT_TIGON_I
91 #define ACE_IS_TIGON_I(ap) 0
92 #define ACE_TX_RING_ENTRIES(ap) MAX_TX_RING_ENTRIES
93 #else
94 #define ACE_IS_TIGON_I(ap) (ap->version == 1)
95 #define ACE_TX_RING_ENTRIES(ap) ap->tx_ring_entries
96 #endif
97
98 #ifndef PCI_VENDOR_ID_ALTEON
99 #define PCI_VENDOR_ID_ALTEON 0x12ae
100 #endif
101 #ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE
102 #define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE 0x0001
103 #define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002
104 #endif
105 #ifndef PCI_DEVICE_ID_3COM_3C985
106 #define PCI_DEVICE_ID_3COM_3C985 0x0001
107 #endif
108 #ifndef PCI_VENDOR_ID_NETGEAR
109 #define PCI_VENDOR_ID_NETGEAR 0x1385
110 #define PCI_DEVICE_ID_NETGEAR_GA620 0x620a
111 #endif
112 #ifndef PCI_DEVICE_ID_NETGEAR_GA620T
113 #define PCI_DEVICE_ID_NETGEAR_GA620T 0x630a
114 #endif
115
116
117 /*
118 * Farallon used the DEC vendor ID by mistake and they seem not
119 * to care - stinky!
120 */
121 #ifndef PCI_DEVICE_ID_FARALLON_PN9000SX
122 #define PCI_DEVICE_ID_FARALLON_PN9000SX 0x1a
123 #endif
124 #ifndef PCI_DEVICE_ID_FARALLON_PN9100T
125 #define PCI_DEVICE_ID_FARALLON_PN9100T 0xfa
126 #endif
127 #ifndef PCI_VENDOR_ID_SGI
128 #define PCI_VENDOR_ID_SGI 0x10a9
129 #endif
130 #ifndef PCI_DEVICE_ID_SGI_ACENIC
131 #define PCI_DEVICE_ID_SGI_ACENIC 0x0009
132 #endif
133
134 static const struct pci_device_id acenic_pci_tbl[] = {
135 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
136 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
137 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
138 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
139 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985,
140 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
141 { PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620,
142 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
143 { PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T,
144 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
145 /*
146 * Farallon used the DEC vendor ID on their cards incorrectly,
147 * then later Alteon's ID.
148 */
149 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX,
150 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
151 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T,
152 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
153 { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC,
154 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
155 { }
156 };
157 MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
158
159 #define ace_sync_irq(irq) synchronize_irq(irq)
160
161 #ifndef offset_in_page
162 #define offset_in_page(ptr) ((unsigned long)(ptr) & ~PAGE_MASK)
163 #endif
164
165 #define ACE_MAX_MOD_PARMS 8
166 #define BOARD_IDX_STATIC 0
167 #define BOARD_IDX_OVERFLOW -1
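
/*
 * Note (summary of how these are used further down, see ace_init()):
 * BOARD_IDX_STATIC is used when the driver is built in, so the first
 * slot of each parameter array applies to every NIC; BOARD_IDX_OVERFLOW
 * marks boards beyond ACE_MAX_MOD_PARMS, for which the per-board module
 * parameters are ignored.
 */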
168
169 #include "acenic.h"
170
171 /*
172 * These must be defined before the firmware is included.
173 */
174 #define MAX_TEXT_LEN 96*1024
175 #define MAX_RODATA_LEN 8*1024
176 #define MAX_DATA_LEN 2*1024
177
178 #ifndef tigon2FwReleaseLocal
179 #define tigon2FwReleaseLocal 0
180 #endif
181
182 /*
183 * This driver currently supports Tigon I and Tigon II based cards
184 * including the Alteon AceNIC, the 3Com 3C985[B] and NetGear
185 * GA620. The driver should also work on the SGI, DEC and Farallon
186 * versions of the card, however I have not been able to test that
187 * myself.
188 *
189 * This card is really neat, it supports receive hardware checksumming
190 * and jumbo frames (up to 9000 bytes) and does a lot of work in the
191 * firmware. Also the programming interface is quite neat, except for
192 * the parts dealing with the i2c eeprom on the card ;-)
193 *
194 * Using jumbo frames:
195 *
196 * To enable jumbo frames, simply specify an mtu between 1500 and 9000
197 * bytes to ifconfig. Jumbo frames can be enabled or disabled at any time
198 * by running `ifconfig eth<X> mtu <MTU>' with <X> being the Ethernet
199 * interface number and <MTU> being the MTU value.
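 *
 * For example (the interface name here is hypothetical), either of
 *   ifconfig eth0 mtu 9000
 *   ip link set dev eth0 mtu 9000
 * should switch eth0 to jumbo frames; setting the MTU back to 1500
 * turns them off again.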
200 *
201 * Module parameters:
202 *
203 * When compiled as a loadable module, the driver allows for a number
204 * of module parameters to be specified. The driver supports the
205 * following module parameters:
206 *
207 * trace=<val> - Firmware trace level. This requires special traced
208 * firmware to replace the firmware supplied with
209 * the driver - for debugging purposes only.
210 *
211 * link=<val> - Link state. Normally you want to use the default link
212 * parameters set by the driver. This can be used to
213 * override these in case your switch doesn't negotiate
214 * the link properly. Valid values are:
215 * 0x0001 - Force half duplex link.
216 * 0x0002 - Do not negotiate line speed with the other end.
217 * 0x0010 - 10Mbit/sec link.
218 * 0x0020 - 100Mbit/sec link.
219 * 0x0040 - 1000Mbit/sec link.
220 * 0x0100 - Do not negotiate flow control.
221 * 0x0200 - Enable RX flow control Y
222 * 0x0400 - Enable TX flow control Y (Tigon II NICs only).
223 * Default value is 0x0270, ie. enable link+flow
224  *            control negotiation, negotiating the highest
225 * possible link speed with RX flow control enabled.
226 *
227 * When disabling link speed negotiation, only one link
228 * speed is allowed to be specified!
229 *
230 * tx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
231  *                      to wait for more packets to arrive before
232 * interrupting the host, from the time the first
233 * packet arrives.
234 *
235 * rx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
236  *                      to wait for more packets to arrive in the receive ring,
237  *                      before interrupting the host, after receiving the
238  *                      first packet in the ring.
239 *
240 * max_tx_desc=<val> - maximum number of transmit descriptors
241 * (packets) transmitted before interrupting the host.
242 *
243 * max_rx_desc=<val> - maximum number of receive descriptors
244 * (packets) received before interrupting the host.
245 *
246  * tx_ratio=<val> - 6 bit value (0 - 63) specifying the split in 64th
247  *           increments of the NIC's on board memory to be used for
248  *           transmit and receive buffers. For the 1MB NIC approx. 800KB
249  *           is available, on the 1/2MB NIC approx. 300KB is available.
250 * 68KB will always be available as a minimum for both
251 * directions. The default value is a 50/50 split.
252 * dis_pci_mem_inval=<val> - disable PCI memory write and invalidate
253 * operations, default (1) is to always disable this as
254 * that is what Alteon does on NT. I have not been able
255 * to measure any real performance differences with
256 * this on my systems. Set <val>=0 if you want to
257 * enable these operations.
258 *
259 * If you use more than one NIC, specify the parameters for the
260  * individual NICs with a comma, e.g. trace=0,0x00001fff,0 if you want to
261 * run tracing on NIC #2 but not on NIC #1 and #3.
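 *
 * As a concrete (hypothetical) invocation, the example above corresponds
 * to loading the module with:
 *   modprobe acenic trace=0,0x00001fff,0
 * i.e. one comma-separated value per NIC, in probe order.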
262 *
263 * TODO:
264 *
265 * - Proper multicast support.
266 * - NIC dump support.
267 * - More tuning parameters.
268 *
269 * The mini ring is not used under Linux and I am not sure it makes sense
270 * to actually use it.
271 *
272 * New interrupt handler strategy:
273 *
274 * The old interrupt handler worked using the traditional method of
275 * replacing an skbuff with a new one when a packet arrives. However
276 * the rx rings do not need to contain a static number of buffer
277 * descriptors, thus it makes sense to move the memory allocation out
278 * of the main interrupt handler and do it in a bottom half handler
279 * and only allocate new buffers when the number of buffers in the
280 * ring is below a certain threshold. In order to avoid starving the
281 * NIC under heavy load it is however necessary to force allocation
282  * when hitting a minimum threshold. The strategy for allocation is as
283 * follows:
284 *
285 * RX_LOW_BUF_THRES - allocate buffers in the bottom half
286 * RX_PANIC_LOW_THRES - we are very low on buffers, allocate
287 * the buffers in the interrupt handler
288 * RX_RING_THRES - maximum number of buffers in the rx ring
289 * RX_MINI_THRES - maximum number of buffers in the mini ring
290 * RX_JUMBO_THRES - maximum number of buffers in the jumbo ring
291 *
292  * One advantageous side effect of this allocation approach is that the
293 * entire rx processing can be done without holding any spin lock
294 * since the rx rings and registers are totally independent of the tx
295 * ring and its registers. This of course includes the kmalloc's of
296 * new skb's. Thus start_xmit can run in parallel with rx processing
297 * and the memory allocation on SMP systems.
298 *
299 * Note that running the skb reallocation in a bottom half opens up
300 * another can of races which needs to be handled properly. In
301 * particular it can happen that the interrupt handler tries to run
302 * the reallocation while the bottom half is either running on another
303 * CPU or was interrupted on the same CPU. To get around this the
304 * driver uses bitops to prevent the reallocation routines from being
305 * reentered.
306 *
307  * TX handling can also be done without holding any spin lock - whee,
308  * this is fun! - since tx_ret_csm is only written to by the interrupt
309 * handler. The case to be aware of is when shutting down the device
310 * and cleaning up where it is necessary to make sure that
311 * start_xmit() is not running while this is happening. Well DaveM
312 * informs me that this case is already protected against ... bye bye
313 * Mr. Spin Lock, it was nice to know you.
314 *
315 * TX interrupts are now partly disabled so the NIC will only generate
316 * TX interrupts for the number of coal ticks, not for the number of
317  * TX packets in the queue. This should reduce the number of TX-only
318  * interrupts, ie. those seen when no RX processing is done.
319 */
320
321 /*
322 * Threshold values for RX buffer allocation - the low water marks for
323 * when to start refilling the rings are set to 75% of the ring
324 * sizes. It seems to make sense to refill the rings entirely from the
325  * interrupt handler once it gets below the panic threshold, that way
326 * we don't risk that the refilling is moved to another CPU when the
327 * one running the interrupt handler just got the slab code hot in its
328 * cache.
329 */
330 #define RX_RING_SIZE 72
331 #define RX_MINI_SIZE 64
332 #define RX_JUMBO_SIZE 48
333
334 #define RX_PANIC_STD_THRES 16
335 #define RX_PANIC_STD_REFILL (3*RX_PANIC_STD_THRES)/2
336 #define RX_LOW_STD_THRES (3*RX_RING_SIZE)/4
337 #define RX_PANIC_MINI_THRES 12
338 #define RX_PANIC_MINI_REFILL (3*RX_PANIC_MINI_THRES)/2
339 #define RX_LOW_MINI_THRES (3*RX_MINI_SIZE)/4
340 #define RX_PANIC_JUMBO_THRES 6
341 #define RX_PANIC_JUMBO_REFILL (3*RX_PANIC_JUMBO_THRES)/2
342 #define RX_LOW_JUMBO_THRES (3*RX_JUMBO_SIZE)/4
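
/*
 * For reference, with the ring sizes above these macros evaluate to:
 * std: low 54 (of 72), panic 16, panic refill 24;
 * mini: low 48 (of 64), panic 12, panic refill 18;
 * jumbo: low 36 (of 48), panic 6, panic refill 9.
 */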
343
344
345 /*
346 * Size of the mini ring entries, basically these just should be big
347 * enough to take TCP ACKs
348 */
349 #define ACE_MINI_SIZE 100
350
351 #define ACE_MINI_BUFSIZE ACE_MINI_SIZE
352 #define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 4)
353 #define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 4)
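
/*
 * Assuming the usual ACE_STD_MTU of 1500 and ACE_JUMBO_MTU of 9000 from
 * acenic.h, these work out to 1518 and 9018 bytes; the extra 4 bytes
 * leave headroom beyond the Ethernet header, e.g. for a VLAN tag.
 */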
354
355 /*
356 * There seems to be a magic difference in the effect between 995 and 996
357 * but little difference between 900 and 995 ... no idea why.
358 *
359 * There is now a default set of tuning parameters which is set, depending
360 * on whether or not the user enables Jumbo frames. It's assumed that if
361 * Jumbo frames are enabled, the user wants optimal tuning for that case.
362 */
363 #define DEF_TX_COAL 400 /* 996 */
364 #define DEF_TX_MAX_DESC 60 /* was 40 */
365 #define DEF_RX_COAL 120 /* 1000 */
366 #define DEF_RX_MAX_DESC 25
367 #define DEF_TX_RATIO 21 /* 24 */
368
369 #define DEF_JUMBO_TX_COAL 20
370 #define DEF_JUMBO_TX_MAX_DESC 60
371 #define DEF_JUMBO_RX_COAL 30
372 #define DEF_JUMBO_RX_MAX_DESC 6
373 #define DEF_JUMBO_TX_RATIO 21
374
375 #if tigon2FwReleaseLocal < 20001118
376 /*
377 * Standard firmware and early modifications duplicate
378 * IRQ load without this flag (coal timer is never reset).
379 * Note that with this flag tx_coal should be less than
380 * time to xmit full tx ring.
381 * 400usec is not so bad for tx ring size of 128.
382 */
383 #define TX_COAL_INTS_ONLY 1 /* worth it */
384 #else
385 /*
386 * With modified firmware, this is not necessary, but still useful.
387 */
388 #define TX_COAL_INTS_ONLY 1
389 #endif
390
391 #define DEF_TRACE 0
392 #define DEF_STAT (2 * TICKS_PER_SEC)
393
394
395 static int link_state[ACE_MAX_MOD_PARMS];
396 static int trace[ACE_MAX_MOD_PARMS];
397 static int tx_coal_tick[ACE_MAX_MOD_PARMS];
398 static int rx_coal_tick[ACE_MAX_MOD_PARMS];
399 static int max_tx_desc[ACE_MAX_MOD_PARMS];
400 static int max_rx_desc[ACE_MAX_MOD_PARMS];
401 static int tx_ratio[ACE_MAX_MOD_PARMS];
402 static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};
403
404 MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
405 MODULE_LICENSE("GPL");
406 MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
407 #ifndef CONFIG_ACENIC_OMIT_TIGON_I
408 MODULE_FIRMWARE("acenic/tg1.bin");
409 #endif
410 MODULE_FIRMWARE("acenic/tg2.bin");
411
412 module_param_array_named(link, link_state, int, NULL, 0);
413 module_param_array(trace, int, NULL, 0);
414 module_param_array(tx_coal_tick, int, NULL, 0);
415 module_param_array(max_tx_desc, int, NULL, 0);
416 module_param_array(rx_coal_tick, int, NULL, 0);
417 module_param_array(max_rx_desc, int, NULL, 0);
418 module_param_array(tx_ratio, int, NULL, 0);
419 MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
420 MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
421 MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first tx descriptor arrives");
422 MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait for");
423 MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first rx descriptor arrives");
424 MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait for");
425 MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
426
427
428 static const char version[] =
429 "acenic.c: v0.92 08/05/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n"
430 " http://home.cern.ch/~jes/gige/acenic.html\n";
431
432 static int ace_get_settings(struct net_device *, struct ethtool_cmd *);
433 static int ace_set_settings(struct net_device *, struct ethtool_cmd *);
434 static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
435
436 static const struct ethtool_ops ace_ethtool_ops = {
437 .get_settings = ace_get_settings,
438 .set_settings = ace_set_settings,
439 .get_drvinfo = ace_get_drvinfo,
440 };
441
442 static void ace_watchdog(struct net_device *dev);
443
444 static const struct net_device_ops ace_netdev_ops = {
445 .ndo_open = ace_open,
446 .ndo_stop = ace_close,
447 .ndo_tx_timeout = ace_watchdog,
448 .ndo_get_stats = ace_get_stats,
449 .ndo_start_xmit = ace_start_xmit,
450 .ndo_set_rx_mode = ace_set_multicast_list,
451 .ndo_validate_addr = eth_validate_addr,
452 .ndo_set_mac_address = ace_set_mac_addr,
453 .ndo_change_mtu = ace_change_mtu,
454 };
455
456 static int acenic_probe_one(struct pci_dev *pdev,
457 const struct pci_device_id *id)
458 {
459 struct net_device *dev;
460 struct ace_private *ap;
461 static int boards_found;
462
463 dev = alloc_etherdev(sizeof(struct ace_private));
464 if (dev == NULL)
465 return -ENOMEM;
466
467 SET_NETDEV_DEV(dev, &pdev->dev);
468
469 ap = netdev_priv(dev);
470 ap->pdev = pdev;
471 ap->name = pci_name(pdev);
472
473 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
474 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
475
476 dev->watchdog_timeo = 5*HZ;
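/* MTU limits; the actual range checking is done by the net core */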
477 dev->min_mtu = 0;
478 dev->max_mtu = ACE_JUMBO_MTU;
479
480 dev->netdev_ops = &ace_netdev_ops;
481 dev->ethtool_ops = &ace_ethtool_ops;
482
483 /* we only display this string ONCE */
484 if (!boards_found)
485 printk(version);
486
487 if (pci_enable_device(pdev))
488 goto fail_free_netdev;
489
490 /*
491 * Enable master mode before we start playing with the
492 * pci_command word since pci_set_master() will modify
493 * it.
494 */
495 pci_set_master(pdev);
496
497 pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);
498
499	/* OpenFirmware on Macs does not set this - DOH.. */
500 if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
501 printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
502 "access - was not enabled by BIOS/Firmware\n",
503 ap->name);
504 ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
505 pci_write_config_word(ap->pdev, PCI_COMMAND,
506 ap->pci_command);
507 wmb();
508 }
509
510 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency);
511 if (ap->pci_latency <= 0x40) {
512 ap->pci_latency = 0x40;
513 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency);
514 }
515
516 /*
517 * Remap the regs into kernel space - this is abuse of
518	 * dev->base_addr since it was meant for I/O port
519 * addresses but who gives a damn.
520 */
521 dev->base_addr = pci_resource_start(pdev, 0);
522 ap->regs = ioremap(dev->base_addr, 0x4000);
523 if (!ap->regs) {
524 printk(KERN_ERR "%s: Unable to map I/O register, "
525 "AceNIC %i will be disabled.\n",
526 ap->name, boards_found);
527 goto fail_free_netdev;
528 }
529
530 switch(pdev->vendor) {
531 case PCI_VENDOR_ID_ALTEON:
532 if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
533 printk(KERN_INFO "%s: Farallon PN9100-T ",
534 ap->name);
535 } else {
536 printk(KERN_INFO "%s: Alteon AceNIC ",
537 ap->name);
538 }
539 break;
540 case PCI_VENDOR_ID_3COM:
541 printk(KERN_INFO "%s: 3Com 3C985 ", ap->name);
542 break;
543 case PCI_VENDOR_ID_NETGEAR:
544 printk(KERN_INFO "%s: NetGear GA620 ", ap->name);
545 break;
546 case PCI_VENDOR_ID_DEC:
547 if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
548 printk(KERN_INFO "%s: Farallon PN9000-SX ",
549 ap->name);
550 break;
551 }
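		/* fall through: other DEC-ID boards end up reported as SGI AceNICs */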
552 case PCI_VENDOR_ID_SGI:
553 printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
554 break;
555 default:
556 printk(KERN_INFO "%s: Unknown AceNIC ", ap->name);
557 break;
558 }
559
560 printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
561 printk("irq %d\n", pdev->irq);
562
563 #ifdef CONFIG_ACENIC_OMIT_TIGON_I
564 if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
565 printk(KERN_ERR "%s: Driver compiled without Tigon I"
566 " support - NIC disabled\n", dev->name);
567 goto fail_uninit;
568 }
569 #endif
570
571 if (ace_allocate_descriptors(dev))
572 goto fail_free_netdev;
573
574 #ifdef MODULE
575 if (boards_found >= ACE_MAX_MOD_PARMS)
576 ap->board_idx = BOARD_IDX_OVERFLOW;
577 else
578 ap->board_idx = boards_found;
579 #else
580 ap->board_idx = BOARD_IDX_STATIC;
581 #endif
582
583 if (ace_init(dev))
584 goto fail_free_netdev;
585
586 if (register_netdev(dev)) {
587 printk(KERN_ERR "acenic: device registration failed\n");
588 goto fail_uninit;
589 }
590 ap->name = dev->name;
591
592 if (ap->pci_using_dac)
593 dev->features |= NETIF_F_HIGHDMA;
594
595 pci_set_drvdata(pdev, dev);
596
597 boards_found++;
598 return 0;
599
600 fail_uninit:
601 ace_init_cleanup(dev);
602 fail_free_netdev:
603 free_netdev(dev);
604 return -ENODEV;
605 }
606
607 static void acenic_remove_one(struct pci_dev *pdev)
608 {
609 struct net_device *dev = pci_get_drvdata(pdev);
610 struct ace_private *ap = netdev_priv(dev);
611 struct ace_regs __iomem *regs = ap->regs;
612 short i;
613
614 unregister_netdev(dev);
615
616 writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
617 if (ap->version >= 2)
618 writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
619
620 /*
621 * This clears any pending interrupts
622 */
623 writel(1, &regs->Mb0Lo);
624 readl(&regs->CpuCtrl); /* flush */
625
626 /*
627 * Make sure no other CPUs are processing interrupts
628 * on the card before the buffers are being released.
629 * Otherwise one might experience some `interesting'
630 * effects.
631 *
632 * Then release the RX buffers - jumbo buffers were
633 * already released in ace_close().
634 */
635 ace_sync_irq(dev->irq);
636
637 for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
638 struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
639
640 if (skb) {
641 struct ring_info *ringp;
642 dma_addr_t mapping;
643
644 ringp = &ap->skb->rx_std_skbuff[i];
645 mapping = dma_unmap_addr(ringp, mapping);
646 pci_unmap_page(ap->pdev, mapping,
647 ACE_STD_BUFSIZE,
648 PCI_DMA_FROMDEVICE);
649
650 ap->rx_std_ring[i].size = 0;
651 ap->skb->rx_std_skbuff[i].skb = NULL;
652 dev_kfree_skb(skb);
653 }
654 }
655
656 if (ap->version >= 2) {
657 for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
658 struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
659
660 if (skb) {
661 struct ring_info *ringp;
662 dma_addr_t mapping;
663
664 ringp = &ap->skb->rx_mini_skbuff[i];
665 mapping = dma_unmap_addr(ringp,mapping);
666 pci_unmap_page(ap->pdev, mapping,
667 ACE_MINI_BUFSIZE,
668 PCI_DMA_FROMDEVICE);
669
670 ap->rx_mini_ring[i].size = 0;
671 ap->skb->rx_mini_skbuff[i].skb = NULL;
672 dev_kfree_skb(skb);
673 }
674 }
675 }
676
677 for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
678 struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
679 if (skb) {
680 struct ring_info *ringp;
681 dma_addr_t mapping;
682
683 ringp = &ap->skb->rx_jumbo_skbuff[i];
684 mapping = dma_unmap_addr(ringp, mapping);
685 pci_unmap_page(ap->pdev, mapping,
686 ACE_JUMBO_BUFSIZE,
687 PCI_DMA_FROMDEVICE);
688
689 ap->rx_jumbo_ring[i].size = 0;
690 ap->skb->rx_jumbo_skbuff[i].skb = NULL;
691 dev_kfree_skb(skb);
692 }
693 }
694
695 ace_init_cleanup(dev);
696 free_netdev(dev);
697 }
698
699 static struct pci_driver acenic_pci_driver = {
700 .name = "acenic",
701 .id_table = acenic_pci_tbl,
702 .probe = acenic_probe_one,
703 .remove = acenic_remove_one,
704 };
705
706 static void ace_free_descriptors(struct net_device *dev)
707 {
708 struct ace_private *ap = netdev_priv(dev);
709 int size;
710
711 if (ap->rx_std_ring != NULL) {
712 size = (sizeof(struct rx_desc) *
713 (RX_STD_RING_ENTRIES +
714 RX_JUMBO_RING_ENTRIES +
715 RX_MINI_RING_ENTRIES +
716 RX_RETURN_RING_ENTRIES));
717 pci_free_consistent(ap->pdev, size, ap->rx_std_ring,
718 ap->rx_ring_base_dma);
719 ap->rx_std_ring = NULL;
720 ap->rx_jumbo_ring = NULL;
721 ap->rx_mini_ring = NULL;
722 ap->rx_return_ring = NULL;
723 }
724 if (ap->evt_ring != NULL) {
725 size = (sizeof(struct event) * EVT_RING_ENTRIES);
726 pci_free_consistent(ap->pdev, size, ap->evt_ring,
727 ap->evt_ring_dma);
728 ap->evt_ring = NULL;
729 }
730 if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
731 size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
732 pci_free_consistent(ap->pdev, size, ap->tx_ring,
733 ap->tx_ring_dma);
734 }
735 ap->tx_ring = NULL;
736
737 if (ap->evt_prd != NULL) {
738 pci_free_consistent(ap->pdev, sizeof(u32),
739 (void *)ap->evt_prd, ap->evt_prd_dma);
740 ap->evt_prd = NULL;
741 }
742 if (ap->rx_ret_prd != NULL) {
743 pci_free_consistent(ap->pdev, sizeof(u32),
744 (void *)ap->rx_ret_prd,
745 ap->rx_ret_prd_dma);
746 ap->rx_ret_prd = NULL;
747 }
748 if (ap->tx_csm != NULL) {
749 pci_free_consistent(ap->pdev, sizeof(u32),
750 (void *)ap->tx_csm, ap->tx_csm_dma);
751 ap->tx_csm = NULL;
752 }
753 }
754
755
756 static int ace_allocate_descriptors(struct net_device *dev)
757 {
758 struct ace_private *ap = netdev_priv(dev);
759 int size;
760
761 size = (sizeof(struct rx_desc) *
762 (RX_STD_RING_ENTRIES +
763 RX_JUMBO_RING_ENTRIES +
764 RX_MINI_RING_ENTRIES +
765 RX_RETURN_RING_ENTRIES));
766
767 ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size,
768 &ap->rx_ring_base_dma);
769 if (ap->rx_std_ring == NULL)
770 goto fail;
771
772 ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
773 ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
774 ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;
775
776 size = (sizeof(struct event) * EVT_RING_ENTRIES);
777
778 ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma);
779
780 if (ap->evt_ring == NULL)
781 goto fail;
782
783 /*
784 * Only allocate a host TX ring for the Tigon II, the Tigon I
785 * has to use PCI registers for this ;-(
786 */
787 if (!ACE_IS_TIGON_I(ap)) {
788 size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
789
790 ap->tx_ring = pci_alloc_consistent(ap->pdev, size,
791 &ap->tx_ring_dma);
792
793 if (ap->tx_ring == NULL)
794 goto fail;
795 }
796
797 ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
798 &ap->evt_prd_dma);
799 if (ap->evt_prd == NULL)
800 goto fail;
801
802 ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
803 &ap->rx_ret_prd_dma);
804 if (ap->rx_ret_prd == NULL)
805 goto fail;
806
807 ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32),
808 &ap->tx_csm_dma);
809 if (ap->tx_csm == NULL)
810 goto fail;
811
812 return 0;
813
814 fail:
815 /* Clean up. */
816 ace_init_cleanup(dev);
817 return 1;
818 }
819
820
821 /*
822 * Generic cleanup handling data allocated during init. Used when the
823 * module is unloaded or if an error occurs during initialization
824 */
825 static void ace_init_cleanup(struct net_device *dev)
826 {
827 struct ace_private *ap;
828
829 ap = netdev_priv(dev);
830
831 ace_free_descriptors(dev);
832
833 if (ap->info)
834 pci_free_consistent(ap->pdev, sizeof(struct ace_info),
835 ap->info, ap->info_dma);
836 kfree(ap->skb);
837 kfree(ap->trace_buf);
838
839 if (dev->irq)
840 free_irq(dev->irq, dev);
841
842 iounmap(ap->regs);
843 }
844
845
846 /*
847 * Commands are considered to be slow.
848 */
849 static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd)
850 {
851 u32 idx;
852
853 idx = readl(&regs->CmdPrd);
854
855 writel(*(u32 *)(cmd), &regs->CmdRng[idx]);
856 idx = (idx + 1) % CMD_RING_ENTRIES;
857
858 writel(idx, &regs->CmdPrd);
859 }
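
/*
 * Typical use, as seen in the ring-refill paths further down:
 *
 *	struct cmd cmd;
 *	cmd.evt = C_SET_RX_PRD_IDX;
 *	cmd.code = 0;
 *	cmd.idx = ap->rx_std_skbprd;
 *	ace_issue_cmd(regs, &cmd);
 */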
860
861
862 static int ace_init(struct net_device *dev)
863 {
864 struct ace_private *ap;
865 struct ace_regs __iomem *regs;
866 struct ace_info *info = NULL;
867 struct pci_dev *pdev;
868 unsigned long myjif;
869 u64 tmp_ptr;
870 u32 tig_ver, mac1, mac2, tmp, pci_state;
871 int board_idx, ecode = 0;
872 short i;
873 unsigned char cache_size;
874
875 ap = netdev_priv(dev);
876 regs = ap->regs;
877
878 board_idx = ap->board_idx;
879
880 /*
881	 * aman@sgi.com - it's useful to do a NIC reset here to
882 * address the `Firmware not running' problem subsequent
883 * to any crashes involving the NIC
884 */
885 writel(HW_RESET | (HW_RESET << 24), &regs->HostCtrl);
886 readl(&regs->HostCtrl); /* PCI write posting */
887 udelay(5);
888
889 /*
890 * Don't access any other registers before this point!
891 */
892 #ifdef __BIG_ENDIAN
893 /*
894 * This will most likely need BYTE_SWAP once we switch
895 * to using __raw_writel()
896 */
897 writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)),
898 &regs->HostCtrl);
899 #else
900 writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
901 &regs->HostCtrl);
902 #endif
903 readl(&regs->HostCtrl); /* PCI write posting */
904
905 /*
906 * Stop the NIC CPU and clear pending interrupts
907 */
908 writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
909 readl(&regs->CpuCtrl); /* PCI write posting */
910 writel(0, &regs->Mb0Lo);
911
912 tig_ver = readl(&regs->HostCtrl) >> 28;
913
914 switch(tig_ver){
915 #ifndef CONFIG_ACENIC_OMIT_TIGON_I
916 case 4:
917 case 5:
918 printk(KERN_INFO " Tigon I (Rev. %i), Firmware: %i.%i.%i, ",
919 tig_ver, ap->firmware_major, ap->firmware_minor,
920 ap->firmware_fix);
921 writel(0, &regs->LocalCtrl);
922 ap->version = 1;
923 ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
924 break;
925 #endif
926 case 6:
927 printk(KERN_INFO " Tigon II (Rev. %i), Firmware: %i.%i.%i, ",
928 tig_ver, ap->firmware_major, ap->firmware_minor,
929 ap->firmware_fix);
930 writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
931 readl(&regs->CpuBCtrl); /* PCI write posting */
932 /*
933 * The SRAM bank size does _not_ indicate the amount
934 * of memory on the card, it controls the _bank_ size!
935 * Ie. a 1MB AceNIC will have two banks of 512KB.
936 */
937 writel(SRAM_BANK_512K, &regs->LocalCtrl);
938 writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
939 ap->version = 2;
940 ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
941 break;
942 default:
943 printk(KERN_WARNING " Unsupported Tigon version detected "
944 "(%i)\n", tig_ver);
945 ecode = -ENODEV;
946 goto init_error;
947 }
948
949 /*
950 * ModeStat _must_ be set after the SRAM settings as this change
951	 * seems to corrupt the ModeStat and possibly other registers.
952 * The SRAM settings survive resets and setting it to the same
953 * value a second time works as well. This is what caused the
954 * `Firmware not running' problem on the Tigon II.
955 */
956 #ifdef __BIG_ENDIAN
957 writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
958 ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
959 #else
960 writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
961 ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
962 #endif
963 readl(&regs->ModeStat); /* PCI write posting */
964
965 mac1 = 0;
966 for(i = 0; i < 4; i++) {
967 int t;
968
969 mac1 = mac1 << 8;
970 t = read_eeprom_byte(dev, 0x8c+i);
971 if (t < 0) {
972 ecode = -EIO;
973 goto init_error;
974 } else
975 mac1 |= (t & 0xff);
976 }
977 mac2 = 0;
978 for(i = 4; i < 8; i++) {
979 int t;
980
981 mac2 = mac2 << 8;
982 t = read_eeprom_byte(dev, 0x8c+i);
983 if (t < 0) {
984 ecode = -EIO;
985 goto init_error;
986 } else
987 mac2 |= (t & 0xff);
988 }
989
990 writel(mac1, &regs->MacAddrHi);
991 writel(mac2, &regs->MacAddrLo);
992
993 dev->dev_addr[0] = (mac1 >> 8) & 0xff;
994 dev->dev_addr[1] = mac1 & 0xff;
995 dev->dev_addr[2] = (mac2 >> 24) & 0xff;
996 dev->dev_addr[3] = (mac2 >> 16) & 0xff;
997 dev->dev_addr[4] = (mac2 >> 8) & 0xff;
998 dev->dev_addr[5] = mac2 & 0xff;
999
1000 printk("MAC: %pM\n", dev->dev_addr);
1001
1002 /*
1003 * Looks like this is necessary to deal with on all architectures,
1004 * even this %$#%$# N440BX Intel based thing doesn't get it right.
1005 * Ie. having two NICs in the machine, one will have the cache
1006 * line set at boot time, the other will not.
1007 */
1008 pdev = ap->pdev;
1009 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size);
1010 cache_size <<= 2;
1011 if (cache_size != SMP_CACHE_BYTES) {
1012 printk(KERN_INFO " PCI cache line size set incorrectly "
1013 "(%i bytes) by BIOS/FW, ", cache_size);
1014 if (cache_size > SMP_CACHE_BYTES)
1015 printk("expecting %i\n", SMP_CACHE_BYTES);
1016 else {
1017 printk("correcting to %i\n", SMP_CACHE_BYTES);
1018 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
1019 SMP_CACHE_BYTES >> 2);
1020 }
1021 }
1022
1023 pci_state = readl(&regs->PciState);
1024 printk(KERN_INFO " PCI bus width: %i bits, speed: %iMHz, "
1025 "latency: %i clks\n",
1026 (pci_state & PCI_32BIT) ? 32 : 64,
1027 (pci_state & PCI_66MHZ) ? 66 : 33,
1028 ap->pci_latency);
1029
1030 /*
1031 * Set the max DMA transfer size. Seems that for most systems
1032 * the performance is better when no MAX parameter is
1033 * set. However for systems enabling PCI write and invalidate,
1034 * DMA writes must be set to the L1 cache line size to get
1035 * optimal performance.
1036 *
1037 * The default is now to turn the PCI write and invalidate off
1038 * - that is what Alteon does for NT.
1039 */
1040 tmp = READ_CMD_MEM | WRITE_CMD_MEM;
1041 if (ap->version >= 2) {
1042 tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ));
1043 /*
1044 * Tuning parameters only supported for 8 cards
1045 */
1046 if (board_idx == BOARD_IDX_OVERFLOW ||
1047 dis_pci_mem_inval[board_idx]) {
1048 if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
1049 ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
1050 pci_write_config_word(pdev, PCI_COMMAND,
1051 ap->pci_command);
1052 printk(KERN_INFO " Disabling PCI memory "
1053 "write and invalidate\n");
1054 }
1055 } else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
1056 printk(KERN_INFO " PCI memory write & invalidate "
1057 "enabled by BIOS, enabling counter measures\n");
1058
1059 switch(SMP_CACHE_BYTES) {
1060 case 16:
1061 tmp |= DMA_WRITE_MAX_16;
1062 break;
1063 case 32:
1064 tmp |= DMA_WRITE_MAX_32;
1065 break;
1066 case 64:
1067 tmp |= DMA_WRITE_MAX_64;
1068 break;
1069 case 128:
1070 tmp |= DMA_WRITE_MAX_128;
1071 break;
1072 default:
1073 printk(KERN_INFO " Cache line size %i not "
1074 "supported, PCI write and invalidate "
1075 "disabled\n", SMP_CACHE_BYTES);
1076 ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
1077 pci_write_config_word(pdev, PCI_COMMAND,
1078 ap->pci_command);
1079 }
1080 }
1081 }
1082
1083 #ifdef __sparc__
1084 /*
1085 * On this platform, we know what the best dma settings
1086 * are. We use 64-byte maximum bursts, because if we
1087 * burst larger than the cache line size (or even cross
1088 * a 64byte boundary in a single burst) the UltraSparc
1089 * PCI controller will disconnect at 64-byte multiples.
1090 *
1091 * Read-multiple will be properly enabled above, and when
1092 * set will give the PCI controller proper hints about
1093 * prefetching.
1094 */
1095 tmp &= ~DMA_READ_WRITE_MASK;
1096 tmp |= DMA_READ_MAX_64;
1097 tmp |= DMA_WRITE_MAX_64;
1098 #endif
1099 #ifdef __alpha__
1100 tmp &= ~DMA_READ_WRITE_MASK;
1101 tmp |= DMA_READ_MAX_128;
1102 /*
1103 * All the docs say MUST NOT. Well, I did.
1104 * Nothing terrible happens, if we load wrong size.
1105 * Bit w&i still works better!
1106 */
1107 tmp |= DMA_WRITE_MAX_128;
1108 #endif
1109 writel(tmp, &regs->PciState);
1110
1111 #if 0
1112 /*
1113 * The Host PCI bus controller driver has to set FBB.
1114 * If all devices on that PCI bus support FBB, then the controller
1115 * can enable FBB support in the Host PCI Bus controller (or on
1116 * the PCI-PCI bridge if that applies).
1117 * -ggg
1118 */
1119 /*
1120 * I have received reports from people having problems when this
1121 * bit is enabled.
1122 */
1123 if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) {
1124 printk(KERN_INFO " Enabling PCI Fast Back to Back\n");
1125 ap->pci_command |= PCI_COMMAND_FAST_BACK;
1126 pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command);
1127 }
1128 #endif
1129
1130 /*
1131 * Configure DMA attributes.
1132 */
1133 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1134 ap->pci_using_dac = 1;
1135 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
1136 ap->pci_using_dac = 0;
1137 } else {
1138 ecode = -ENODEV;
1139 goto init_error;
1140 }
1141
1142 /*
1143 * Initialize the generic info block and the command+event rings
1144 * and the control blocks for the transmit and receive rings
1145 * as they need to be setup once and for all.
1146 */
1147 if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
1148 &ap->info_dma))) {
1149 ecode = -EAGAIN;
1150 goto init_error;
1151 }
1152 ap->info = info;
1153
1154 /*
1155 * Get the memory for the skb rings.
1156 */
1157 if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
1158 ecode = -EAGAIN;
1159 goto init_error;
1160 }
1161
1162 ecode = request_irq(pdev->irq, ace_interrupt, IRQF_SHARED,
1163 DRV_NAME, dev);
1164 if (ecode) {
1165 printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
1166 DRV_NAME, pdev->irq);
1167 goto init_error;
1168 } else
1169 dev->irq = pdev->irq;
1170
1171 #ifdef INDEX_DEBUG
1172 spin_lock_init(&ap->debug_lock);
1173 ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
1174 ap->last_std_rx = 0;
1175 ap->last_mini_rx = 0;
1176 #endif
1177
1178 memset(ap->info, 0, sizeof(struct ace_info));
1179 memset(ap->skb, 0, sizeof(struct ace_skb));
1180
1181 ecode = ace_load_firmware(dev);
1182 if (ecode)
1183 goto init_error;
1184
1185 ap->fw_running = 0;
1186
1187 tmp_ptr = ap->info_dma;
1188 writel(tmp_ptr >> 32, &regs->InfoPtrHi);
1189 writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);
1190
1191 memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));
1192
1193 set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
1194 info->evt_ctrl.flags = 0;
1195
1196 *(ap->evt_prd) = 0;
1197 wmb();
1198 set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
1199 writel(0, &regs->EvtCsm);
1200
1201 set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
1202 info->cmd_ctrl.flags = 0;
1203 info->cmd_ctrl.max_len = 0;
1204
1205 for (i = 0; i < CMD_RING_ENTRIES; i++)
1206 writel(0, &regs->CmdRng[i]);
1207
1208 writel(0, &regs->CmdPrd);
1209 writel(0, &regs->CmdCsm);
1210
1211 tmp_ptr = ap->info_dma;
1212 tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
1213 set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);
1214
1215 set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
1216 info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
1217 info->rx_std_ctrl.flags =
1218 RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
1219
1220 memset(ap->rx_std_ring, 0,
1221 RX_STD_RING_ENTRIES * sizeof(struct rx_desc));
1222
1223 for (i = 0; i < RX_STD_RING_ENTRIES; i++)
1224 ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;
1225
1226 ap->rx_std_skbprd = 0;
1227 atomic_set(&ap->cur_rx_bufs, 0);
1228
1229 set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
1230 (ap->rx_ring_base_dma +
1231 (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
1232 info->rx_jumbo_ctrl.max_len = 0;
1233 info->rx_jumbo_ctrl.flags =
1234 RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
1235
1236 memset(ap->rx_jumbo_ring, 0,
1237 RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));
1238
1239 for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
1240 ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;
1241
1242 ap->rx_jumbo_skbprd = 0;
1243 atomic_set(&ap->cur_jumbo_bufs, 0);
1244
1245 memset(ap->rx_mini_ring, 0,
1246 RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));
1247
1248 if (ap->version >= 2) {
1249 set_aceaddr(&info->rx_mini_ctrl.rngptr,
1250 (ap->rx_ring_base_dma +
1251 (sizeof(struct rx_desc) *
1252 (RX_STD_RING_ENTRIES +
1253 RX_JUMBO_RING_ENTRIES))));
1254 info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
1255 info->rx_mini_ctrl.flags =
1256 RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|RCB_FLG_VLAN_ASSIST;
1257
1258 for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
1259 ap->rx_mini_ring[i].flags =
1260 BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
1261 } else {
1262 set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
1263 info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
1264 info->rx_mini_ctrl.max_len = 0;
1265 }
1266
1267 ap->rx_mini_skbprd = 0;
1268 atomic_set(&ap->cur_mini_bufs, 0);
1269
1270 set_aceaddr(&info->rx_return_ctrl.rngptr,
1271 (ap->rx_ring_base_dma +
1272 (sizeof(struct rx_desc) *
1273 (RX_STD_RING_ENTRIES +
1274 RX_JUMBO_RING_ENTRIES +
1275 RX_MINI_RING_ENTRIES))));
1276 info->rx_return_ctrl.flags = 0;
1277 info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;
1278
1279 memset(ap->rx_return_ring, 0,
1280 RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));
1281
1282 set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
1283 *(ap->rx_ret_prd) = 0;
1284
1285 writel(TX_RING_BASE, &regs->WinBase);
1286
1287 if (ACE_IS_TIGON_I(ap)) {
1288 ap->tx_ring = (__force struct tx_desc *) regs->Window;
1289 for (i = 0; i < (TIGON_I_TX_RING_ENTRIES
1290 * sizeof(struct tx_desc)) / sizeof(u32); i++)
1291 writel(0, (__force void __iomem *)ap->tx_ring + i * 4);
1292
1293 set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
1294 } else {
1295 memset(ap->tx_ring, 0,
1296 MAX_TX_RING_ENTRIES * sizeof(struct tx_desc));
1297
1298 set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
1299 }
1300
1301 info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
1302 tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
1303
1304 /*
1305 * The Tigon I does not like having the TX ring in host memory ;-(
1306 */
1307 if (!ACE_IS_TIGON_I(ap))
1308 tmp |= RCB_FLG_TX_HOST_RING;
1309 #if TX_COAL_INTS_ONLY
1310 tmp |= RCB_FLG_COAL_INT_ONLY;
1311 #endif
1312 info->tx_ctrl.flags = tmp;
1313
1314 set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);
1315
1316 /*
1317 * Potential item for tuning parameter
1318 */
1319 #if 0 /* NO */
1320 writel(DMA_THRESH_16W, &regs->DmaReadCfg);
1321 writel(DMA_THRESH_16W, &regs->DmaWriteCfg);
1322 #else
1323 writel(DMA_THRESH_8W, &regs->DmaReadCfg);
1324 writel(DMA_THRESH_8W, &regs->DmaWriteCfg);
1325 #endif
1326
1327 writel(0, &regs->MaskInt);
1328 writel(1, &regs->IfIdx);
1329 #if 0
1330 /*
1331 * McKinley boxes do not like us fiddling with AssistState
1332 * this early
1333 */
1334 writel(1, &regs->AssistState);
1335 #endif
1336
1337 writel(DEF_STAT, &regs->TuneStatTicks);
1338 writel(DEF_TRACE, &regs->TuneTrace);
1339
1340 ace_set_rxtx_parms(dev, 0);
1341
1342 if (board_idx == BOARD_IDX_OVERFLOW) {
1343 printk(KERN_WARNING "%s: more than %i NICs detected, "
1344 "ignoring module parameters!\n",
1345 ap->name, ACE_MAX_MOD_PARMS);
1346 } else if (board_idx >= 0) {
1347 if (tx_coal_tick[board_idx])
1348 writel(tx_coal_tick[board_idx],
1349 &regs->TuneTxCoalTicks);
1350 if (max_tx_desc[board_idx])
1351 writel(max_tx_desc[board_idx], &regs->TuneMaxTxDesc);
1352
1353 if (rx_coal_tick[board_idx])
1354 writel(rx_coal_tick[board_idx],
1355 &regs->TuneRxCoalTicks);
1356 if (max_rx_desc[board_idx])
1357 writel(max_rx_desc[board_idx], &regs->TuneMaxRxDesc);
1358
1359 if (trace[board_idx])
1360 writel(trace[board_idx], &regs->TuneTrace);
1361
1362 if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64))
1363 writel(tx_ratio[board_idx], &regs->TxBufRat);
1364 }
1365
1366 /*
1367 * Default link parameters
1368 */
1369 tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB |
1370 LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE;
1371 if(ap->version >= 2)
1372 tmp |= LNK_TX_FLOW_CTL_Y;
1373
1374 /*
1375 * Override link default parameters
1376 */
1377 if ((board_idx >= 0) && link_state[board_idx]) {
1378 int option = link_state[board_idx];
1379
1380 tmp = LNK_ENABLE;
1381
1382 if (option & 0x01) {
1383 printk(KERN_INFO "%s: Setting half duplex link\n",
1384 ap->name);
1385 tmp &= ~LNK_FULL_DUPLEX;
1386 }
1387 if (option & 0x02)
1388 tmp &= ~LNK_NEGOTIATE;
1389 if (option & 0x10)
1390 tmp |= LNK_10MB;
1391 if (option & 0x20)
1392 tmp |= LNK_100MB;
1393 if (option & 0x40)
1394 tmp |= LNK_1000MB;
1395 if ((option & 0x70) == 0) {
1396 printk(KERN_WARNING "%s: No media speed specified, "
1397 "forcing auto negotiation\n", ap->name);
1398 tmp |= LNK_NEGOTIATE | LNK_1000MB |
1399 LNK_100MB | LNK_10MB;
1400 }
1401 if ((option & 0x100) == 0)
1402 tmp |= LNK_NEG_FCTL;
1403 else
1404 printk(KERN_INFO "%s: Disabling flow control "
1405 "negotiation\n", ap->name);
1406 if (option & 0x200)
1407 tmp |= LNK_RX_FLOW_CTL_Y;
1408 if ((option & 0x400) && (ap->version >= 2)) {
1409 printk(KERN_INFO "%s: Enabling TX flow control\n",
1410 ap->name);
1411 tmp |= LNK_TX_FLOW_CTL_Y;
1412 }
1413 }
1414
1415 ap->link = tmp;
1416 writel(tmp, &regs->TuneLink);
1417 if (ap->version >= 2)
1418 writel(tmp, &regs->TuneFastLink);
1419
1420 writel(ap->firmware_start, &regs->Pc);
1421
1422 writel(0, &regs->Mb0Lo);
1423
1424 /*
1425 * Set tx_csm before we start receiving interrupts, otherwise
1426 * the interrupt handler might think it is supposed to process
1427 * tx ints before we are up and running, which may cause a null
1428 * pointer access in the int handler.
1429 */
1430 ap->cur_rx = 0;
1431 ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;
1432
1433 wmb();
1434 ace_set_txprd(regs, ap, 0);
1435 writel(0, &regs->RxRetCsm);
1436
1437 /*
1438 * Enable DMA engine now.
1439	 * If we do this sooner, the McKinley box pukes.
1440 * I assume it's because Tigon II DMA engine wants to check
1441 * *something* even before the CPU is started.
1442 */
1443 writel(1, &regs->AssistState); /* enable DMA */
1444
1445 /*
1446 * Start the NIC CPU
1447 */
1448 writel(readl(&regs->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), &regs->CpuCtrl);
1449 readl(&regs->CpuCtrl);
1450
1451 /*
1452 * Wait for the firmware to spin up - max 3 seconds.
1453 */
1454 myjif = jiffies + 3 * HZ;
1455 while (time_before(jiffies, myjif) && !ap->fw_running)
1456 cpu_relax();
1457
1458 if (!ap->fw_running) {
1459 printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name);
1460
1461 ace_dump_trace(ap);
1462 writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
1463 readl(&regs->CpuCtrl);
1464
1465 /* aman@sgi.com - account for badly behaving firmware/NIC:
1466 * - have observed that the NIC may continue to generate
1467 * interrupts for some reason; attempt to stop it - halt
1468 * second CPU for Tigon II cards, and also clear Mb0
1469 * - if we're a module, we'll fail to load if this was
1470 * the only GbE card in the system => if the kernel does
1471 * see an interrupt from the NIC, code to handle it is
1472 * gone and OOps! - so free_irq also
1473 */
1474 if (ap->version >= 2)
1475 writel(readl(&regs->CpuBCtrl) | CPU_HALT,
1476 &regs->CpuBCtrl);
1477 writel(0, &regs->Mb0Lo);
1478 readl(&regs->Mb0Lo);
1479
1480 ecode = -EBUSY;
1481 goto init_error;
1482 }
1483
1484 /*
1485	 * We load the ring here as there seems to be no way to tell the
1486 * firmware to wipe the ring without re-initializing it.
1487 */
1488 if (!test_and_set_bit(0, &ap->std_refill_busy))
1489 ace_load_std_rx_ring(dev, RX_RING_SIZE);
1490 else
1491 printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
1492 ap->name);
1493 if (ap->version >= 2) {
1494 if (!test_and_set_bit(0, &ap->mini_refill_busy))
1495 ace_load_mini_rx_ring(dev, RX_MINI_SIZE);
1496 else
1497 printk(KERN_ERR "%s: Someone is busy refilling "
1498 "the RX mini ring\n", ap->name);
1499 }
1500 return 0;
1501
1502 init_error:
1503 ace_init_cleanup(dev);
1504 return ecode;
1505 }
1506
1507
1508 static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
1509 {
1510 struct ace_private *ap = netdev_priv(dev);
1511 struct ace_regs __iomem *regs = ap->regs;
1512 int board_idx = ap->board_idx;
1513
1514 if (board_idx >= 0) {
1515 if (!jumbo) {
1516 if (!tx_coal_tick[board_idx])
1517 writel(DEF_TX_COAL, &regs->TuneTxCoalTicks);
1518 if (!max_tx_desc[board_idx])
1519 writel(DEF_TX_MAX_DESC, &regs->TuneMaxTxDesc);
1520 if (!rx_coal_tick[board_idx])
1521 writel(DEF_RX_COAL, &regs->TuneRxCoalTicks);
1522 if (!max_rx_desc[board_idx])
1523 writel(DEF_RX_MAX_DESC, &regs->TuneMaxRxDesc);
1524 if (!tx_ratio[board_idx])
1525 writel(DEF_TX_RATIO, &regs->TxBufRat);
1526 } else {
1527 if (!tx_coal_tick[board_idx])
1528 writel(DEF_JUMBO_TX_COAL,
1529 &regs->TuneTxCoalTicks);
1530 if (!max_tx_desc[board_idx])
1531 writel(DEF_JUMBO_TX_MAX_DESC,
1532 &regs->TuneMaxTxDesc);
1533 if (!rx_coal_tick[board_idx])
1534 writel(DEF_JUMBO_RX_COAL,
1535 &regs->TuneRxCoalTicks);
1536 if (!max_rx_desc[board_idx])
1537 writel(DEF_JUMBO_RX_MAX_DESC,
1538 &regs->TuneMaxRxDesc);
1539 if (!tx_ratio[board_idx])
1540 writel(DEF_JUMBO_TX_RATIO, &regs->TxBufRat);
1541 }
1542 }
1543 }
1544
1545
1546 static void ace_watchdog(struct net_device *data)
1547 {
1548 struct net_device *dev = data;
1549 struct ace_private *ap = netdev_priv(dev);
1550 struct ace_regs __iomem *regs = ap->regs;
1551
1552 /*
1553 * We haven't received a stats update event for more than 2.5
1554 * seconds and there is data in the transmit queue, thus we
1555 * assume the card is stuck.
1556 */
1557 if (*ap->tx_csm != ap->tx_ret_csm) {
1558 printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
1559 dev->name, (unsigned int)readl(&regs->HostCtrl));
1560 /* This can happen due to ieee flow control. */
1561 } else {
1562 printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n",
1563 dev->name);
1564 #if 0
1565 netif_wake_queue(dev);
1566 #endif
1567 }
1568 }
1569
1570
1571 static void ace_tasklet(unsigned long arg)
1572 {
1573 struct net_device *dev = (struct net_device *) arg;
1574 struct ace_private *ap = netdev_priv(dev);
1575 int cur_size;
1576
1577 cur_size = atomic_read(&ap->cur_rx_bufs);
1578 if ((cur_size < RX_LOW_STD_THRES) &&
1579 !test_and_set_bit(0, &ap->std_refill_busy)) {
1580 #ifdef DEBUG
1581 printk("refilling buffers (current %i)\n", cur_size);
1582 #endif
1583 ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size);
1584 }
1585
1586 if (ap->version >= 2) {
1587 cur_size = atomic_read(&ap->cur_mini_bufs);
1588 if ((cur_size < RX_LOW_MINI_THRES) &&
1589 !test_and_set_bit(0, &ap->mini_refill_busy)) {
1590 #ifdef DEBUG
1591 printk("refilling mini buffers (current %i)\n",
1592 cur_size);
1593 #endif
1594 ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size);
1595 }
1596 }
1597
1598 cur_size = atomic_read(&ap->cur_jumbo_bufs);
1599 if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
1600 !test_and_set_bit(0, &ap->jumbo_refill_busy)) {
1601 #ifdef DEBUG
1602 printk("refilling jumbo buffers (current %i)\n", cur_size);
1603 #endif
1604 ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
1605 }
1606 ap->tasklet_pending = 0;
1607 }
1608
1609
1610 /*
1611 * Copy the contents of the NIC's trace buffer to kernel memory.
1612 */
1613 static void ace_dump_trace(struct ace_private *ap)
1614 {
1615 #if 0
1616 if (!ap->trace_buf)
1617 if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL)))
1618 return;
1619 #endif
1620 }
1621
1622
1623 /*
1624 * Load the standard rx ring.
1625 *
1626 * Loading rings is safe without holding the spin lock since this is
1627  * done either before the device is enabled (thus no interrupts are
1628  * generated) or from the interrupt handler/tasklet handler itself.
1629 */
1630 static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
1631 {
1632 struct ace_private *ap = netdev_priv(dev);
1633 struct ace_regs __iomem *regs = ap->regs;
1634 short i, idx;
1635
1636
1637 prefetchw(&ap->cur_rx_bufs);
1638
1639 idx = ap->rx_std_skbprd;
1640
1641 for (i = 0; i < nr_bufs; i++) {
1642 struct sk_buff *skb;
1643 struct rx_desc *rd;
1644 dma_addr_t mapping;
1645
1646 skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE);
1647 if (!skb)
1648 break;
1649
1650 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
1651 offset_in_page(skb->data),
1652 ACE_STD_BUFSIZE,
1653 PCI_DMA_FROMDEVICE);
1654 ap->skb->rx_std_skbuff[idx].skb = skb;
1655 dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
1656 mapping, mapping);
1657
1658 rd = &ap->rx_std_ring[idx];
1659 set_aceaddr(&rd->addr, mapping);
1660 rd->size = ACE_STD_BUFSIZE;
1661 rd->idx = idx;
1662 idx = (idx + 1) % RX_STD_RING_ENTRIES;
1663 }
1664
1665 if (!i)
1666 goto error_out;
1667
1668 atomic_add(i, &ap->cur_rx_bufs);
1669 ap->rx_std_skbprd = idx;
1670
1671 if (ACE_IS_TIGON_I(ap)) {
1672 struct cmd cmd;
1673 cmd.evt = C_SET_RX_PRD_IDX;
1674 cmd.code = 0;
1675 cmd.idx = ap->rx_std_skbprd;
1676 ace_issue_cmd(regs, &cmd);
1677 } else {
1678 writel(idx, &regs->RxStdPrd);
1679 wmb();
1680 }
1681
1682 out:
1683 clear_bit(0, &ap->std_refill_busy);
1684 return;
1685
1686 error_out:
1687 printk(KERN_INFO "Out of memory when allocating "
1688 "standard receive buffers\n");
1689 goto out;
1690 }
1691
1692
1693 static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs)
1694 {
1695 struct ace_private *ap = netdev_priv(dev);
1696 struct ace_regs __iomem *regs = ap->regs;
1697 short i, idx;
1698
1699 prefetchw(&ap->cur_mini_bufs);
1700
1701 idx = ap->rx_mini_skbprd;
1702 for (i = 0; i < nr_bufs; i++) {
1703 struct sk_buff *skb;
1704 struct rx_desc *rd;
1705 dma_addr_t mapping;
1706
1707 skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE);
1708 if (!skb)
1709 break;
1710
1711 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
1712 offset_in_page(skb->data),
1713 ACE_MINI_BUFSIZE,
1714 PCI_DMA_FROMDEVICE);
1715 ap->skb->rx_mini_skbuff[idx].skb = skb;
1716 dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
1717 mapping, mapping);
1718
1719 rd = &ap->rx_mini_ring[idx];
1720 set_aceaddr(&rd->addr, mapping);
1721 rd->size = ACE_MINI_BUFSIZE;
1722 rd->idx = idx;
1723 idx = (idx + 1) % RX_MINI_RING_ENTRIES;
1724 }
1725
1726 if (!i)
1727 goto error_out;
1728
1729 atomic_add(i, &ap->cur_mini_bufs);
1730
1731 ap->rx_mini_skbprd = idx;
1732
1733 writel(idx, &regs->RxMiniPrd);
1734 wmb();
1735
1736 out:
1737 clear_bit(0, &ap->mini_refill_busy);
1738 return;
1739 error_out:
1740 printk(KERN_INFO "Out of memory when allocating "
1741 "mini receive buffers\n");
1742 goto out;
1743 }
1744
1745
1746 /*
1747  * Load the jumbo rx ring; this may happen at any time if the MTU
1748 * is changed to a value > 1500.
1749 */
1750 static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs)
1751 {
1752 struct ace_private *ap = netdev_priv(dev);
1753 struct ace_regs __iomem *regs = ap->regs;
1754 short i, idx;
1755
1756 idx = ap->rx_jumbo_skbprd;
1757
1758 for (i = 0; i < nr_bufs; i++) {
1759 struct sk_buff *skb;
1760 struct rx_desc *rd;
1761 dma_addr_t mapping;
1762
1763 skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE);
1764 if (!skb)
1765 break;
1766
1767 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
1768 offset_in_page(skb->data),
1769 ACE_JUMBO_BUFSIZE,
1770 PCI_DMA_FROMDEVICE);
1771 ap->skb->rx_jumbo_skbuff[idx].skb = skb;
1772 dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
1773 mapping, mapping);
1774
1775 rd = &ap->rx_jumbo_ring[idx];
1776 set_aceaddr(&rd->addr, mapping);
1777 rd->size = ACE_JUMBO_BUFSIZE;
1778 rd->idx = idx;
1779 idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
1780 }
1781
1782 if (!i)
1783 goto error_out;
1784
1785 atomic_add(i, &ap->cur_jumbo_bufs);
1786 ap->rx_jumbo_skbprd = idx;
1787
1788 if (ACE_IS_TIGON_I(ap)) {
1789 struct cmd cmd;
1790 cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
1791 cmd.code = 0;
1792 cmd.idx = ap->rx_jumbo_skbprd;
1793 ace_issue_cmd(regs, &cmd);
1794 } else {
1795 writel(idx, &regs->RxJumboPrd);
1796 wmb();
1797 }
1798
1799 out:
1800 clear_bit(0, &ap->jumbo_refill_busy);
1801 return;
1802 error_out:
1803 if (net_ratelimit())
1804 printk(KERN_INFO "Out of memory when allocating "
1805 "jumbo receive buffers\n");
1806 goto out;
1807 }
1808
1809
1810 /*
1811 * All events are considered to be slow (RX/TX ints do not generate
1812 * events) and are handled here, outside the main interrupt handler,
1813 * to reduce the size of the handler.
1814 */
1815 static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
1816 {
1817 struct ace_private *ap;
1818
1819 ap = netdev_priv(dev);
1820
1821 while (evtcsm != evtprd) {
1822 switch (ap->evt_ring[evtcsm].evt) {
1823 case E_FW_RUNNING:
1824 printk(KERN_INFO "%s: Firmware up and running\n",
1825 ap->name);
1826 ap->fw_running = 1;
1827 wmb();
1828 break;
1829 case E_STATS_UPDATED:
1830 break;
1831 case E_LNK_STATE:
1832 {
1833 u16 code = ap->evt_ring[evtcsm].code;
1834 switch (code) {
1835 case E_C_LINK_UP:
1836 {
1837 u32 state = readl(&ap->regs->GigLnkState);
1838 printk(KERN_WARNING "%s: Optical link UP "
1839 "(%s Duplex, Flow Control: %s%s)\n",
1840 ap->name,
1841 state & LNK_FULL_DUPLEX ? "Full":"Half",
1842 state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
1843 state & LNK_RX_FLOW_CTL_Y ? "RX" : "");
1844 break;
1845 }
1846 case E_C_LINK_DOWN:
1847 printk(KERN_WARNING "%s: Optical link DOWN\n",
1848 ap->name);
1849 break;
1850 case E_C_LINK_10_100:
1851 printk(KERN_WARNING "%s: 10/100BaseT link "
1852 "UP\n", ap->name);
1853 break;
1854 default:
1855 printk(KERN_ERR "%s: Unknown optical link "
1856 "state %02x\n", ap->name, code);
1857 }
1858 break;
1859 }
1860 case E_ERROR:
1861 switch (ap->evt_ring[evtcsm].code) {
1862 case E_C_ERR_INVAL_CMD:
1863 printk(KERN_ERR "%s: invalid command error\n",
1864 ap->name);
1865 break;
1866 case E_C_ERR_UNIMP_CMD:
1867 printk(KERN_ERR "%s: unimplemented command "
1868 "error\n", ap->name);
1869 break;
1870 case E_C_ERR_BAD_CFG:
1871 printk(KERN_ERR "%s: bad config error\n",
1872 ap->name);
1873 break;
1874 default:
1875 printk(KERN_ERR "%s: unknown error %02x\n",
1876 ap->name, ap->evt_ring[evtcsm].code);
1877 }
1878 break;
1879 case E_RESET_JUMBO_RNG:
1880 {
1881 int i;
1882 for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
1883 if (ap->skb->rx_jumbo_skbuff[i].skb) {
1884 ap->rx_jumbo_ring[i].size = 0;
1885 set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
1886 dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
1887 ap->skb->rx_jumbo_skbuff[i].skb = NULL;
1888 }
1889 }
1890
1891 if (ACE_IS_TIGON_I(ap)) {
1892 struct cmd cmd;
1893 cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
1894 cmd.code = 0;
1895 cmd.idx = 0;
1896 ace_issue_cmd(ap->regs, &cmd);
1897 } else {
1898 writel(0, &((ap->regs)->RxJumboPrd));
1899 wmb();
1900 }
1901
1902 ap->jumbo = 0;
1903 ap->rx_jumbo_skbprd = 0;
1904 printk(KERN_INFO "%s: Jumbo ring flushed\n",
1905 ap->name);
1906 clear_bit(0, &ap->jumbo_refill_busy);
1907 break;
1908 }
1909 default:
1910 printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
1911 ap->name, ap->evt_ring[evtcsm].evt);
1912 }
1913 evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
1914 }
1915
1916 return evtcsm;
1917 }
1918
1919
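/*
 * Process the RX return ring from rxretcsm up to rxretprd: look up the
 * originating standard/mini/jumbo buffer for each returned descriptor,
 * unmap it, propagate the checksum and VLAN tag information and hand
 * the skb to netif_rx().  The buffer counters are updated so the
 * refill logic in the interrupt handler knows when the rings run low.
 */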
1920 static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
1921 {
1922 struct ace_private *ap = netdev_priv(dev);
1923 u32 idx;
1924 int mini_count = 0, std_count = 0;
1925
1926 idx = rxretcsm;
1927
1928 prefetchw(&ap->cur_rx_bufs);
1929 prefetchw(&ap->cur_mini_bufs);
1930
1931 while (idx != rxretprd) {
1932 struct ring_info *rip;
1933 struct sk_buff *skb;
1934 struct rx_desc *rxdesc, *retdesc;
1935 u32 skbidx;
1936 int bd_flags, desc_type, mapsize;
1937 u16 csum;
1938
1939
1940 /* make sure the rx descriptor isn't read before rxretprd */
1941 if (idx == rxretcsm)
1942 rmb();
1943
1944 retdesc = &ap->rx_return_ring[idx];
1945 skbidx = retdesc->idx;
1946 bd_flags = retdesc->flags;
1947 desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI);
1948
1949 switch (desc_type) {
1950 /*
1951 * Normal frames do not have any flags set
1952 *
1953 * Mini and normal frames arrive frequently,
1954 * so use a local counter to avoid doing
1955 * atomic operations for each packet arriving.
1956 */
1957 case 0:
1958 rip = &ap->skb->rx_std_skbuff[skbidx];
1959 mapsize = ACE_STD_BUFSIZE;
1960 rxdesc = &ap->rx_std_ring[skbidx];
1961 std_count++;
1962 break;
1963 case BD_FLG_JUMBO:
1964 rip = &ap->skb->rx_jumbo_skbuff[skbidx];
1965 mapsize = ACE_JUMBO_BUFSIZE;
1966 rxdesc = &ap->rx_jumbo_ring[skbidx];
1967 atomic_dec(&ap->cur_jumbo_bufs);
1968 break;
1969 case BD_FLG_MINI:
1970 rip = &ap->skb->rx_mini_skbuff[skbidx];
1971 mapsize = ACE_MINI_BUFSIZE;
1972 rxdesc = &ap->rx_mini_ring[skbidx];
1973 mini_count++;
1974 break;
1975 default:
1976 printk(KERN_INFO "%s: unknown frame type (0x%02x) "
1977 "returned by NIC\n", dev->name,
1978 retdesc->flags);
1979 goto error;
1980 }
1981
1982 skb = rip->skb;
1983 rip->skb = NULL;
1984 pci_unmap_page(ap->pdev,
1985 dma_unmap_addr(rip, mapping),
1986 mapsize,
1987 PCI_DMA_FROMDEVICE);
1988 skb_put(skb, retdesc->size);
1989
1990 /*
1991 * Fly baby, fly!
1992 */
1993 csum = retdesc->tcp_udp_csum;
1994
1995 skb->protocol = eth_type_trans(skb, dev);
1996
1997 /*
1998 * Instead of forcing the poor tigon mips cpu to calculate
1999 * pseudo hdr checksum, we do this ourselves.
2000 */
2001 if (bd_flags & BD_FLG_TCP_UDP_SUM) {
2002 skb->csum = htons(csum);
2003 skb->ip_summed = CHECKSUM_COMPLETE;
2004 } else {
2005 skb_checksum_none_assert(skb);
2006 }
2007
2008 /* send it up */
2009 if ((bd_flags & BD_FLG_VLAN_TAG))
2010 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), retdesc->vlan);
2011 netif_rx(skb);
2012
2013 dev->stats.rx_packets++;
2014 dev->stats.rx_bytes += retdesc->size;
2015
2016 idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
2017 }
2018
2019 atomic_sub(std_count, &ap->cur_rx_bufs);
2020 if (!ACE_IS_TIGON_I(ap))
2021 atomic_sub(mini_count, &ap->cur_mini_bufs);
2022
2023 out:
2024 /*
2025 * According to the documentation RxRetCsm is obsolete with
2026 * the 12.3.x Firmware - my Tigon I NICs seem to disagree!
2027 */
2028 if (ACE_IS_TIGON_I(ap)) {
2029 writel(idx, &ap->regs->RxRetCsm);
2030 }
2031 ap->cur_rx = idx;
2032
2033 return;
2034 error:
2035 idx = rxretprd;
2036 goto out;
2037 }
2038
2039
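/*
 * Reclaim transmitted descriptors between the current consumer index
 * and txcsm (the consumer index reported by the NIC): unmap each
 * fragment, free the skb on its last fragment, update the stats and
 * wake the queue if it was stopped.
 */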
2040 static inline void ace_tx_int(struct net_device *dev,
2041 u32 txcsm, u32 idx)
2042 {
2043 struct ace_private *ap = netdev_priv(dev);
2044
2045 do {
2046 struct sk_buff *skb;
2047 struct tx_ring_info *info;
2048
2049 info = ap->skb->tx_skbuff + idx;
2050 skb = info->skb;
2051
2052 if (dma_unmap_len(info, maplen)) {
2053 pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
2054 dma_unmap_len(info, maplen),
2055 PCI_DMA_TODEVICE);
2056 dma_unmap_len_set(info, maplen, 0);
2057 }
2058
2059 if (skb) {
2060 dev->stats.tx_packets++;
2061 dev->stats.tx_bytes += skb->len;
2062 dev_kfree_skb_irq(skb);
2063 info->skb = NULL;
2064 }
2065
2066 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2067 } while (idx != txcsm);
2068
2069 if (netif_queue_stopped(dev))
2070 netif_wake_queue(dev);
2071
2072 wmb();
2073 ap->tx_ret_csm = txcsm;
2074
2075 /* So... tx_ret_csm is advanced _after_ check for device wakeup.
2076 *
2077 * We could try to advance it before. In that case we would get
2078 * the following race condition: hard_start_xmit on another cpu
2079 * enters after we have advanced tx_ret_csm and fills the space
2080 * we have just freed, so we end up waking the device illegally.
2081 * There is no good way to work around this (the check at entry
2082 * to ace_start_xmit detects this condition and prevents
2083 * ring corruption, but it is not a good workaround.)
2084 *
2085 * When tx_ret_csm is advanced afterwards, we wake the device up
2086 * _only_ if we really have some space in the ring (though the core
2087 * doing hard_start_xmit can see a full ring for some period and has
2088 * to synchronize.) Superb.
2089 * BUT! We get another subtle race condition. hard_start_xmit
2090 * may think that the ring is full between the wakeup and the advance
2091 * of tx_ret_csm and will stop the device instantly! That is not so
2092 * bad: we are guaranteed that there is something in the ring, so the
2093 * next irq will resume transmission. To speed this up we could
2094 * mark the descriptor which closes the ring with BD_FLG_COAL_NOW
2095 * (see ace_start_xmit).
2096 *
2097 * Well, this dilemma exists in all lock-free drivers.
2098 * Following the scheme used in drivers by Donald Becker, we
2099 * select the least dangerous option.
2100 * --ANK
2101 */
2102 }
2103
2104
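/*
 * Main interrupt handler.  The interrupt is acknowledged by writing
 * mailbox 0, then the RX return ring, the TX consumer index and the
 * event ring are processed.  Finally, if any of the standard, mini or
 * jumbo RX rings has dropped below its low-water mark it is topped up,
 * either directly from the handler (below the panic threshold) or via
 * the refill tasklet.
 */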
2105 static irqreturn_t ace_interrupt(int irq, void *dev_id)
2106 {
2107 struct net_device *dev = (struct net_device *)dev_id;
2108 struct ace_private *ap = netdev_priv(dev);
2109 struct ace_regs __iomem *regs = ap->regs;
2110 u32 idx;
2111 u32 txcsm, rxretcsm, rxretprd;
2112 u32 evtcsm, evtprd;
2113
2114 /*
2115 * In case of PCI shared interrupts or spurious interrupts,
2116 * we want to make sure it is actually our interrupt before
2117 * spending any time in here.
2118 */
2119 if (!(readl(&regs->HostCtrl) & IN_INT))
2120 return IRQ_NONE;
2121
2122 /*
2123 * ACK intr now. Otherwise we will lose updates to rx_ret_prd,
2124 * which happened _after_ rxretprd = *ap->rx_ret_prd; but before
2125 * writel(0, &regs->Mb0Lo).
2126 *
2127 * "IRQ avoidance" recommended in docs applies to IRQs served by
2128 * threads and it is wrong even for that case.
2129 */
2130 writel(0, &regs->Mb0Lo);
2131 readl(&regs->Mb0Lo);
2132
2133 /*
2134 * There is no conflict between transmit handling in
2135 * start_xmit and receive processing, thus there is no reason
2136 * to take a spin lock for RX handling. Wait until we start
2137 * working on the other stuff - hey we don't need a spin lock
2138 * anymore.
2139 */
2140 rxretprd = *ap->rx_ret_prd;
2141 rxretcsm = ap->cur_rx;
2142
2143 if (rxretprd != rxretcsm)
2144 ace_rx_int(dev, rxretprd, rxretcsm);
2145
2146 txcsm = *ap->tx_csm;
2147 idx = ap->tx_ret_csm;
2148
2149 if (txcsm != idx) {
2150 /*
2151 * If each skb takes only one descriptor this check degenerates
2152 * to identity, because new space has just been opened.
2153 * But if skbs are fragmented we must check that this index
2154 * update releases enough space; otherwise we just
2155 * wait for the device to complete more work.
2156 */
2157 if (!tx_ring_full(ap, txcsm, ap->tx_prd))
2158 ace_tx_int(dev, txcsm, idx);
2159 }
2160
2161 evtcsm = readl(&regs->EvtCsm);
2162 evtprd = *ap->evt_prd;
2163
2164 if (evtcsm != evtprd) {
2165 evtcsm = ace_handle_event(dev, evtcsm, evtprd);
2166 writel(evtcsm, &regs->EvtCsm);
2167 }
2168
2169 /*
2170 * This has to go last in the interrupt handler and run with
2171 * the spin lock released ... what lock?
2172 */
2173 if (netif_running(dev)) {
2174 int cur_size;
2175 int run_tasklet = 0;
2176
2177 cur_size = atomic_read(&ap->cur_rx_bufs);
2178 if (cur_size < RX_LOW_STD_THRES) {
2179 if ((cur_size < RX_PANIC_STD_THRES) &&
2180 !test_and_set_bit(0, &ap->std_refill_busy)) {
2181 #ifdef DEBUG
2182 printk("low on std buffers %i\n", cur_size);
2183 #endif
2184 ace_load_std_rx_ring(dev,
2185 RX_RING_SIZE - cur_size);
2186 } else
2187 run_tasklet = 1;
2188 }
2189
2190 if (!ACE_IS_TIGON_I(ap)) {
2191 cur_size = atomic_read(&ap->cur_mini_bufs);
2192 if (cur_size < RX_LOW_MINI_THRES) {
2193 if ((cur_size < RX_PANIC_MINI_THRES) &&
2194 !test_and_set_bit(0,
2195 &ap->mini_refill_busy)) {
2196 #ifdef DEBUG
2197 printk("low on mini buffers %i\n",
2198 cur_size);
2199 #endif
2200 ace_load_mini_rx_ring(dev,
2201 RX_MINI_SIZE - cur_size);
2202 } else
2203 run_tasklet = 1;
2204 }
2205 }
2206
2207 if (ap->jumbo) {
2208 cur_size = atomic_read(&ap->cur_jumbo_bufs);
2209 if (cur_size < RX_LOW_JUMBO_THRES) {
2210 if ((cur_size < RX_PANIC_JUMBO_THRES) &&
2211 !test_and_set_bit(0,
2212 &ap->jumbo_refill_busy)){
2213 #ifdef DEBUG
2214 printk("low on jumbo buffers %i\n",
2215 cur_size);
2216 #endif
2217 ace_load_jumbo_rx_ring(dev,
2218 RX_JUMBO_SIZE - cur_size);
2219 } else
2220 run_tasklet = 1;
2221 }
2222 }
2223 if (run_tasklet && !ap->tasklet_pending) {
2224 ap->tasklet_pending = 1;
2225 tasklet_schedule(&ap->ace_tasklet);
2226 }
2227 }
2228
2229 return IRQ_HANDLED;
2230 }
2231
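/*
 * Bring the interface up: program the MTU, clear the firmware
 * statistics, tell the firmware that the host stack is up, load the
 * jumbo ring if jumbo frames are enabled, sync the promiscuous mode
 * setting and finally start the TX queue and set up the RX refill
 * tasklet.
 */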
2232 static int ace_open(struct net_device *dev)
2233 {
2234 struct ace_private *ap = netdev_priv(dev);
2235 struct ace_regs __iomem *regs = ap->regs;
2236 struct cmd cmd;
2237
2238 if (!(ap->fw_running)) {
2239 printk(KERN_WARNING "%s: Firmware not running!\n", dev->name);
2240 return -EBUSY;
2241 }
2242
2243 writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu);
2244
2245 cmd.evt = C_CLEAR_STATS;
2246 cmd.code = 0;
2247 cmd.idx = 0;
2248 ace_issue_cmd(regs, &cmd);
2249
2250 cmd.evt = C_HOST_STATE;
2251 cmd.code = C_C_STACK_UP;
2252 cmd.idx = 0;
2253 ace_issue_cmd(regs, &cmd);
2254
2255 if (ap->jumbo &&
2256 !test_and_set_bit(0, &ap->jumbo_refill_busy))
2257 ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
2258
2259 if (dev->flags & IFF_PROMISC) {
2260 cmd.evt = C_SET_PROMISC_MODE;
2261 cmd.code = C_C_PROMISC_ENABLE;
2262 cmd.idx = 0;
2263 ace_issue_cmd(regs, &cmd);
2264
2265 ap->promisc = 1;
2266 } else
2267 ap->promisc = 0;
2268 ap->mcast_all = 0;
2269
2270 #if 0
2271 cmd.evt = C_LNK_NEGOTIATION;
2272 cmd.code = 0;
2273 cmd.idx = 0;
2274 ace_issue_cmd(regs, &cmd);
2275 #endif
2276
2277 netif_start_queue(dev);
2278
2279 /*
2280 * Setup the bottom half rx ring refill handler
2281 */
2282 tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev);
2283 return 0;
2284 }
2285
2286
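/*
 * Take the interface down: stop the TX queue, drop out of promiscuous
 * mode, tell the firmware the host stack is going down and kill the
 * refill tasklet.  Then, with the irq masked, every outstanding TX
 * buffer is unmapped and freed and, if jumbo frames were in use, the
 * firmware is asked to flush the jumbo ring.
 */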
2287 static int ace_close(struct net_device *dev)
2288 {
2289 struct ace_private *ap = netdev_priv(dev);
2290 struct ace_regs __iomem *regs = ap->regs;
2291 struct cmd cmd;
2292 unsigned long flags;
2293 short i;
2294
2295 /*
2296 * Without (or before) releasing the irq and stopping the hardware,
2297 * this is absolute nonsense, by the way. It will be reset instantly
2298 * by the first irq.
2299 */
2300 netif_stop_queue(dev);
2301
2302
2303 if (ap->promisc) {
2304 cmd.evt = C_SET_PROMISC_MODE;
2305 cmd.code = C_C_PROMISC_DISABLE;
2306 cmd.idx = 0;
2307 ace_issue_cmd(regs, &cmd);
2308 ap->promisc = 0;
2309 }
2310
2311 cmd.evt = C_HOST_STATE;
2312 cmd.code = C_C_STACK_DOWN;
2313 cmd.idx = 0;
2314 ace_issue_cmd(regs, &cmd);
2315
2316 tasklet_kill(&ap->ace_tasklet);
2317
2318 /*
2319 * Make sure one CPU is not processing packets while
2320 * buffers are being released by another.
2321 */
2322
2323 local_irq_save(flags);
2324 ace_mask_irq(dev);
2325
2326 for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
2327 struct sk_buff *skb;
2328 struct tx_ring_info *info;
2329
2330 info = ap->skb->tx_skbuff + i;
2331 skb = info->skb;
2332
2333 if (dma_unmap_len(info, maplen)) {
2334 if (ACE_IS_TIGON_I(ap)) {
2335 /* NB: TIGON_1 is special, tx_ring is in io space */
2336 struct tx_desc __iomem *tx;
2337 tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i];
2338 writel(0, &tx->addr.addrhi);
2339 writel(0, &tx->addr.addrlo);
2340 writel(0, &tx->flagsize);
2341 } else
2342 memset(ap->tx_ring + i, 0,
2343 sizeof(struct tx_desc));
2344 pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
2345 dma_unmap_len(info, maplen),
2346 PCI_DMA_TODEVICE);
2347 dma_unmap_len_set(info, maplen, 0);
2348 }
2349 if (skb) {
2350 dev_kfree_skb(skb);
2351 info->skb = NULL;
2352 }
2353 }
2354
2355 if (ap->jumbo) {
2356 cmd.evt = C_RESET_JUMBO_RNG;
2357 cmd.code = 0;
2358 cmd.idx = 0;
2359 ace_issue_cmd(regs, &cmd);
2360 }
2361
2362 ace_unmask_irq(dev);
2363 local_irq_restore(flags);
2364
2365 return 0;
2366 }
2367
2368
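/*
 * DMA-map the linear part of a TX skb and record the mapping in the
 * per-descriptor tx_ring_info.  'tail' is the skb to be freed when
 * this descriptor completes: the skb itself for a linear frame, or
 * NULL for the head of a fragmented frame (only the last fragment
 * frees the skb).
 */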
2369 static inline dma_addr_t
2370 ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
2371 struct sk_buff *tail, u32 idx)
2372 {
2373 dma_addr_t mapping;
2374 struct tx_ring_info *info;
2375
2376 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
2377 offset_in_page(skb->data),
2378 skb->len, PCI_DMA_TODEVICE);
2379
2380 info = ap->skb->tx_skbuff + idx;
2381 info->skb = tail;
2382 dma_unmap_addr_set(info, mapping, mapping);
2383 dma_unmap_len_set(info, maplen, skb->len);
2384 return mapping;
2385 }
2386
2387
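/*
 * Write one TX buffer descriptor.  On Tigon I the TX ring lives in
 * the NIC's own memory and must be written with writel(); on Tigon II
 * it is in host memory and can be filled in directly.
 */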
2388 static inline void
2389 ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr,
2390 u32 flagsize, u32 vlan_tag)
2391 {
2392 #if !USE_TX_COAL_NOW
2393 flagsize &= ~BD_FLG_COAL_NOW;
2394 #endif
2395
2396 if (ACE_IS_TIGON_I(ap)) {
2397 struct tx_desc __iomem *io = (__force struct tx_desc __iomem *) desc;
2398 writel(addr >> 32, &io->addr.addrhi);
2399 writel(addr & 0xffffffff, &io->addr.addrlo);
2400 writel(flagsize, &io->flagsize);
2401 writel(vlan_tag, &io->vlanres);
2402 } else {
2403 desc->addr.addrhi = addr >> 32;
2404 desc->addr.addrlo = addr;
2405 desc->flagsize = flagsize;
2406 desc->vlanres = vlan_tag;
2407 }
2408 }
2409
2410
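/*
 * Transmit path.  A linear skb takes a single descriptor; a
 * fragmented skb takes one descriptor for the header plus one per
 * page fragment, with BD_FLG_END set only on the last descriptor.
 * If this frame fills the ring, BD_FLG_COAL_NOW is set and the queue
 * is stopped, then re-checked once in case an interrupt freed space
 * in the meantime.  The path is lock-free; see the comments below and
 * in ace_tx_int for the races involved.
 */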
2411 static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
2412 struct net_device *dev)
2413 {
2414 struct ace_private *ap = netdev_priv(dev);
2415 struct ace_regs __iomem *regs = ap->regs;
2416 struct tx_desc *desc;
2417 u32 idx, flagsize;
2418 unsigned long maxjiff = jiffies + 3*HZ;
2419
2420 restart:
2421 idx = ap->tx_prd;
2422
2423 if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2424 goto overflow;
2425
2426 if (!skb_shinfo(skb)->nr_frags) {
2427 dma_addr_t mapping;
2428 u32 vlan_tag = 0;
2429
2430 mapping = ace_map_tx_skb(ap, skb, skb, idx);
2431 flagsize = (skb->len << 16) | (BD_FLG_END);
2432 if (skb->ip_summed == CHECKSUM_PARTIAL)
2433 flagsize |= BD_FLG_TCP_UDP_SUM;
2434 if (skb_vlan_tag_present(skb)) {
2435 flagsize |= BD_FLG_VLAN_TAG;
2436 vlan_tag = skb_vlan_tag_get(skb);
2437 }
2438 desc = ap->tx_ring + idx;
2439 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2440
2441 /* Look at ace_tx_int for explanations. */
2442 if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2443 flagsize |= BD_FLG_COAL_NOW;
2444
2445 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
2446 } else {
2447 dma_addr_t mapping;
2448 u32 vlan_tag = 0;
2449 int i, len = 0;
2450
2451 mapping = ace_map_tx_skb(ap, skb, NULL, idx);
2452 flagsize = (skb_headlen(skb) << 16);
2453 if (skb->ip_summed == CHECKSUM_PARTIAL)
2454 flagsize |= BD_FLG_TCP_UDP_SUM;
2455 if (skb_vlan_tag_present(skb)) {
2456 flagsize |= BD_FLG_VLAN_TAG;
2457 vlan_tag = skb_vlan_tag_get(skb);
2458 }
2459
2460 ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
2461
2462 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2463
2464 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2465 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2466 struct tx_ring_info *info;
2467
2468 len += skb_frag_size(frag);
2469 info = ap->skb->tx_skbuff + idx;
2470 desc = ap->tx_ring + idx;
2471
2472 mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0,
2473 skb_frag_size(frag),
2474 DMA_TO_DEVICE);
2475
2476 flagsize = skb_frag_size(frag) << 16;
2477 if (skb->ip_summed == CHECKSUM_PARTIAL)
2478 flagsize |= BD_FLG_TCP_UDP_SUM;
2479 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2480
2481 if (i == skb_shinfo(skb)->nr_frags - 1) {
2482 flagsize |= BD_FLG_END;
2483 if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2484 flagsize |= BD_FLG_COAL_NOW;
2485
2486 /*
2487 * Only the last fragment frees
2488 * the skb!
2489 */
2490 info->skb = skb;
2491 } else {
2492 info->skb = NULL;
2493 }
2494 dma_unmap_addr_set(info, mapping, mapping);
2495 dma_unmap_len_set(info, maplen, skb_frag_size(frag));
2496 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
2497 }
2498 }
2499
2500 wmb();
2501 ap->tx_prd = idx;
2502 ace_set_txprd(regs, ap, idx);
2503
2504 if (flagsize & BD_FLG_COAL_NOW) {
2505 netif_stop_queue(dev);
2506
2507 /*
2508 * A TX-descriptor producer (an IRQ) might have gotten in
2509 * between, making the ring free again. Since xmit is
2510 * serialized, this is the only situation we have to
2511 * re-test.
2512 */
2513 if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
2514 netif_wake_queue(dev);
2515 }
2516
2517 return NETDEV_TX_OK;
2518
2519 overflow:
2520 /*
2521 * This race condition is unavoidable with lock-free drivers.
2522 * We wake up the queue _before_ tx_prd is advanced, so we can
2523 * enter hard_start_xmit too early, while the tx ring still looks full.
2524 * This happens ~1-4 times per 100000 packets, so it is acceptable
2525 * to loop here, syncing with the other CPU. Probably we need an
2526 * additional wmb() in ace_tx_int as well.
2527 *
2528 * Note that this race is relieved by reserving one more entry
2529 * in the tx ring than is strictly necessary (see the original non-SG
2530 * driver). However, with SG we need to reserve 2*MAX_SKB_FRAGS+1,
2531 * which is already overkill.
2532 *
2533 * The alternative is to return NETDEV_TX_BUSY without stopping the
2534 * queue; in that case the loop just becomes longer, with no benefit.
2535 */
2536 if (time_before(jiffies, maxjiff)) {
2537 barrier();
2538 cpu_relax();
2539 goto restart;
2540 }
2541
2542 /* The ring is stuck full. */
2543 printk(KERN_WARNING "%s: Transmit ring stuck full\n", dev->name);
2544 return NETDEV_TX_BUSY;
2545 }
2546
2547
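/*
 * Change the MTU.  Switching to an MTU above ACE_STD_MTU enables
 * jumbo frame support and populates the jumbo ring; switching back
 * down tells the firmware to flush the jumbo ring again (the actual
 * teardown is completed when the E_RESET_JUMBO_RNG event arrives).
 */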
2548 static int ace_change_mtu(struct net_device *dev, int new_mtu)
2549 {
2550 struct ace_private *ap = netdev_priv(dev);
2551 struct ace_regs __iomem *regs = ap->regs;
2552
2553 writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
2554 dev->mtu = new_mtu;
2555
2556 if (new_mtu > ACE_STD_MTU) {
2557 if (!(ap->jumbo)) {
2558 printk(KERN_INFO "%s: Enabling Jumbo frame "
2559 "support\n", dev->name);
2560 ap->jumbo = 1;
2561 if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
2562 ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
2563 ace_set_rxtx_parms(dev, 1);
2564 }
2565 } else {
2566 while (test_and_set_bit(0, &ap->jumbo_refill_busy));
2567 ace_sync_irq(dev->irq);
2568 ace_set_rxtx_parms(dev, 0);
2569 if (ap->jumbo) {
2570 struct cmd cmd;
2571
2572 cmd.evt = C_RESET_JUMBO_RNG;
2573 cmd.code = 0;
2574 cmd.idx = 0;
2575 ace_issue_cmd(regs, &cmd);
2576 }
2577 }
2578
2579 return 0;
2580 }
2581
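/*
 * ethtool get_settings: report the link state by reading the gigabit
 * and fast link state registers, always advertising the fibre port.
 */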
2582 static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2583 {
2584 struct ace_private *ap = netdev_priv(dev);
2585 struct ace_regs __iomem *regs = ap->regs;
2586 u32 link;
2587
2588 memset(ecmd, 0, sizeof(struct ethtool_cmd));
2589 ecmd->supported =
2590 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2591 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2592 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full |
2593 SUPPORTED_Autoneg | SUPPORTED_FIBRE);
2594
2595 ecmd->port = PORT_FIBRE;
2596 ecmd->transceiver = XCVR_INTERNAL;
2597
2598 link = readl(&regs->GigLnkState);
2599 if (link & LNK_1000MB)
2600 ethtool_cmd_speed_set(ecmd, SPEED_1000);
2601 else {
2602 link = readl(&regs->FastLnkState);
2603 if (link & LNK_100MB)
2604 ethtool_cmd_speed_set(ecmd, SPEED_100);
2605 else if (link & LNK_10MB)
2606 ethtool_cmd_speed_set(ecmd, SPEED_10);
2607 else
2608 ethtool_cmd_speed_set(ecmd, 0);
2609 }
2610 if (link & LNK_FULL_DUPLEX)
2611 ecmd->duplex = DUPLEX_FULL;
2612 else
2613 ecmd->duplex = DUPLEX_HALF;
2614
2615 if (link & LNK_NEGOTIATE)
2616 ecmd->autoneg = AUTONEG_ENABLE;
2617 else
2618 ecmd->autoneg = AUTONEG_DISABLE;
2619
2620 #if 0
2621 /*
2622 * Current struct ethtool_cmd is insufficient
2623 */
2624 ecmd->trace = readl(&regs->TuneTrace);
2625
2626 ecmd->txcoal = readl(&regs->TuneTxCoalTicks);
2627 ecmd->rxcoal = readl(&regs->TuneRxCoalTicks);
2628 #endif
2629 ecmd->maxtxpkt = readl(&regs->TuneMaxTxDesc);
2630 ecmd->maxrxpkt = readl(&regs->TuneMaxRxDesc);
2631
2632 return 0;
2633 }
2634
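/*
 * ethtool set_settings: build a new TuneLink value from the requested
 * speed, duplex and autoneg settings and, if it differs from the
 * current one, write it to the NIC and ask the firmware to
 * renegotiate the link.
 */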
2635 static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2636 {
2637 struct ace_private *ap = netdev_priv(dev);
2638 struct ace_regs __iomem *regs = ap->regs;
2639 u32 link, speed;
2640
2641 link = readl(&regs->GigLnkState);
2642 if (link & LNK_1000MB)
2643 speed = SPEED_1000;
2644 else {
2645 link = readl(&regs->FastLnkState);
2646 if (link & LNK_100MB)
2647 speed = SPEED_100;
2648 else if (link & LNK_10MB)
2649 speed = SPEED_10;
2650 else
2651 speed = SPEED_100;
2652 }
2653
2654 link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB |
2655 LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL;
2656 if (!ACE_IS_TIGON_I(ap))
2657 link |= LNK_TX_FLOW_CTL_Y;
2658 if (ecmd->autoneg == AUTONEG_ENABLE)
2659 link |= LNK_NEGOTIATE;
2660 if (ethtool_cmd_speed(ecmd) != speed) {
2661 link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
2662 switch (ethtool_cmd_speed(ecmd)) {
2663 case SPEED_1000:
2664 link |= LNK_1000MB;
2665 break;
2666 case SPEED_100:
2667 link |= LNK_100MB;
2668 break;
2669 case SPEED_10:
2670 link |= LNK_10MB;
2671 break;
2672 }
2673 }
2674
2675 if (ecmd->duplex == DUPLEX_FULL)
2676 link |= LNK_FULL_DUPLEX;
2677
2678 if (link != ap->link) {
2679 struct cmd cmd;
2680 printk(KERN_INFO "%s: Renegotiating link state\n",
2681 dev->name);
2682
2683 ap->link = link;
2684 writel(link, &regs->TuneLink);
2685 if (!ACE_IS_TIGON_I(ap))
2686 writel(link, &regs->TuneFastLink);
2687 wmb();
2688
2689 cmd.evt = C_LNK_NEGOTIATION;
2690 cmd.code = 0;
2691 cmd.idx = 0;
2692 ace_issue_cmd(regs, &cmd);
2693 }
2694 return 0;
2695 }
2696
2697 static void ace_get_drvinfo(struct net_device *dev,
2698 struct ethtool_drvinfo *info)
2699 {
2700 struct ace_private *ap = netdev_priv(dev);
2701
2702 strlcpy(info->driver, "acenic", sizeof(info->driver));
2703 snprintf(info->version, sizeof(info->version), "%i.%i.%i",
2704 ap->firmware_major, ap->firmware_minor,
2705 ap->firmware_fix);
2706
2707 if (ap->pdev)
2708 strlcpy(info->bus_info, pci_name(ap->pdev),
2709 sizeof(info->bus_info));
2710
2711 }
2712
2713 /*
2714 * Set the hardware MAC address.
2715 */
2716 static int ace_set_mac_addr(struct net_device *dev, void *p)
2717 {
2718 struct ace_private *ap = netdev_priv(dev);
2719 struct ace_regs __iomem *regs = ap->regs;
2720 struct sockaddr *addr = p;
2721 u8 *da;
2722 struct cmd cmd;
2723
2724 if (netif_running(dev))
2725 return -EBUSY;
2726
2727 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2728
2729 da = (u8 *)dev->dev_addr;
2730
2731 writel(da[0] << 8 | da[1], &regs->MacAddrHi);
2732 writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
2733 &regs->MacAddrLo);
2734
2735 cmd.evt = C_SET_MAC_ADDR;
2736 cmd.code = 0;
2737 cmd.idx = 0;
2738 ace_issue_cmd(regs, &cmd);
2739
2740 return 0;
2741 }
2742
2743
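/*
 * Update the RX filter: enable or disable the firmware's all-multicast
 * and promiscuous modes according to the interface flags.  Individual
 * multicast addresses are not programmed into the NIC; filtering of
 * those is left to the upper layers (see the comment below).
 */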
2744 static void ace_set_multicast_list(struct net_device *dev)
2745 {
2746 struct ace_private *ap = netdev_priv(dev);
2747 struct ace_regs __iomem *regs = ap->regs;
2748 struct cmd cmd;
2749
2750 if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) {
2751 cmd.evt = C_SET_MULTICAST_MODE;
2752 cmd.code = C_C_MCAST_ENABLE;
2753 cmd.idx = 0;
2754 ace_issue_cmd(regs, &cmd);
2755 ap->mcast_all = 1;
2756 } else if (ap->mcast_all) {
2757 cmd.evt = C_SET_MULTICAST_MODE;
2758 cmd.code = C_C_MCAST_DISABLE;
2759 cmd.idx = 0;
2760 ace_issue_cmd(regs, &cmd);
2761 ap->mcast_all = 0;
2762 }
2763
2764 if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) {
2765 cmd.evt = C_SET_PROMISC_MODE;
2766 cmd.code = C_C_PROMISC_ENABLE;
2767 cmd.idx = 0;
2768 ace_issue_cmd(regs, &cmd);
2769 ap->promisc = 1;
2770 } else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) {
2771 cmd.evt = C_SET_PROMISC_MODE;
2772 cmd.code = C_C_PROMISC_DISABLE;
2773 cmd.idx = 0;
2774 ace_issue_cmd(regs, &cmd);
2775 ap->promisc = 0;
2776 }
2777
2778 /*
2779 * For the time being multicast relies on the upper layers
2780 * filtering it properly. The Firmware does not allow one to
2781 * set the entire multicast list at a time and keeping track of
2782 * it here is going to be messy.
2783 */
2784 if (!netdev_mc_empty(dev) && !ap->mcast_all) {
2785 cmd.evt = C_SET_MULTICAST_MODE;
2786 cmd.code = C_C_MCAST_ENABLE;
2787 cmd.idx = 0;
2788 ace_issue_cmd(regs, &cmd);
2789 } else if (!ap->mcast_all) {
2790 cmd.evt = C_SET_MULTICAST_MODE;
2791 cmd.code = C_C_MCAST_DISABLE;
2792 cmd.idx = 0;
2793 ace_issue_cmd(regs, &cmd);
2794 }
2795 }
2796
2797
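/*
 * Return the interface statistics.  The counters that only the NIC
 * keeps track of (missed frames, multicast and collisions) are read
 * from the MAC statistics block in the register window; the remaining
 * counters are updated by the driver itself in the RX and TX paths.
 */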
2798 static struct net_device_stats *ace_get_stats(struct net_device *dev)
2799 {
2800 struct ace_private *ap = netdev_priv(dev);
2801 struct ace_mac_stats __iomem *mac_stats =
2802 (struct ace_mac_stats __iomem *)ap->regs->Stats;
2803
2804 dev->stats.rx_missed_errors = readl(&mac_stats->drop_space);
2805 dev->stats.multicast = readl(&mac_stats->kept_mc);
2806 dev->stats.collisions = readl(&mac_stats->coll);
2807
2808 return &dev->stats;
2809 }
2810
2811
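/*
 * Copy a block of big-endian 32-bit words into the NIC's local memory
 * through the shared memory window: the window base register is
 * pointed at each destination chunk in turn and the data is written
 * out with writel().  Used here for downloading the firmware image.
 */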
2812 static void ace_copy(struct ace_regs __iomem *regs, const __be32 *src,
2813 u32 dest, int size)
2814 {
2815 void __iomem *tdest;
2816 short tsize, i;
2817
2818 if (size <= 0)
2819 return;
2820
2821 while (size > 0) {
2822 tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
2823 min_t(u32, size, ACE_WINDOW_SIZE));
2824 tdest = (void __iomem *) &regs->Window +
2825 (dest & (ACE_WINDOW_SIZE - 1));
2826 writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
2827 for (i = 0; i < (tsize / 4); i++) {
2828 /* Firmware is big-endian */
2829 writel(be32_to_cpup(src), tdest);
2830 src++;
2831 tdest += 4;
2832 dest += 4;
2833 size -= 4;
2834 }
2835 }
2836 }
2837
2838
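/*
 * Zero a region of the NIC's local memory through the same shared
 * memory window mechanism as ace_copy() above.
 */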
2839 static void ace_clear(struct ace_regs __iomem *regs, u32 dest, int size)
2840 {
2841 void __iomem *tdest;
2842 short tsize = 0, i;
2843
2844 if (size <= 0)
2845 return;
2846
2847 while (size > 0) {
2848 tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
2849 min_t(u32, size, ACE_WINDOW_SIZE));
2850 tdest = (void __iomem *) &regs->Window +
2851 (dest & (ACE_WINDOW_SIZE - 1));
2852 writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
2853
2854 for (i = 0; i < (tsize / 4); i++) {
2855 writel(0, tdest + i*4);
2856 }
2857
2858 dest += tsize;
2859 size -= tsize;
2860 }
2861 }
2862
2863
2864 /*
2865 * Download the firmware into the SRAM on the NIC
2866 *
2867 * This operation requires the NIC to be halted and is performed with
2868 * interrupts disabled and with the spinlock held.
2869 */
2870 static int ace_load_firmware(struct net_device *dev)
2871 {
2872 const struct firmware *fw;
2873 const char *fw_name = "acenic/tg2.bin";
2874 struct ace_private *ap = netdev_priv(dev);
2875 struct ace_regs __iomem *regs = ap->regs;
2876 const __be32 *fw_data;
2877 u32 load_addr;
2878 int ret;
2879
2880 if (!(readl(&regs->CpuCtrl) & CPU_HALTED)) {
2881 printk(KERN_ERR "%s: trying to download firmware while the "
2882 "CPU is running!\n", ap->name);
2883 return -EFAULT;
2884 }
2885
2886 if (ACE_IS_TIGON_I(ap))
2887 fw_name = "acenic/tg1.bin";
2888
2889 ret = request_firmware(&fw, fw_name, &ap->pdev->dev);
2890 if (ret) {
2891 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
2892 ap->name, fw_name);
2893 return ret;
2894 }
2895
2896 fw_data = (void *)fw->data;
2897
2898 /* Firmware blob starts with version numbers, followed by
2899 load and start address. Remainder is the blob to be loaded
2900 contiguously from load address. We don't bother to represent
2901 the BSS/SBSS sections any more, since we were clearing the
2902 whole thing anyway. */
2903 ap->firmware_major = fw->data[0];
2904 ap->firmware_minor = fw->data[1];
2905 ap->firmware_fix = fw->data[2];
2906
2907 ap->firmware_start = be32_to_cpu(fw_data[1]);
2908 if (ap->firmware_start < 0x4000 || ap->firmware_start >= 0x80000) {
2909 printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
2910 ap->name, ap->firmware_start, fw_name);
2911 ret = -EINVAL;
2912 goto out;
2913 }
2914
2915 load_addr = be32_to_cpu(fw_data[2]);
2916 if (load_addr < 0x4000 || load_addr >= 0x80000) {
2917 printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
2918 ap->name, load_addr, fw_name);
2919 ret = -EINVAL;
2920 goto out;
2921 }
2922
2923 /*
2924 * Do not try to clear more than 512KiB or we end up seeing
2925 * funny things on NICs with only 512KiB SRAM
2926 */
2927 ace_clear(regs, 0x2000, 0x80000-0x2000);
2928 ace_copy(regs, &fw_data[3], load_addr, fw->size-12);
2929 out:
2930 release_firmware(fw);
2931 return ret;
2932 }
2933
2934
2935 /*
2936 * The eeprom on the AceNIC is an Atmel i2c EEPROM.
2937 *
2938 * Accessing the EEPROM is `interesting' to say the least - don't read
2939 * this code right after dinner.
2940 *
2941 * This is all about black magic and bit-banging the device .... I
2942 * wonder in what hospital they have put the guy who designed the i2c
2943 * specs.
2944 *
2945 * Oh yes, this is only the beginning!
2946 *
2947 * Thanks to Stevarino Webinski for helping track down the bugs in the
2948 * i2c readout code by beta testing all my hacks.
2949 */
2950 static void eeprom_start(struct ace_regs __iomem *regs)
2951 {
2952 u32 local;
2953
2954 readl(&regs->LocalCtrl);
2955 udelay(ACE_SHORT_DELAY);
2956 local = readl(&regs->LocalCtrl);
2957 local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE;
2958 writel(local, &regs->LocalCtrl);
2959 readl(&regs->LocalCtrl);
2960 mb();
2961 udelay(ACE_SHORT_DELAY);
2962 local |= EEPROM_CLK_OUT;
2963 writel(local, &regs->LocalCtrl);
2964 readl(&regs->LocalCtrl);
2965 mb();
2966 udelay(ACE_SHORT_DELAY);
2967 local &= ~EEPROM_DATA_OUT;
2968 writel(local, &regs->LocalCtrl);
2969 readl(&regs->LocalCtrl);
2970 mb();
2971 udelay(ACE_SHORT_DELAY);
2972 local &= ~EEPROM_CLK_OUT;
2973 writel(local, &regs->LocalCtrl);
2974 readl(&regs->LocalCtrl);
2975 mb();
2976 }
2977
2978
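/*
 * Clock one byte out to the EEPROM, most significant bit first, by
 * bit-banging the data and clock lines in the LocalCtrl register.
 */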
2979 static void eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
2980 {
2981 short i;
2982 u32 local;
2983
2984 udelay(ACE_SHORT_DELAY);
2985 local = readl(&regs->LocalCtrl);
2986 local &= ~EEPROM_DATA_OUT;
2987 local |= EEPROM_WRITE_ENABLE;
2988 writel(local, &regs->LocalCtrl);
2989 readl(&regs->LocalCtrl);
2990 mb();
2991
2992 for (i = 0; i < 8; i++, magic <<= 1) {
2993 udelay(ACE_SHORT_DELAY);
2994 if (magic & 0x80)
2995 local |= EEPROM_DATA_OUT;
2996 else
2997 local &= ~EEPROM_DATA_OUT;
2998 writel(local, &regs->LocalCtrl);
2999 readl(&regs->LocalCtrl);
3000 mb();
3001
3002 udelay(ACE_SHORT_DELAY);
3003 local |= EEPROM_CLK_OUT;
3004 writel(local, &regs->LocalCtrl);
3005 readl(&regs->LocalCtrl);
3006 mb();
3007 udelay(ACE_SHORT_DELAY);
3008 local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT);
3009 writel(local, &regs->LocalCtrl);
3010 readl(&regs->LocalCtrl);
3011 mb();
3012 }
3013 }
3014
3015
3016 static int eeprom_check_ack(struct ace_regs __iomem *regs)
3017 {
3018 int state;
3019 u32 local;
3020
3021 local = readl(&regs->LocalCtrl);
3022 local &= ~EEPROM_WRITE_ENABLE;
3023 writel(local, &regs->LocalCtrl);
3024 readl(&regs->LocalCtrl);
3025 mb();
3026 udelay(ACE_LONG_DELAY);
3027 local |= EEPROM_CLK_OUT;
3028 writel(local, &regs->LocalCtrl);
3029 readl(&regs->LocalCtrl);
3030 mb();
3031 udelay(ACE_SHORT_DELAY);
3032 /* sample data in middle of high clk */
3033 state = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0;
3034 udelay(ACE_SHORT_DELAY);
3035 mb();
3036 writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
3037 readl(&regs->LocalCtrl);
3038 mb();
3039
3040 return state;
3041 }
3042
3043
3044 static void eeprom_stop(struct ace_regs __iomem *regs)
3045 {
3046 u32 local;
3047
3048 udelay(ACE_SHORT_DELAY);
3049 local = readl(&regs->LocalCtrl);
3050 local |= EEPROM_WRITE_ENABLE;
3051 writel(local, &regs->LocalCtrl);
3052 readl(&regs->LocalCtrl);
3053 mb();
3054 udelay(ACE_SHORT_DELAY);
3055 local &= ~EEPROM_DATA_OUT;
3056 writel(local, &regs->LocalCtrl);
3057 readl(&regs->LocalCtrl);
3058 mb();
3059 udelay(ACE_SHORT_DELAY);
3060 local |= EEPROM_CLK_OUT;
3061 writel(local, &regs->LocalCtrl);
3062 readl(&regs->LocalCtrl);
3063 mb();
3064 udelay(ACE_SHORT_DELAY);
3065 local |= EEPROM_DATA_OUT;
3066 writel(local, &regs->LocalCtrl);
3067 readl(&regs->LocalCtrl);
3068 mb();
3069 udelay(ACE_LONG_DELAY);
3070 local &= ~EEPROM_CLK_OUT;
3071 writel(local, &regs->LocalCtrl);
3072 mb();
3073 }
3074
3075
3076 /*
3077 * Read a whole byte from the EEPROM.
3078 */
3079 static int read_eeprom_byte(struct net_device *dev, unsigned long offset)
3080 {
3081 struct ace_private *ap = netdev_priv(dev);
3082 struct ace_regs __iomem *regs = ap->regs;
3083 unsigned long flags;
3084 u32 local;
3085 int result = 0;
3086 short i;
3087
3088 /*
3089 * Don't take interrupts on this CPU while bit banging
3090 * the %#%#@$ I2C device
3091 */
3092 local_irq_save(flags);
3093
3094 eeprom_start(regs);
3095
3096 eeprom_prep(regs, EEPROM_WRITE_SELECT);
3097 if (eeprom_check_ack(regs)) {
3098 local_irq_restore(flags);
3099 printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name);
3100 result = -EIO;
3101 goto eeprom_read_error;
3102 }
3103
3104 eeprom_prep(regs, (offset >> 8) & 0xff);
3105 if (eeprom_check_ack(regs)) {
3106 local_irq_restore(flags);
3107 printk(KERN_ERR "%s: Unable to set address byte 0\n",
3108 ap->name);
3109 result = -EIO;
3110 goto eeprom_read_error;
3111 }
3112
3113 eeprom_prep(regs, offset & 0xff);
3114 if (eeprom_check_ack(regs)) {
3115 local_irq_restore(flags);
3116 printk(KERN_ERR "%s: Unable to set address byte 1\n",
3117 ap->name);
3118 result = -EIO;
3119 goto eeprom_read_error;
3120 }
3121
3122 eeprom_start(regs);
3123 eeprom_prep(regs, EEPROM_READ_SELECT);
3124 if (eeprom_check_ack(regs)) {
3125 local_irq_restore(flags);
3126 printk(KERN_ERR "%s: Unable to set READ_SELECT\n",
3127 ap->name);
3128 result = -EIO;
3129 goto eeprom_read_error;
3130 }
3131
3132 for (i = 0; i < 8; i++) {
3133 local = readl(&regs->LocalCtrl);
3134 local &= ~EEPROM_WRITE_ENABLE;
3135 writel(local, &regs->LocalCtrl);
3136 readl(&regs->LocalCtrl);
3137 udelay(ACE_LONG_DELAY);
3138 mb();
3139 local |= EEPROM_CLK_OUT;
3140 writel(local, &regs->LocalCtrl);
3141 readl(&regs->LocalCtrl);
3142 mb();
3143 udelay(ACE_SHORT_DELAY);
3144 /* sample data mid high clk */
3145 result = (result << 1) |
3146 ((readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0);
3147 udelay(ACE_SHORT_DELAY);
3148 mb();
3149 local = readl(&regs->LocalCtrl);
3150 local &= ~EEPROM_CLK_OUT;
3151 writel(local, &regs->LocalCtrl);
3152 readl(&regs->LocalCtrl);
3153 udelay(ACE_SHORT_DELAY);
3154 mb();
3155 if (i == 7) {
3156 local |= EEPROM_WRITE_ENABLE;
3157 writel(local, &regs->LocalCtrl);
3158 readl(&regs->LocalCtrl);
3159 mb();
3160 udelay(ACE_SHORT_DELAY);
3161 }
3162 }
3163
3164 local |= EEPROM_DATA_OUT;
3165 writel(local, &regs->LocalCtrl);
3166 readl(&regs->LocalCtrl);
3167 mb();
3168 udelay(ACE_SHORT_DELAY);
3169 writel(readl(&regs->LocalCtrl) | EEPROM_CLK_OUT, &regs->LocalCtrl);
3170 readl(&regs->LocalCtrl);
3171 udelay(ACE_LONG_DELAY);
3172 writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
3173 readl(&regs->LocalCtrl);
3174 mb();
3175 udelay(ACE_SHORT_DELAY);
3176 eeprom_stop(regs);
3177
3178 local_irq_restore(flags);
3179 out:
3180 return result;
3181
3182 eeprom_read_error:
3183 printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n",
3184 ap->name, offset);
3185 goto out;
3186 }
3187
3188 module_pci_driver(acenic_pci_driver);