]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blame - drivers/net/cris/eth_v10.c
Merge branch 'bridge-PIM-hello'
[mirror_ubuntu-focal-kernel.git] / drivers / net / cris / eth_v10.c
CommitLineData
5efa1d1c 1/*
1da177e4
LT
2 * e100net.c: A network driver for the ETRAX 100LX network controller.
3 *
4 * Copyright (c) 1998-2002 Axis Communications AB.
5 *
6 * The outline of this driver comes from skeleton.c.
7 *
1da177e4
LT
8 */
9
1da177e4
LT
10
11#include <linux/module.h>
12
13#include <linux/kernel.h>
1da177e4
LT
14#include <linux/delay.h>
15#include <linux/types.h>
16#include <linux/fcntl.h>
17#include <linux/interrupt.h>
18#include <linux/ptrace.h>
19#include <linux/ioport.h>
20#include <linux/in.h>
1da177e4
LT
21#include <linux/string.h>
22#include <linux/spinlock.h>
23#include <linux/errno.h>
24#include <linux/init.h>
1977f032 25#include <linux/bitops.h>
1da177e4
LT
26
27#include <linux/if.h>
28#include <linux/mii.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/ethtool.h>
33
556dcee7 34#include <arch/svinto.h>/* DMA and register descriptions */
5efa1d1c 35#include <asm/io.h> /* CRIS_LED_* I/O functions */
1da177e4
LT
36#include <asm/irq.h>
37#include <asm/dma.h>
1da177e4
LT
38#include <asm/ethernet.h>
39#include <asm/cache.h>
556dcee7 40#include <arch/io_interface_mux.h>
1da177e4
LT
41
42//#define ETHDEBUG
43#define D(x)
44
45/*
46 * The name of the card. Is used for messages and in the requests for
47 * io regions, irqs and dma channels
48 */
49
50static const char* cardname = "ETRAX 100LX built-in ethernet controller";
51
52/* A default ethernet address. Highlevel SW will set the real one later */
53
54static struct sockaddr default_mac = {
55 0,
56 { 0x00, 0x40, 0x8C, 0xCD, 0x00, 0x00 }
57};
58
59/* Information that need to be kept for each board. */
60struct net_local {
1da177e4
LT
61 struct mii_if_info mii_if;
62
63 /* Tx control lock. This protects the transmit buffer ring
64 * state along with the "tx full" state of the driver. This
65 * means all netif_queue flow control actions are protected
66 * by this lock as well.
67 */
68 spinlock_t lock;
bafef0ae
JN
69
70 spinlock_t led_lock; /* Protect LED state */
71 spinlock_t transceiver_lock; /* Protect transceiver state. */
1da177e4
LT
72};
73
74typedef struct etrax_eth_descr
75{
76 etrax_dma_descr descr;
77 struct sk_buff* skb;
78} etrax_eth_descr;
79
80/* Some transceivers requires special handling */
81struct transceiver_ops
82{
83 unsigned int oui;
84 void (*check_speed)(struct net_device* dev);
85 void (*check_duplex)(struct net_device* dev);
86};
87
1da177e4
LT
88/* Duplex settings */
89enum duplex
90{
91 half,
92 full,
93 autoneg
94};
95
96/* Dma descriptors etc. */
97
bafef0ae 98#define MAX_MEDIA_DATA_SIZE 1522
1da177e4
LT
99
100#define MIN_PACKET_LEN 46
101#define ETHER_HEAD_LEN 14
102
103/*
104** MDIO constants.
105*/
106#define MDIO_START 0x1
107#define MDIO_READ 0x2
108#define MDIO_WRITE 0x1
109#define MDIO_PREAMBLE 0xfffffffful
110
111/* Broadcom specific */
112#define MDIO_AUX_CTRL_STATUS_REG 0x18
113#define MDIO_BC_FULL_DUPLEX_IND 0x1
114#define MDIO_BC_SPEED 0x2
115
116/* TDK specific */
117#define MDIO_TDK_DIAGNOSTIC_REG 18
118#define MDIO_TDK_DIAGNOSTIC_RATE 0x400
119#define MDIO_TDK_DIAGNOSTIC_DPLX 0x800
120
121/*Intel LXT972A specific*/
122#define MDIO_INT_STATUS_REG_2 0x0011
bafef0ae
JN
123#define MDIO_INT_FULL_DUPLEX_IND (1 << 9)
124#define MDIO_INT_SPEED (1 << 14)
1da177e4
LT
125
126/* Network flash constants */
127#define NET_FLASH_TIME (HZ/50) /* 20 ms */
128#define NET_FLASH_PAUSE (HZ/100) /* 10 ms */
129#define NET_LINK_UP_CHECK_INTERVAL (2*HZ) /* 2 s */
130#define NET_DUPLEX_CHECK_INTERVAL (2*HZ) /* 2 s */
131
132#define NO_NETWORK_ACTIVITY 0
133#define NETWORK_ACTIVITY 1
134
bafef0ae
JN
135#define NBR_OF_RX_DESC 32
136#define NBR_OF_TX_DESC 16
1da177e4
LT
137
138/* Large packets are sent directly to upper layers while small packets are */
139/* copied (to reduce memory waste). The following constant decides the breakpoint */
140#define RX_COPYBREAK 256
141
142/* Due to a chip bug we need to flush the cache when descriptors are returned */
143/* to the DMA. To decrease performance impact we return descriptors in chunks. */
144/* The following constant determines the number of descriptors to return. */
145#define RX_QUEUE_THRESHOLD NBR_OF_RX_DESC/2
146
147#define GET_BIT(bit,val) (((val) >> (bit)) & 0x01)
148
149/* Define some macros to access ETRAX 100 registers */
150#define SETF(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
151 IO_FIELD_(reg##_, field##_, val)
152#define SETS(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
153 IO_STATE_(reg##_, field##_, _##val)
154
155static etrax_eth_descr *myNextRxDesc; /* Points to the next descriptor to
156 to be processed */
157static etrax_eth_descr *myLastRxDesc; /* The last processed descriptor */
1da177e4
LT
158
159static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32)));
160
161static etrax_eth_descr* myFirstTxDesc; /* First packet not yet sent */
162static etrax_eth_descr* myLastTxDesc; /* End of send queue */
163static etrax_eth_descr* myNextTxDesc; /* Next descriptor to use */
164static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32)));
165
166static unsigned int network_rec_config_shadow = 0;
1da177e4
LT
167
168static unsigned int network_tr_ctrl_shadow = 0;
169
170/* Network speed indication. */
8d06afab
IM
171static DEFINE_TIMER(speed_timer, NULL, 0, 0);
172static DEFINE_TIMER(clear_led_timer, NULL, 0, 0);
1da177e4
LT
173static int current_speed; /* Speed read from transceiver */
174static int current_speed_selection; /* Speed selected by user */
175static unsigned long led_next_time;
176static int led_active;
177static int rx_queue_len;
178
179/* Duplex */
8d06afab 180static DEFINE_TIMER(duplex_timer, NULL, 0, 0);
1da177e4
LT
181static int full_duplex;
182static enum duplex current_duplex;
183
184/* Index to functions, as function prototypes. */
185
186static int etrax_ethernet_init(void);
187
188static int e100_open(struct net_device *dev);
189static int e100_set_mac_address(struct net_device *dev, void *addr);
190static int e100_send_packet(struct sk_buff *skb, struct net_device *dev);
7d12e780
DH
191static irqreturn_t e100rxtx_interrupt(int irq, void *dev_id);
192static irqreturn_t e100nw_interrupt(int irq, void *dev_id);
1da177e4
LT
193static void e100_rx(struct net_device *dev);
194static int e100_close(struct net_device *dev);
195static int e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
1da177e4
LT
196static int e100_set_config(struct net_device* dev, struct ifmap* map);
197static void e100_tx_timeout(struct net_device *dev);
198static struct net_device_stats *e100_get_stats(struct net_device *dev);
199static void set_multicast_list(struct net_device *dev);
bafef0ae 200static void e100_hardware_send_packet(struct net_local* np, char *buf, int length);
1da177e4
LT
201static void update_rx_stats(struct net_device_stats *);
202static void update_tx_stats(struct net_device_stats *);
203static int e100_probe_transceiver(struct net_device* dev);
204
205static void e100_check_speed(unsigned long priv);
206static void e100_set_speed(struct net_device* dev, unsigned long speed);
207static void e100_check_duplex(unsigned long priv);
208static void e100_set_duplex(struct net_device* dev, enum duplex);
209static void e100_negotiate(struct net_device* dev);
210
211static int e100_get_mdio_reg(struct net_device *dev, int phy_id, int location);
212static void e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value);
213
214static void e100_send_mdio_cmd(unsigned short cmd, int write_cmd);
215static void e100_send_mdio_bit(unsigned char bit);
216static unsigned char e100_receive_mdio_bit(void);
217static void e100_reset_transceiver(struct net_device* net);
218
219static void e100_clear_network_leds(unsigned long dummy);
220static void e100_set_network_leds(int active);
221
7282d491 222static const struct ethtool_ops e100_ethtool_ops;
bafef0ae
JN
223#if defined(CONFIG_ETRAX_NO_PHY)
224static void dummy_check_speed(struct net_device* dev);
225static void dummy_check_duplex(struct net_device* dev);
226#else
1da177e4
LT
227static void broadcom_check_speed(struct net_device* dev);
228static void broadcom_check_duplex(struct net_device* dev);
229static void tdk_check_speed(struct net_device* dev);
230static void tdk_check_duplex(struct net_device* dev);
231static void intel_check_speed(struct net_device* dev);
232static void intel_check_duplex(struct net_device* dev);
233static void generic_check_speed(struct net_device* dev);
234static void generic_check_duplex(struct net_device* dev);
bafef0ae
JN
235#endif
236#ifdef CONFIG_NET_POLL_CONTROLLER
237static void e100_netpoll(struct net_device* dev);
238#endif
239
240static int autoneg_normal = 1;
1da177e4
LT
241
242struct transceiver_ops transceivers[] =
243{
bafef0ae
JN
244#if defined(CONFIG_ETRAX_NO_PHY)
245 {0x0000, dummy_check_speed, dummy_check_duplex} /* Dummy */
246#else
1da177e4
LT
247 {0x1018, broadcom_check_speed, broadcom_check_duplex}, /* Broadcom */
248 {0xC039, tdk_check_speed, tdk_check_duplex}, /* TDK 2120 */
249 {0x039C, tdk_check_speed, tdk_check_duplex}, /* TDK 2120C */
250 {0x04de, intel_check_speed, intel_check_duplex}, /* Intel LXT972A*/
251 {0x0000, generic_check_speed, generic_check_duplex} /* Generic, must be last */
bafef0ae 252#endif
1da177e4
LT
253};
254
bafef0ae
JN
255struct transceiver_ops* transceiver = &transceivers[0];
256
a95c2a3b
AB
257static const struct net_device_ops e100_netdev_ops = {
258 .ndo_open = e100_open,
259 .ndo_stop = e100_close,
260 .ndo_start_xmit = e100_send_packet,
261 .ndo_tx_timeout = e100_tx_timeout,
262 .ndo_get_stats = e100_get_stats,
afc4b13d 263 .ndo_set_rx_mode = set_multicast_list,
a95c2a3b
AB
264 .ndo_do_ioctl = e100_ioctl,
265 .ndo_set_mac_address = e100_set_mac_address,
266 .ndo_validate_addr = eth_validate_addr,
a95c2a3b
AB
267 .ndo_set_config = e100_set_config,
268#ifdef CONFIG_NET_POLL_CONTROLLER
269 .ndo_poll_controller = e100_netpoll,
270#endif
271};
272
1da177e4
LT
273#define tx_done(dev) (*R_DMA_CH0_CMD == 0)
274
275/*
276 * Check for a network adaptor of this type, and return '0' if one exists.
277 * If dev->base_addr == 0, probe all likely locations.
278 * If dev->base_addr == 1, always return failure.
279 * If dev->base_addr == 2, allocate space for the device and return success
280 * (detachable devices only).
281 */
282
283static int __init
284etrax_ethernet_init(void)
285{
286 struct net_device *dev;
287 struct net_local* np;
288 int i, err;
289
290 printk(KERN_INFO
bafef0ae 291 "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 1998-2007 Axis Communications AB\n");
1da177e4 292
bafef0ae
JN
293 if (cris_request_io_interface(if_eth, cardname)) {
294 printk(KERN_CRIT "etrax_ethernet_init failed to get IO interface\n");
295 return -EBUSY;
296 }
1da177e4 297
bafef0ae 298 dev = alloc_etherdev(sizeof(struct net_local));
1da177e4
LT
299 if (!dev)
300 return -ENOMEM;
301
bafef0ae
JN
302 np = netdev_priv(dev);
303
304 /* we do our own locking */
305 dev->features |= NETIF_F_LLTX;
306
1da177e4
LT
307 dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */
308
309 /* now setup our etrax specific stuff */
310
311 dev->irq = NETWORK_DMA_RX_IRQ_NBR; /* we really use DMATX as well... */
312 dev->dma = NETWORK_RX_DMA_NBR;
313
314 /* fill in our handlers so the network layer can talk to us in the future */
315
76f2b4d9 316 dev->ethtool_ops = &e100_ethtool_ops;
a95c2a3b 317 dev->netdev_ops = &e100_netdev_ops;
bafef0ae
JN
318
319 spin_lock_init(&np->lock);
320 spin_lock_init(&np->led_lock);
321 spin_lock_init(&np->transceiver_lock);
1da177e4
LT
322
323 /* Initialise the list of Etrax DMA-descriptors */
324
325 /* Initialise receive descriptors */
326
327 for (i = 0; i < NBR_OF_RX_DESC; i++) {
bafef0ae
JN
328 /* Allocate two extra cachelines to make sure that buffer used
329 * by DMA does not share cacheline with any other data (to
330 * avoid cache bug)
1da177e4
LT
331 */
332 RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
92b1f905
DR
333 if (!RxDescList[i].skb)
334 return -ENOMEM;
1da177e4
LT
335 RxDescList[i].descr.ctrl = 0;
336 RxDescList[i].descr.sw_len = MAX_MEDIA_DATA_SIZE;
337 RxDescList[i].descr.next = virt_to_phys(&RxDescList[i + 1]);
338 RxDescList[i].descr.buf = L1_CACHE_ALIGN(virt_to_phys(RxDescList[i].skb->data));
339 RxDescList[i].descr.status = 0;
340 RxDescList[i].descr.hw_len = 0;
341 prepare_rx_descriptor(&RxDescList[i].descr);
342 }
343
344 RxDescList[NBR_OF_RX_DESC - 1].descr.ctrl = d_eol;
345 RxDescList[NBR_OF_RX_DESC - 1].descr.next = virt_to_phys(&RxDescList[0]);
346 rx_queue_len = 0;
347
348 /* Initialize transmit descriptors */
349 for (i = 0; i < NBR_OF_TX_DESC; i++) {
350 TxDescList[i].descr.ctrl = 0;
351 TxDescList[i].descr.sw_len = 0;
352 TxDescList[i].descr.next = virt_to_phys(&TxDescList[i + 1].descr);
353 TxDescList[i].descr.buf = 0;
354 TxDescList[i].descr.status = 0;
355 TxDescList[i].descr.hw_len = 0;
356 TxDescList[i].skb = 0;
357 }
358
359 TxDescList[NBR_OF_TX_DESC - 1].descr.ctrl = d_eol;
360 TxDescList[NBR_OF_TX_DESC - 1].descr.next = virt_to_phys(&TxDescList[0].descr);
361
362 /* Initialise initial pointers */
363
364 myNextRxDesc = &RxDescList[0];
365 myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
1da177e4
LT
366 myFirstTxDesc = &TxDescList[0];
367 myNextTxDesc = &TxDescList[0];
368 myLastTxDesc = &TxDescList[NBR_OF_TX_DESC - 1];
369
370 /* Register device */
371 err = register_netdev(dev);
372 if (err) {
373 free_netdev(dev);
374 return err;
375 }
376
377 /* set the default MAC address */
378
379 e100_set_mac_address(dev, &default_mac);
380
381 /* Initialize speed indicator stuff. */
382
383 current_speed = 10;
384 current_speed_selection = 0; /* Auto */
385 speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
bafef0ae 386 speed_timer.data = (unsigned long)dev;
1da177e4
LT
387 speed_timer.function = e100_check_speed;
388
389 clear_led_timer.function = e100_clear_network_leds;
bafef0ae 390 clear_led_timer.data = (unsigned long)dev;
1da177e4
LT
391
392 full_duplex = 0;
393 current_duplex = autoneg;
394 duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
395 duplex_timer.data = (unsigned long)dev;
396 duplex_timer.function = e100_check_duplex;
397
398 /* Initialize mii interface */
1da177e4
LT
399 np->mii_if.phy_id_mask = 0x1f;
400 np->mii_if.reg_num_mask = 0x1f;
401 np->mii_if.dev = dev;
402 np->mii_if.mdio_read = e100_get_mdio_reg;
403 np->mii_if.mdio_write = e100_set_mdio_reg;
404
405 /* Initialize group address registers to make sure that no */
406 /* unwanted addresses are matched */
407 *R_NETWORK_GA_0 = 0x00000000;
408 *R_NETWORK_GA_1 = 0x00000000;
bafef0ae
JN
409
410 /* Initialize next time the led can flash */
411 led_next_time = jiffies;
1da177e4
LT
412 return 0;
413}
414
415/* set MAC address of the interface. called from the core after a
416 * SIOCSIFADDR ioctl, and from the bootup above.
417 */
418
419static int
420e100_set_mac_address(struct net_device *dev, void *p)
421{
bafef0ae 422 struct net_local *np = netdev_priv(dev);
1da177e4 423 struct sockaddr *addr = p;
1da177e4
LT
424
425 spin_lock(&np->lock); /* preemption protection */
426
427 /* remember it */
428
429 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
430
431 /* Write it to the hardware.
432 * Note the way the address is wrapped:
433 * *R_NETWORK_SA_0 = a0_0 | (a0_1 << 8) | (a0_2 << 16) | (a0_3 << 24);
434 * *R_NETWORK_SA_1 = a0_4 | (a0_5 << 8);
435 */
436
437 *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
438 (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
439 *R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
440 *R_NETWORK_SA_2 = 0;
441
442 /* show it in the log as well */
443
e174961c 444 printk(KERN_INFO "%s: changed MAC to %pM\n", dev->name, dev->dev_addr);
1da177e4
LT
445
446 spin_unlock(&np->lock);
447
448 return 0;
449}
450
451/*
452 * Open/initialize the board. This is called (in the current kernel)
453 * sometime after booting when the 'ifconfig' program is run.
454 *
455 * This routine should set everything up anew at each open, even
456 * registers that "should" only need to be set once at boot, so that
457 * there is non-reboot way to recover if something goes wrong.
458 */
459
460static int
461e100_open(struct net_device *dev)
462{
463 unsigned long flags;
464
465 /* enable the MDIO output pin */
466
467 *R_NETWORK_MGM_CTRL = IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable);
468
469 *R_IRQ_MASK0_CLR =
470 IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) |
471 IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) |
472 IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr);
473
474 /* clear dma0 and 1 eop and descr irq masks */
475 *R_IRQ_MASK2_CLR =
476 IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) |
477 IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
478 IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) |
479 IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
480
481 /* Reset and wait for the DMA channels */
482
483 RESET_DMA(NETWORK_TX_DMA_NBR);
484 RESET_DMA(NETWORK_RX_DMA_NBR);
485 WAIT_DMA(NETWORK_TX_DMA_NBR);
486 WAIT_DMA(NETWORK_RX_DMA_NBR);
487
488 /* Initialise the etrax network controller */
489
490 /* allocate the irq corresponding to the receiving DMA */
491
ab392d2d
JMC
492 if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rxtx_interrupt, 0, cardname,
493 (void *)dev)) {
1da177e4
LT
494 goto grace_exit0;
495 }
496
497 /* allocate the irq corresponding to the transmitting DMA */
498
499 if (request_irq(NETWORK_DMA_TX_IRQ_NBR, e100rxtx_interrupt, 0,
500 cardname, (void *)dev)) {
501 goto grace_exit1;
502 }
503
504 /* allocate the irq corresponding to the network errors etc */
505
506 if (request_irq(NETWORK_STATUS_IRQ_NBR, e100nw_interrupt, 0,
507 cardname, (void *)dev)) {
508 goto grace_exit2;
509 }
510
bafef0ae
JN
511 /*
512 * Always allocate the DMA channels after the IRQ,
513 * and clean up on failure.
514 */
515
516 if (cris_request_dma(NETWORK_TX_DMA_NBR,
517 cardname,
518 DMA_VERBOSE_ON_ERROR,
519 dma_eth)) {
520 goto grace_exit3;
521 }
522
523 if (cris_request_dma(NETWORK_RX_DMA_NBR,
524 cardname,
525 DMA_VERBOSE_ON_ERROR,
526 dma_eth)) {
527 goto grace_exit4;
528 }
529
1da177e4
LT
530 /* give the HW an idea of what MAC address we want */
531
532 *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
533 (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
534 *R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
535 *R_NETWORK_SA_2 = 0;
536
537#if 0
538 /* use promiscuous mode for testing */
539 *R_NETWORK_GA_0 = 0xffffffff;
540 *R_NETWORK_GA_1 = 0xffffffff;
541
542 *R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */
543#else
bafef0ae 544 SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, max_size, size1522);
1da177e4
LT
545 SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
546 SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable);
547 SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
548 *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
549#endif
550
551 *R_NETWORK_GEN_CONFIG =
552 IO_STATE(R_NETWORK_GEN_CONFIG, phy, mii_clk) |
553 IO_STATE(R_NETWORK_GEN_CONFIG, enable, on);
554
555 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
556 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, delay, none);
557 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cancel, dont);
558 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cd, enable);
559 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, retry, enable);
560 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, pad, enable);
561 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable);
562 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
563
bafef0ae 564 local_irq_save(flags);
1da177e4
LT
565
566 /* enable the irq's for ethernet DMA */
567
568 *R_IRQ_MASK2_SET =
569 IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
570 IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);
571
572 *R_IRQ_MASK0_SET =
573 IO_STATE(R_IRQ_MASK0_SET, overrun, set) |
574 IO_STATE(R_IRQ_MASK0_SET, underrun, set) |
575 IO_STATE(R_IRQ_MASK0_SET, excessive_col, set);
576
577 /* make sure the irqs are cleared */
578
579 *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
580 *R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);
581
582 /* make sure the rec and transmit error counters are cleared */
583
584 (void)*R_REC_COUNTERS; /* dummy read */
585 (void)*R_TR_COUNTERS; /* dummy read */
586
587 /* start the receiving DMA channel so we can receive packets from now on */
588
589 *R_DMA_CH1_FIRST = virt_to_phys(myNextRxDesc);
590 *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, start);
591
592 /* Set up transmit DMA channel so it can be restarted later */
593
594 *R_DMA_CH0_FIRST = 0;
595 *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
bafef0ae 596 netif_start_queue(dev);
1da177e4 597
bafef0ae 598 local_irq_restore(flags);
1da177e4
LT
599
600 /* Probe for transceiver */
601 if (e100_probe_transceiver(dev))
bafef0ae 602 goto grace_exit5;
1da177e4
LT
603
604 /* Start duplex/speed timers */
605 add_timer(&speed_timer);
606 add_timer(&duplex_timer);
607
608 /* We are now ready to accept transmit requeusts from
609 * the queueing layer of the networking.
610 */
bafef0ae 611 netif_carrier_on(dev);
1da177e4
LT
612
613 return 0;
614
bafef0ae
JN
615grace_exit5:
616 cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
617grace_exit4:
618 cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
1da177e4
LT
619grace_exit3:
620 free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
621grace_exit2:
622 free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
623grace_exit1:
624 free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev);
625grace_exit0:
626 return -EAGAIN;
627}
628
bafef0ae
JN
629#if defined(CONFIG_ETRAX_NO_PHY)
630static void
631dummy_check_speed(struct net_device* dev)
632{
633 current_speed = 100;
634}
635#else
1da177e4
LT
636static void
637generic_check_speed(struct net_device* dev)
638{
639 unsigned long data;
bafef0ae
JN
640 struct net_local *np = netdev_priv(dev);
641
642 data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
1da177e4
LT
643 if ((data & ADVERTISE_100FULL) ||
644 (data & ADVERTISE_100HALF))
645 current_speed = 100;
646 else
647 current_speed = 10;
648}
649
650static void
651tdk_check_speed(struct net_device* dev)
652{
653 unsigned long data;
bafef0ae
JN
654 struct net_local *np = netdev_priv(dev);
655
656 data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
657 MDIO_TDK_DIAGNOSTIC_REG);
1da177e4
LT
658 current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
659}
660
661static void
662broadcom_check_speed(struct net_device* dev)
663{
664 unsigned long data;
bafef0ae
JN
665 struct net_local *np = netdev_priv(dev);
666
667 data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
668 MDIO_AUX_CTRL_STATUS_REG);
1da177e4
LT
669 current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
670}
671
672static void
673intel_check_speed(struct net_device* dev)
674{
675 unsigned long data;
bafef0ae
JN
676 struct net_local *np = netdev_priv(dev);
677
678 data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
679 MDIO_INT_STATUS_REG_2);
1da177e4
LT
680 current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
681}
bafef0ae 682#endif
1da177e4
LT
683static void
684e100_check_speed(unsigned long priv)
685{
686 struct net_device* dev = (struct net_device*)priv;
bafef0ae 687 struct net_local *np = netdev_priv(dev);
1da177e4
LT
688 static int led_initiated = 0;
689 unsigned long data;
690 int old_speed = current_speed;
691
bafef0ae
JN
692 spin_lock(&np->transceiver_lock);
693
694 data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR);
1da177e4
LT
695 if (!(data & BMSR_LSTATUS)) {
696 current_speed = 0;
697 } else {
698 transceiver->check_speed(dev);
699 }
700
bafef0ae 701 spin_lock(&np->led_lock);
1da177e4
LT
702 if ((old_speed != current_speed) || !led_initiated) {
703 led_initiated = 1;
704 e100_set_network_leds(NO_NETWORK_ACTIVITY);
bafef0ae
JN
705 if (current_speed)
706 netif_carrier_on(dev);
707 else
708 netif_carrier_off(dev);
1da177e4 709 }
bafef0ae 710 spin_unlock(&np->led_lock);
1da177e4
LT
711
712 /* Reinitialize the timer. */
713 speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
714 add_timer(&speed_timer);
bafef0ae
JN
715
716 spin_unlock(&np->transceiver_lock);
1da177e4
LT
717}
718
719static void
720e100_negotiate(struct net_device* dev)
721{
bafef0ae
JN
722 struct net_local *np = netdev_priv(dev);
723 unsigned short data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
724 MII_ADVERTISE);
1da177e4
LT
725
726 /* Discard old speed and duplex settings */
727 data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL |
728 ADVERTISE_10HALF | ADVERTISE_10FULL);
729
730 switch (current_speed_selection) {
bafef0ae 731 case 10:
1da177e4
LT
732 if (current_duplex == full)
733 data |= ADVERTISE_10FULL;
734 else if (current_duplex == half)
735 data |= ADVERTISE_10HALF;
736 else
737 data |= ADVERTISE_10HALF | ADVERTISE_10FULL;
738 break;
739
bafef0ae 740 case 100:
1da177e4
LT
741 if (current_duplex == full)
742 data |= ADVERTISE_100FULL;
743 else if (current_duplex == half)
744 data |= ADVERTISE_100HALF;
745 else
746 data |= ADVERTISE_100HALF | ADVERTISE_100FULL;
747 break;
748
bafef0ae 749 case 0: /* Auto */
1da177e4
LT
750 if (current_duplex == full)
751 data |= ADVERTISE_100FULL | ADVERTISE_10FULL;
752 else if (current_duplex == half)
753 data |= ADVERTISE_100HALF | ADVERTISE_10HALF;
754 else
755 data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
756 ADVERTISE_100HALF | ADVERTISE_100FULL;
757 break;
758
bafef0ae 759 default: /* assume autoneg speed and duplex */
1da177e4
LT
760 data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
761 ADVERTISE_100HALF | ADVERTISE_100FULL;
bafef0ae 762 break;
1da177e4
LT
763 }
764
bafef0ae 765 e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
1da177e4 766
e6cd1974 767 data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
bafef0ae 768 if (autoneg_normal) {
e6cd1974
JN
769 /* Renegotiate with link partner */
770 data |= BMCR_ANENABLE | BMCR_ANRESTART;
771 } else {
772 /* Don't negotiate speed or duplex */
773 data &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
774
775 /* Set speed and duplex static */
776 if (current_speed_selection == 10)
777 data &= ~BMCR_SPEED100;
778 else
779 data |= BMCR_SPEED100;
780
781 if (current_duplex != full)
782 data &= ~BMCR_FULLDPLX;
783 else
784 data |= BMCR_FULLDPLX;
bafef0ae
JN
785 }
786 e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
1da177e4
LT
787}
788
789static void
790e100_set_speed(struct net_device* dev, unsigned long speed)
791{
bafef0ae
JN
792 struct net_local *np = netdev_priv(dev);
793
794 spin_lock(&np->transceiver_lock);
1da177e4
LT
795 if (speed != current_speed_selection) {
796 current_speed_selection = speed;
797 e100_negotiate(dev);
798 }
bafef0ae 799 spin_unlock(&np->transceiver_lock);
1da177e4
LT
800}
801
802static void
803e100_check_duplex(unsigned long priv)
804{
805 struct net_device *dev = (struct net_device *)priv;
bafef0ae
JN
806 struct net_local *np = netdev_priv(dev);
807 int old_duplex;
808
809 spin_lock(&np->transceiver_lock);
810 old_duplex = full_duplex;
1da177e4
LT
811 transceiver->check_duplex(dev);
812 if (old_duplex != full_duplex) {
813 /* Duplex changed */
814 SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
815 *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
816 }
817
818 /* Reinitialize the timer. */
819 duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
820 add_timer(&duplex_timer);
821 np->mii_if.full_duplex = full_duplex;
bafef0ae 822 spin_unlock(&np->transceiver_lock);
1da177e4 823}
bafef0ae
JN
824#if defined(CONFIG_ETRAX_NO_PHY)
825static void
826dummy_check_duplex(struct net_device* dev)
827{
828 full_duplex = 1;
829}
830#else
1da177e4
LT
831static void
832generic_check_duplex(struct net_device* dev)
833{
834 unsigned long data;
bafef0ae
JN
835 struct net_local *np = netdev_priv(dev);
836
837 data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
1da177e4
LT
838 if ((data & ADVERTISE_10FULL) ||
839 (data & ADVERTISE_100FULL))
840 full_duplex = 1;
841 else
842 full_duplex = 0;
843}
844
845static void
846tdk_check_duplex(struct net_device* dev)
847{
848 unsigned long data;
bafef0ae
JN
849 struct net_local *np = netdev_priv(dev);
850
851 data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
852 MDIO_TDK_DIAGNOSTIC_REG);
1da177e4
LT
853 full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
854}
855
856static void
857broadcom_check_duplex(struct net_device* dev)
858{
859 unsigned long data;
bafef0ae
JN
860 struct net_local *np = netdev_priv(dev);
861
862 data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
863 MDIO_AUX_CTRL_STATUS_REG);
1da177e4
LT
864 full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
865}
866
867static void
868intel_check_duplex(struct net_device* dev)
869{
870 unsigned long data;
bafef0ae
JN
871 struct net_local *np = netdev_priv(dev);
872
873 data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
874 MDIO_INT_STATUS_REG_2);
1da177e4
LT
875 full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
876}
bafef0ae 877#endif
1da177e4
LT
878static void
879e100_set_duplex(struct net_device* dev, enum duplex new_duplex)
880{
bafef0ae
JN
881 struct net_local *np = netdev_priv(dev);
882
883 spin_lock(&np->transceiver_lock);
1da177e4
LT
884 if (new_duplex != current_duplex) {
885 current_duplex = new_duplex;
886 e100_negotiate(dev);
887 }
bafef0ae 888 spin_unlock(&np->transceiver_lock);
1da177e4
LT
889}
890
891static int
892e100_probe_transceiver(struct net_device* dev)
893{
633edf5a
AM
894 int ret = 0;
895
bafef0ae 896#if !defined(CONFIG_ETRAX_NO_PHY)
1da177e4
LT
897 unsigned int phyid_high;
898 unsigned int phyid_low;
899 unsigned int oui;
900 struct transceiver_ops* ops = NULL;
bafef0ae
JN
901 struct net_local *np = netdev_priv(dev);
902
903 spin_lock(&np->transceiver_lock);
1da177e4
LT
904
905 /* Probe MDIO physical address */
bafef0ae
JN
906 for (np->mii_if.phy_id = 0; np->mii_if.phy_id <= 31;
907 np->mii_if.phy_id++) {
908 if (e100_get_mdio_reg(dev,
909 np->mii_if.phy_id, MII_BMSR) != 0xffff)
1da177e4
LT
910 break;
911 }
633edf5a
AM
912 if (np->mii_if.phy_id == 32) {
913 ret = -ENODEV;
914 goto out;
915 }
1da177e4
LT
916
917 /* Get manufacturer */
bafef0ae
JN
918 phyid_high = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID1);
919 phyid_low = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID2);
1da177e4
LT
920 oui = (phyid_high << 6) | (phyid_low >> 10);
921
922 for (ops = &transceivers[0]; ops->oui; ops++) {
923 if (ops->oui == oui)
924 break;
925 }
926 transceiver = ops;
633edf5a 927out:
bafef0ae
JN
928 spin_unlock(&np->transceiver_lock);
929#endif
633edf5a 930 return ret;
1da177e4
LT
931}
932
933static int
934e100_get_mdio_reg(struct net_device *dev, int phy_id, int location)
935{
936 unsigned short cmd; /* Data to be sent on MDIO port */
937 int data; /* Data read from MDIO */
938 int bitCounter;
939
940 /* Start of frame, OP Code, Physical Address, Register Address */
941 cmd = (MDIO_START << 14) | (MDIO_READ << 12) | (phy_id << 7) |
942 (location << 2);
943
944 e100_send_mdio_cmd(cmd, 0);
945
946 data = 0;
947
948 /* Data... */
949 for (bitCounter=15; bitCounter>=0 ; bitCounter--) {
950 data |= (e100_receive_mdio_bit() << bitCounter);
951 }
952
953 return data;
954}
955
956static void
957e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value)
958{
959 int bitCounter;
960 unsigned short cmd;
961
962 cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (phy_id << 7) |
963 (location << 2);
964
965 e100_send_mdio_cmd(cmd, 1);
966
967 /* Data... */
968 for (bitCounter=15; bitCounter>=0 ; bitCounter--) {
969 e100_send_mdio_bit(GET_BIT(bitCounter, value));
970 }
971
972}
973
974static void
975e100_send_mdio_cmd(unsigned short cmd, int write_cmd)
976{
977 int bitCounter;
978 unsigned char data = 0x2;
979
980 /* Preamble */
981 for (bitCounter = 31; bitCounter>= 0; bitCounter--)
982 e100_send_mdio_bit(GET_BIT(bitCounter, MDIO_PREAMBLE));
983
984 for (bitCounter = 15; bitCounter >= 2; bitCounter--)
985 e100_send_mdio_bit(GET_BIT(bitCounter, cmd));
986
987 /* Turnaround */
988 for (bitCounter = 1; bitCounter >= 0 ; bitCounter--)
989 if (write_cmd)
990 e100_send_mdio_bit(GET_BIT(bitCounter, data));
991 else
992 e100_receive_mdio_bit();
993}
994
/*
 * Drive one bit onto the MDIO data line and toggle the management
 * clock once.  Each half-cycle is held for 1 us, so a full bit takes
 * about 2 us on the wire.
 */
static void
e100_send_mdio_bit(unsigned char bit)
{
	/* Output enable + data bit, clock low */
	*R_NETWORK_MGM_CTRL =
		IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
		IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
	udelay(1);
	/* Same data bit, clock high - the PHY samples on this edge */
	*R_NETWORK_MGM_CTRL =
		IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
		IO_MASK(R_NETWORK_MGM_CTRL, mdck) |
		IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
	udelay(1);
}
1008
/*
 * Sample one bit from the MDIO data line while generating a clock
 * cycle.  The output driver is disabled (register written to 0) so the
 * PHY can drive the line; the bit is read from the network status
 * register before the clock is raised.
 */
static unsigned char
e100_receive_mdio_bit(void)
{
	unsigned char bit;
	*R_NETWORK_MGM_CTRL = 0;	/* release the bus: mdoe off, clock low */
	bit = IO_EXTRACT(R_NETWORK_STAT, mdio, *R_NETWORK_STAT);
	udelay(1);
	*R_NETWORK_MGM_CTRL = IO_MASK(R_NETWORK_MGM_CTRL, mdck);	/* clock high */
	udelay(1);
	return bit;
}
1020
1021static void
1022e100_reset_transceiver(struct net_device* dev)
1023{
bafef0ae 1024 struct net_local *np = netdev_priv(dev);
1da177e4
LT
1025 unsigned short cmd;
1026 unsigned short data;
1027 int bitCounter;
1028
bafef0ae 1029 data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
1da177e4 1030
bafef0ae 1031 cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (np->mii_if.phy_id << 7) | (MII_BMCR << 2);
1da177e4
LT
1032
1033 e100_send_mdio_cmd(cmd, 1);
1034
1035 data |= 0x8000;
1036
1037 for (bitCounter = 15; bitCounter >= 0 ; bitCounter--) {
1038 e100_send_mdio_bit(GET_BIT(bitCounter, data));
1039 }
1040}
1041
1042/* Called by upper layers if they decide it took too long to complete
1043 * sending a packet - we need to reset and stuff.
1044 */
1045
1046static void
1047e100_tx_timeout(struct net_device *dev)
1048{
bafef0ae 1049 struct net_local *np = netdev_priv(dev);
1da177e4
LT
1050 unsigned long flags;
1051
1052 spin_lock_irqsave(&np->lock, flags);
1053
1054 printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
1055 tx_done(dev) ? "IRQ problem" : "network cable problem");
1056
1057 /* remember we got an error */
1058
40fe7d88 1059 dev->stats.tx_errors++;
1da177e4
LT
1060
1061 /* reset the TX DMA in case it has hung on something */
1062
1063 RESET_DMA(NETWORK_TX_DMA_NBR);
1064 WAIT_DMA(NETWORK_TX_DMA_NBR);
1065
1066 /* Reset the transceiver. */
1067
1068 e100_reset_transceiver(dev);
1069
1070 /* and get rid of the packets that never got an interrupt */
bafef0ae 1071 while (myFirstTxDesc != myNextTxDesc) {
1da177e4
LT
1072 dev_kfree_skb(myFirstTxDesc->skb);
1073 myFirstTxDesc->skb = 0;
1074 myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
1075 }
1076
1077 /* Set up transmit DMA channel so it can be restarted later */
1078 *R_DMA_CH0_FIRST = 0;
1079 *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
1080
1081 /* tell the upper layers we're ok again */
1082
1083 netif_wake_queue(dev);
1084 spin_unlock_irqrestore(&np->lock, flags);
1085}
1086
1087
1088/* This will only be invoked if the driver is _not_ in XOFF state.
1089 * What this means is that we need not check it, and that this
1090 * invariant will hold if we make sure that the netif_*_queue()
1091 * calls are done at the proper times.
1092 */
1093
1094static int
1095e100_send_packet(struct sk_buff *skb, struct net_device *dev)
1096{
bafef0ae 1097 struct net_local *np = netdev_priv(dev);
1da177e4
LT
1098 unsigned char *buf = skb->data;
1099 unsigned long flags;
1100
1101#ifdef ETHDEBUG
1102 printk("send packet len %d\n", length);
1103#endif
1104 spin_lock_irqsave(&np->lock, flags); /* protect from tx_interrupt and ourself */
1105
1106 myNextTxDesc->skb = skb;
1107
860e9538 1108 netif_trans_update(dev); /* NETIF_F_LLTX driver :( */
1da177e4 1109
bafef0ae 1110 e100_hardware_send_packet(np, buf, skb->len);
1da177e4
LT
1111
1112 myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next);
1113
1114 /* Stop queue if full */
1115 if (myNextTxDesc == myFirstTxDesc) {
1116 netif_stop_queue(dev);
1117 }
1118
1119 spin_unlock_irqrestore(&np->lock, flags);
1120
6ed10654 1121 return NETDEV_TX_OK;
1da177e4
LT
1122}
1123
/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 *
 * Shared handler for the rx (DMA channel 1) and tx (DMA channel 0)
 * end-of-packet interrupts.  Walks the rx descriptor ring handing
 * completed packets to e100_rx(), then reclaims finished tx
 * descriptors, freeing their skbs and waking the queue.
 */
static irqreturn_t
e100rxtx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	unsigned long irqbits;

	/*
	 * Note that both rx and tx interrupts are blocked at this point,
	 * regardless of which got us here.
	 */

	irqbits = *R_IRQ_MASK2_RD;

	/* Handle received packets */
	if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) {
		/* acknowledge the eop interrupt */

		*R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);

		/* check if one or more complete packets were indeed received */

		while ((*R_DMA_CH1_FIRST != virt_to_phys(myNextRxDesc)) &&
		       (myNextRxDesc != myLastRxDesc)) {
			/* Take out the buffer and give it to the OS, then
			 * allocate a new buffer to put a packet in.
			 */
			e100_rx(dev);
			dev->stats.rx_packets++;
			/* restart/continue on the channel, for safety */
			*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
			/* clear dma channel 1 eop/descr irq bits */
			*R_DMA_CH1_CLR_INTR =
				IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do) |
				IO_STATE(R_DMA_CH1_CLR_INTR, clr_descr, do);

			/* now, we might have gotten another packet
			   so we have to loop back and check if so */
		}
	}

	/* Report any packets that have been sent.  The queue-stopped
	 * clause lets the ring drain completely when it filled up:
	 * myFirstTxDesc == myNextTxDesc then means "full", not "empty". */
	while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
	       (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
		dev->stats.tx_bytes += myFirstTxDesc->skb->len;
		dev->stats.tx_packets++;

		/* dma is ready with the transmission of the data in tx_skb, so now
		   we can release the skb memory */
		dev_kfree_skb_irq(myFirstTxDesc->skb);
		myFirstTxDesc->skb = 0;
		myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
		/* Wake up queue. */
		netif_wake_queue(dev);
	}

	if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) {
		/* acknowledge the eop interrupt. */
		*R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
	}

	return IRQ_HANDLED;
}
1191
/*
 * Network status interrupt: handles tx underrun, rx overrun and
 * excessive-collision conditions.  Errors are cleared by pulsing the
 * clr_error bit through the R_NETWORK_TR_CTRL shadow register (set to
 * 'clr', write, then back to 'nop' so later shadow writes don't keep
 * clearing).
 */
static irqreturn_t
e100nw_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	unsigned long irqbits = *R_IRQ_MASK0_RD;

	/* check for underrun irq (a transmit-side error: tx_errors is
	 * bumped; the debug text below says "receiver" but D() compiles
	 * to nothing anyway) */
	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, underrun, active)) {
		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
		dev->stats.tx_errors++;
		D(printk("ethernet receiver underrun!\n"));
	}

	/* check for overrun irq */
	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) {
		update_rx_stats(&dev->stats); /* this will ack the irq */
		D(printk("ethernet receiver overrun!\n"));
	}
	/* check for excessive collision irq */
	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, excessive_col, active)) {
		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
		dev->stats.tx_errors++;
		D(printk("ethernet excessive collisions!\n"));
	}
	return IRQ_HANDLED;
}
1222
/* We have a good packet(s), get it/them out of the buffers.
 *
 * Called from the rx interrupt for each completed descriptor.  Small
 * frames (< RX_COPYBREAK) are copied into a freshly allocated skb so
 * the DMA buffer can be reused immediately; large frames hand the
 * descriptor's own skb up the stack and attach a new cache-aligned
 * buffer to the descriptor.  Descriptors are recycled back to the
 * hardware in batches of RX_QUEUE_THRESHOLD by moving the end-of-list
 * (d_eol) marker.
 */
static void
e100_rx(struct net_device *dev)
{
	struct sk_buff *skb;
	int length = 0;
	struct net_local *np = netdev_priv(dev);
	unsigned char *skb_data_ptr;
#ifdef ETHDEBUG
	int i;
#endif
	etrax_eth_descr *prevRxDesc;  /* The descriptor right before myNextRxDesc */
	spin_lock(&np->led_lock);
	if (!led_active && time_after(jiffies, led_next_time)) {
		/* light the network leds depending on the current speed. */
		e100_set_network_leds(NETWORK_ACTIVITY);

		/* Set the earliest time we may clear the LED */
		led_next_time = jiffies + NET_FLASH_TIME;
		led_active = 1;
		mod_timer(&clear_led_timer, jiffies + HZ/10);
	}
	spin_unlock(&np->led_lock);

	/* hw_len includes the 4-byte FCS, which we strip */
	length = myNextRxDesc->descr.hw_len - 4;
	dev->stats.rx_bytes += length;

#ifdef ETHDEBUG
	printk("Got a packet of length %d:\n", length);
	/* dump the first bytes in the packet */
	skb_data_ptr = (unsigned char *)phys_to_virt(myNextRxDesc->descr.buf);
	for (i = 0; i < 8; i++) {
		printk("%d: %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", i * 8,
		       skb_data_ptr[0],skb_data_ptr[1],skb_data_ptr[2],skb_data_ptr[3],
		       skb_data_ptr[4],skb_data_ptr[5],skb_data_ptr[6],skb_data_ptr[7]);
		skb_data_ptr += 8;
	}
#endif

	if (length < RX_COPYBREAK) {
		/* Small packet, copy data */
		skb = dev_alloc_skb(length - ETHER_HEAD_LEN);
		if (!skb) {
			dev->stats.rx_errors++;
			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
			/* drop the frame but still recycle the descriptor */
			goto update_nextrxdesc;
		}

		skb_put(skb, length - ETHER_HEAD_LEN);        /* allocate room for the packet body */
		skb_data_ptr = skb_push(skb, ETHER_HEAD_LEN); /* allocate room for the header */

#ifdef ETHDEBUG
		printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n",
		       skb->head, skb->data, skb_tail_pointer(skb),
		       skb_end_pointer(skb));
		printk("copying packet to 0x%x.\n", skb_data_ptr);
#endif

		memcpy(skb_data_ptr, phys_to_virt(myNextRxDesc->descr.buf), length);
	}
	else {
		/* Large packet, send directly to upper layers and allocate new
		 * memory (aligned to cache line boundary to avoid bug).
		 * Before sending the skb to upper layers we must make sure
		 * that skb->data points to the aligned start of the packet.
		 */
		int align;
		struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
		if (!new_skb) {
			dev->stats.rx_errors++;
			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
			goto update_nextrxdesc;
		}
		skb = myNextRxDesc->skb;
		/* align = offset of the DMA buffer within the skb data area */
		align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data;
		skb_put(skb, length + align);
		skb_pull(skb, align); /* Remove alignment bytes */
		myNextRxDesc->skb = new_skb;
		myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data));
	}

	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet to the upper layers */
	netif_rx(skb);

  update_nextrxdesc:
	/* Prepare for next packet */
	myNextRxDesc->descr.status = 0;
	prevRxDesc = myNextRxDesc;
	myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next);

	rx_queue_len++;

	/* Check if descriptors should be returned */
	if (rx_queue_len == RX_QUEUE_THRESHOLD) {
		flush_etrax_cache();
		/* extend the list up to prevRxDesc, then un-mark the old end */
		prevRxDesc->descr.ctrl |= d_eol;
		myLastRxDesc->descr.ctrl &= ~d_eol;
		myLastRxDesc = prevRxDesc;
		rx_queue_len = 0;
	}
}
1326
/* The inverse routine to net_open().
 *
 * Stops the queue, masks all network/DMA interrupts, resets both DMA
 * channels, releases the IRQs and DMA channels, folds the final
 * hardware counters into dev->stats and stops the periodic
 * speed/duplex timers.  Always returns 0.
 */
static int
e100_close(struct net_device *dev)
{
	printk(KERN_INFO "Closing %s.\n", dev->name);

	netif_stop_queue(dev);

	*R_IRQ_MASK0_CLR =
		IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) |
		IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) |
		IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr);

	*R_IRQ_MASK2_CLR =
		IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) |
		IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
		IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) |
		IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);

	/* Stop the receiver and the transmitter */

	RESET_DMA(NETWORK_TX_DMA_NBR);
	RESET_DMA(NETWORK_RX_DMA_NBR);

	/* Flush the Tx and disable Rx here. */

	free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev);
	free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
	free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);

	cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
	cris_free_dma(NETWORK_RX_DMA_NBR, cardname);

	/* Update the statistics here. */

	update_rx_stats(&dev->stats);
	update_tx_stats(&dev->stats);

	/* Stop speed/duplex timers */
	del_timer(&speed_timer);
	del_timer(&duplex_timer);

	return 0;
}
1371
/*
 * Private ioctl handler.  The SET_ETH_* commands are legacy Axis
 * extensions kept for old userspace; anything else is forwarded to
 * generic_mii_ioctl().  Runs under np->lock (non-irq spinlock).
 */
static int
e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct net_local *np = netdev_priv(dev);
	int rc = 0;
	int old_autoneg;

	spin_lock(&np->lock); /* Preempt protection */
	switch (cmd) {
		/* The ioctls below should be considered obsolete but are */
		/* still present for compatibility with old scripts/apps  */
		case SET_ETH_SPEED_10:                  /* 10 Mbps */
			e100_set_speed(dev, 10);
			break;
		case SET_ETH_SPEED_100:                /* 100 Mbps */
			e100_set_speed(dev, 100);
			break;
		case SET_ETH_SPEED_AUTO:	/* Auto-negotiate speed */
			e100_set_speed(dev, 0);
			break;
		case SET_ETH_DUPLEX_HALF:	/* Half duplex */
			e100_set_duplex(dev, half);
			break;
		case SET_ETH_DUPLEX_FULL:	/* Full duplex */
			e100_set_duplex(dev, full);
			break;
		case SET_ETH_DUPLEX_AUTO:	/* Auto-negotiate duplex */
			e100_set_duplex(dev, autoneg);
			break;
		case SET_ETH_AUTONEG:
			old_autoneg = autoneg_normal;
			/* NOTE(review): reinterprets the mii_ioctl_data area
			 * as a plain int - presumably matches what the legacy
			 * userspace tool passes; verify against callers. */
			autoneg_normal = *(int*)data;
			if (autoneg_normal != old_autoneg)
				e100_negotiate(dev);
			break;
		default:
			rc = generic_mii_ioctl(&np->mii_if, if_mii(ifr),
						cmd, NULL);
			break;
	}
	spin_unlock(&np->lock);
	return rc;
}
1416
bafef0ae
JN
1417static int e100_get_settings(struct net_device *dev,
1418 struct ethtool_cmd *cmd)
1da177e4 1419{
bafef0ae
JN
1420 struct net_local *np = netdev_priv(dev);
1421 int err;
76f2b4d9 1422
bafef0ae
JN
1423 spin_lock_irq(&np->lock);
1424 err = mii_ethtool_gset(&np->mii_if, cmd);
1425 spin_unlock_irq(&np->lock);
76f2b4d9 1426
bafef0ae
JN
1427 /* The PHY may support 1000baseT, but the Etrax100 does not. */
1428 cmd->supported &= ~(SUPPORTED_1000baseT_Half
1429 | SUPPORTED_1000baseT_Full);
1430 return err;
76f2b4d9
CH
1431}
1432
1433static int e100_set_settings(struct net_device *dev,
1434 struct ethtool_cmd *ecmd)
1435{
1436 if (ecmd->autoneg == AUTONEG_ENABLE) {
1437 e100_set_duplex(dev, autoneg);
1438 e100_set_speed(dev, 0);
1439 } else {
1440 e100_set_duplex(dev, ecmd->duplex == DUPLEX_HALF ? half : full);
1441 e100_set_speed(dev, ecmd->speed == SPEED_10 ? 10: 100);
1da177e4 1442 }
76f2b4d9
CH
1443
1444 return 0;
1445}
1446
1447static void e100_get_drvinfo(struct net_device *dev,
1448 struct ethtool_drvinfo *info)
1449{
7826d43f
JP
1450 strlcpy(info->driver, "ETRAX 100LX", sizeof(info->driver));
1451 strlcpy(info->version, "$Revision: 1.31 $", sizeof(info->version));
1452 strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
1453 strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
76f2b4d9
CH
1454}
1455
1456static int e100_nway_reset(struct net_device *dev)
1457{
1458 if (current_duplex == autoneg && current_speed_selection == 0)
1459 e100_negotiate(dev);
1da177e4
LT
1460 return 0;
1461}
1462
/* ethtool entry points; link status comes from the generic helper. */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings	= e100_get_settings,
	.set_settings	= e100_set_settings,
	.get_drvinfo	= e100_get_drvinfo,
	.nway_reset	= e100_nway_reset,
	.get_link	= ethtool_op_get_link,
};
1470
1da177e4
LT
/*
 * SIOCSIFMAP handler: select the media port.  Only twisted-pair modes
 * are supported; fiber/coax/AUI return -EOPNOTSUPP.  Duplex is always
 * left to autonegotiation, only the speed is forced.
 */
static int
e100_set_config(struct net_device *dev, struct ifmap *map)
{
	struct net_local *np = netdev_priv(dev);

	spin_lock(&np->lock); /* Preempt protection */

	switch(map->port) {
		case IF_PORT_UNKNOWN:
			/* Use autoneg */
			e100_set_speed(dev, 0);
			e100_set_duplex(dev, autoneg);
			break;
		case IF_PORT_10BASET:
			e100_set_speed(dev, 10);
			e100_set_duplex(dev, autoneg);
			break;
		case IF_PORT_100BASET:
		case IF_PORT_100BASETX:
			e100_set_speed(dev, 100);
			e100_set_duplex(dev, autoneg);
			break;
		case IF_PORT_100BASEFX:
		case IF_PORT_10BASE2:
		case IF_PORT_AUI:
			/* media the hardware has no connector for */
			spin_unlock(&np->lock);
			return -EOPNOTSUPP;
		default:
			printk(KERN_ERR "%s: Invalid media selected", dev->name);
			spin_unlock(&np->lock);
			return -EINVAL;
	}
	spin_unlock(&np->lock);
	return 0;
}
1506
/*
 * Fold the hardware receive error counters into @es.  NOTE(review):
 * e100nw_interrupt() relies on this read of R_REC_COUNTERS to ack the
 * overrun irq - presumably the counters are clear-on-read; confirm
 * against the ETRAX 100LX register documentation.
 */
static void
update_rx_stats(struct net_device_stats *es)
{
	unsigned long r = *R_REC_COUNTERS;
	/* update stats relevant to reception errors */
	es->rx_fifo_errors += IO_EXTRACT(R_REC_COUNTERS, congestion, r);
	es->rx_crc_errors += IO_EXTRACT(R_REC_COUNTERS, crc_error, r);
	es->rx_frame_errors += IO_EXTRACT(R_REC_COUNTERS, alignment_error, r);
	es->rx_length_errors += IO_EXTRACT(R_REC_COUNTERS, oversize, r);
}
1517
/*
 * Fold the hardware transmit counters into @es: single and multiple
 * collision counts are summed into the generic collisions statistic.
 */
static void
update_tx_stats(struct net_device_stats *es)
{
	unsigned long r = *R_TR_COUNTERS;
	/* update stats relevant to transmission errors */
	es->collisions +=
		IO_EXTRACT(R_TR_COUNTERS, single_col, r) +
		IO_EXTRACT(R_TR_COUNTERS, multiple_col, r);
}
1527
1528/*
1529 * Get the current statistics.
1530 * This may be called with the card open or closed.
1531 */
1532static struct net_device_stats *
1533e100_get_stats(struct net_device *dev)
1534{
bafef0ae 1535 struct net_local *lp = netdev_priv(dev);
1da177e4 1536 unsigned long flags;
bafef0ae 1537
1da177e4
LT
1538 spin_lock_irqsave(&lp->lock, flags);
1539
40fe7d88
TK
1540 update_rx_stats(&dev->stats);
1541 update_tx_stats(&dev->stats);
1da177e4
LT
1542
1543 spin_unlock_irqrestore(&lp->lock, flags);
40fe7d88 1544 return &dev->stats;
1da177e4
LT
1545}
1546
/*
 * Set or clear the multicast filter for this adaptor.
 * num_addrs == -1	Promiscuous mode, receive all packets
 * num_addrs == 0	Normal mode, clear multicast list
 * num_addrs > 0	Multicast mode, receive normal and MC packets,
 *			and do best-effort filtering.
 *
 * The hardware filter is a 64-bit hash split across the two group
 * address registers (GA_0 = bits 0-31, GA_1 = bits 32-63); each MC
 * address is XOR-folded into a 6-bit index below.
 */
static void
set_multicast_list(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int num_addr = netdev_mc_count(dev);
	unsigned long int lo_bits;
	unsigned long int hi_bits;

	spin_lock(&lp->lock);
	if (dev->flags & IFF_PROMISC) {
		/* promiscuous mode: hash accepts everything */
		lo_bits = 0xfffffffful;
		hi_bits = 0xfffffffful;

		/* Enable individual receive */
		SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, receive);
		*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* enable all multicasts */
		lo_bits = 0xfffffffful;
		hi_bits = 0xfffffffful;

		/* Disable individual receive */
		SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
		*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
	} else if (num_addr == 0) {
		/* Normal, clear the mc list */
		lo_bits = 0x00000000ul;
		hi_bits = 0x00000000ul;

		/* Disable individual receive */
		SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
		*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
	} else {
		/* MC mode, receive normal and MC packets */
		char hash_ix;
		struct netdev_hw_addr *ha;
		char *baddr;

		lo_bits = 0x00000000ul;
		hi_bits = 0x00000000ul;
		netdev_for_each_mc_addr(ha, dev) {
			/* Calculate the hash index for the GA registers:
			 * fold the 48 address bits into 6 by XORing
			 * overlapping bit-fields of each byte pair. */

			hash_ix = 0;
			baddr = ha->addr;
			hash_ix ^= (*baddr) & 0x3f;
			hash_ix ^= ((*baddr) >> 6) & 0x03;
			++baddr;
			hash_ix ^= ((*baddr) << 2) & 0x03c;
			hash_ix ^= ((*baddr) >> 4) & 0xf;
			++baddr;
			hash_ix ^= ((*baddr) << 4) & 0x30;
			hash_ix ^= ((*baddr) >> 2) & 0x3f;
			++baddr;
			hash_ix ^= (*baddr) & 0x3f;
			hash_ix ^= ((*baddr) >> 6) & 0x03;
			++baddr;
			hash_ix ^= ((*baddr) << 2) & 0x03c;
			hash_ix ^= ((*baddr) >> 4) & 0xf;
			++baddr;
			hash_ix ^= ((*baddr) << 4) & 0x30;
			hash_ix ^= ((*baddr) >> 2) & 0x3f;

			hash_ix &= 0x3f;

			/* set the matching bit in the high or low word */
			if (hash_ix >= 32) {
				hi_bits |= (1 << (hash_ix-32));
			} else {
				lo_bits |= (1 << hash_ix);
			}
		}
		/* Disable individual receive */
		SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
		*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
	}
	*R_NETWORK_GA_0 = lo_bits;
	*R_NETWORK_GA_1 = hi_bits;
	spin_unlock(&lp->lock);
}
1634
/*
 * Hand one outgoing buffer to the TX DMA: fill in the next free tx
 * descriptor, move the end-of-list marker onto it and restart the
 * channel.  Called from e100_send_packet() with np->lock held.  Also
 * flashes the activity LED (under the separate led_lock).
 */
void
e100_hardware_send_packet(struct net_local *np, char *buf, int length)
{
	D(printk("e100 send pack, buf 0x%x len %d\n", buf, length));

	spin_lock(&np->led_lock);
	if (!led_active && time_after(jiffies, led_next_time)) {
		/* light the network leds depending on the current speed. */
		e100_set_network_leds(NETWORK_ACTIVITY);

		/* Set the earliest time we may clear the LED */
		led_next_time = jiffies + NET_FLASH_TIME;
		led_active = 1;
		mod_timer(&clear_led_timer, jiffies + HZ/10);
	}
	spin_unlock(&np->led_lock);

	/* configure the tx dma descriptor */
	myNextTxDesc->descr.sw_len = length;
	myNextTxDesc->descr.ctrl = d_eop | d_eol | d_wait;
	myNextTxDesc->descr.buf = virt_to_phys(buf);

	/* Move end of list */
	myLastTxDesc->descr.ctrl &= ~d_eol;
	myLastTxDesc = myNextTxDesc;

	/* Restart DMA channel */
	*R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, restart);
}
1664
1665static void
1666e100_clear_network_leds(unsigned long dummy)
1667{
bafef0ae
JN
1668 struct net_device *dev = (struct net_device *)dummy;
1669 struct net_local *np = netdev_priv(dev);
1670
1671 spin_lock(&np->led_lock);
1672
1da177e4
LT
1673 if (led_active && time_after(jiffies, led_next_time)) {
1674 e100_set_network_leds(NO_NETWORK_ACTIVITY);
1675
1676 /* Set the earliest time we may set the LED */
1677 led_next_time = jiffies + NET_FLASH_PAUSE;
1678 led_active = 0;
1679 }
bafef0ae
JN
1680
1681 spin_unlock(&np->led_lock);
1da177e4
LT
1682}
1683
1684static void
1685e100_set_network_leds(int active)
1686{
1687#if defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK)
1688 int light_leds = (active == NO_NETWORK_ACTIVITY);
1689#elif defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY)
1690 int light_leds = (active == NETWORK_ACTIVITY);
1691#else
1692#error "Define either CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK or CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY"
1693#endif
1694
1695 if (!current_speed) {
1696 /* Make LED red, link is down */
5efa1d1c 1697 CRIS_LED_NETWORK_SET(CRIS_LED_OFF);
bafef0ae 1698 } else if (light_leds) {
1da177e4 1699 if (current_speed == 10) {
5efa1d1c 1700 CRIS_LED_NETWORK_SET(CRIS_LED_ORANGE);
1da177e4 1701 } else {
5efa1d1c 1702 CRIS_LED_NETWORK_SET(CRIS_LED_GREEN);
1da177e4 1703 }
bafef0ae 1704 } else {
5efa1d1c 1705 CRIS_LED_NETWORK_SET(CRIS_LED_OFF);
1da177e4
LT
1706 }
1707}
1708
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook: invoke the combined rx/tx handler directly so
 * netconsole and friends can make progress with interrupts disabled.
 */
static void
e100_netpoll(struct net_device* netdev)
{
	e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev);
}
#endif
1716
1da177e4
LT
/* Module entry point: delegates to the one-time device init. */
static int
etrax_init_module(void)
{
	return etrax_ethernet_init();
}
1722
1723static int __init
1724e100_boot_setup(char* str)
1725{
1726 struct sockaddr sa = {0};
1727 int i;
1728
1729 /* Parse the colon separated Ethernet station address */
1730 for (i = 0; i < ETH_ALEN; i++) {
1731 unsigned int tmp;
1732 if (sscanf(str + 3*i, "%2x", &tmp) != 1) {
1733 printk(KERN_WARNING "Malformed station address");
1734 return 0;
1735 }
1736 sa.sa_data[i] = (char)tmp;
1737 }
1738
1739 default_mac = sa;
1740 return 1;
1741}
1742
/* Accept "etrax100_eth=<mac>" on the kernel command line. */
__setup("etrax100_eth=", e100_boot_setup);

/* Register the module/built-in initialization entry point. */
module_init(etrax_init_module);