/*
 * smc91x.c
 * This is a driver for SMSC's 91C9x/91C1xx single-chip Ethernet devices.
 *
 * Copyright (C) 1996 by Erik Stahlman
 * Copyright (C) 2001 Standard Microsystems Corporation
 *	Developed by Simple Network Magic Corporation
 * Copyright (C) 2003 Monta Vista Software, Inc.
 *	Unified SMC91x driver by Nicolas Pitre
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Arguments:
 *	io	= for the base address
 *	irq	= for the IRQ
 *	nowait	= 0 for normal wait states, 1 eliminates additional wait states
 *
 * original author:
 *	Erik Stahlman <erik@vt.edu>
 *
 * hardware multicast code:
 *	Peter Cammaert <pc@denkart.be>
 *
 * contributors:
 *	Daris A Nevil <dnevil@snmc.com>
 *	Nicolas Pitre <nico@fluxnic.net>
 *	Russell King <rmk@arm.linux.org.uk>
 *
 * History:
 *   08/20/00  Arnaldo Melo       fix kfree(skb) in smc_hardware_send_packet
 *   12/15/00  Christian Jullien  fix "Warning: kfree_skb on hard IRQ"
 *   03/16/01  Daris A Nevil      modified smc9194.c for use with LAN91C111
 *   08/22/01  Scott Anderson     merge changes from smc9194 to smc91111
 *   08/21/01  Pramod B Bhardwaj  added support for RevB of LAN91C111
 *   12/20/01  Jeff Sutherland    initial port to Xscale PXA with DMA support
 *   04/07/03  Nicolas Pitre      unified SMC91x driver, killed irq races,
 *                                more bus abstraction, big cleanup, etc.
 *   29/09/03  Russell King       - add driver model support
 *                                - ethtool support
 *                                - convert to use generic MII interface
 *                                - add link up/down notification
 *                                - don't try to handle full negotiation in
 *                                  smc_phy_configure
 *                                - clean up (and fix stack overrun) in PHY
 *                                  MII read/write functions
 *   22/09/04  Nicolas Pitre      big update (see commit log for details)
 */
static const char version[] =
	"smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <nico@fluxnic.net>";

/* Debugging level */
#ifndef SMC_DEBUG
#define SMC_DEBUG 0
#endif


#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/io.h>

#include "smc91x.h"

#ifndef SMC_NOWAIT
# define SMC_NOWAIT		0
#endif
static int nowait = SMC_NOWAIT;
module_param(nowait, int, 0400);
MODULE_PARM_DESC(nowait, "set to 1 for no wait state");

/*
 * Transmit timeout, default 1000 milliseconds.
 */
static int watchdog = 1000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:smc91x");

/*
 * The internal workings of the driver.  If you are changing anything
 * here with the SMC stuff, you should have the datasheet and know
 * what you are doing.
 */
#define CARDNAME "smc91x"

/*
 * Use power-down feature of the chip
 */
#define POWER_DOWN		1

/*
 * Wait time for memory to be free.  This probably shouldn't be
 * tuned that much, as waiting for this means nothing else happens
 * in the system
 */
#define MEMORY_WAIT_TIME	16

/*
 * The maximum number of processing loops allowed for each call to the
 * IRQ handler.
 */
#define MAX_IRQ_LOOPS		8

/*
 * This selects whether TX packets are sent one by one to the SMC91x internal
 * memory and throttled until transmission completes.  This may prevent
 * RX overruns a little by keeping much of the memory free for RX packets,
 * but at the expense of reduced TX throughput and increased IRQ overhead.
 * Note this is not a cure for a too slow data bus or too high IRQ latency.
 */
#define THROTTLE_TX_PKTS	0

/*
 * The MII clock high/low times. 2x this number gives the MII clock period
 * in microseconds. (was 50, but this gives 6.4ms for each MII transaction!)
 */
#define MII_DELAY		1
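
/*
 * For example (illustration only): with MII_DELAY = 1 each bit-banged MDIO
 * bit costs two udelay(MII_DELAY) calls, i.e. roughly 2us on the wire, so a
 * register read (32 idle bits + 14 command bits + 18 turnaround/data bits =
 * 64 bits) takes on the order of 128us, ignoring register access overhead.
 */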

#if SMC_DEBUG > 0
#define DBG(n, dev, args...)				\
	do {						\
		if (SMC_DEBUG >= (n))			\
			netdev_dbg(dev, args);		\
	} while (0)

#define PRINTK(dev, args...)	netdev_info(dev, args)
#else
#define DBG(n, dev, args...)	do { } while (0)
#define PRINTK(dev, args...)	netdev_dbg(dev, args)
#endif

#if SMC_DEBUG > 3
static void PRINT_PKT(u_char *buf, int length)
{
	int i;
	int remainder;
	int lines;

	lines = length / 16;
	remainder = length % 16;

	for (i = 0; i < lines; i++) {
		int cur;
		printk(KERN_DEBUG);
		for (cur = 0; cur < 8; cur++) {
			u_char a, b;
			a = *buf++;
			b = *buf++;
			pr_cont("%02x%02x ", a, b);
		}
		pr_cont("\n");
	}
	printk(KERN_DEBUG);
	for (i = 0; i < remainder/2 ; i++) {
		u_char a, b;
		a = *buf++;
		b = *buf++;
		pr_cont("%02x%02x ", a, b);
	}
	pr_cont("\n");
}
#else
#define PRINT_PKT(x...)  do { } while (0)
#endif

/* this enables an interrupt in the interrupt mask register */
#define SMC_ENABLE_INT(lp, x) do {				\
	unsigned char mask;					\
	unsigned long smc_enable_flags;				\
	spin_lock_irqsave(&lp->lock, smc_enable_flags);		\
	mask = SMC_GET_INT_MASK(lp);				\
	mask |= (x);						\
	SMC_SET_INT_MASK(lp, mask);				\
	spin_unlock_irqrestore(&lp->lock, smc_enable_flags);	\
} while (0)

/* this disables an interrupt from the interrupt mask register */
#define SMC_DISABLE_INT(lp, x) do {				\
	unsigned char mask;					\
	unsigned long smc_disable_flags;			\
	spin_lock_irqsave(&lp->lock, smc_disable_flags);	\
	mask = SMC_GET_INT_MASK(lp);				\
	mask &= ~(x);						\
	SMC_SET_INT_MASK(lp, mask);				\
	spin_unlock_irqrestore(&lp->lock, smc_disable_flags);	\
} while (0)

/*
 * Wait while MMU is busy.  This is usually in the order of a few nanosecs
 * if at all, but let's avoid deadlocking the system if the hardware
 * decides to go south.
 */
#define SMC_WAIT_MMU_BUSY(lp) do {					\
	if (unlikely(SMC_GET_MMU_CMD(lp) & MC_BUSY)) {			\
		unsigned long timeout = jiffies + 2;			\
		while (SMC_GET_MMU_CMD(lp) & MC_BUSY) {			\
			if (time_after(jiffies, timeout)) {		\
				netdev_dbg(dev, "timeout %s line %d\n",	\
					   __FILE__, __LINE__);		\
				break;					\
			}						\
			cpu_relax();					\
		}							\
	}								\
} while (0)

/*
 * this does a soft reset on the device
 */
static void smc_reset(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int ctl, cfg;
	struct sk_buff *pending_skb;

	DBG(2, dev, "%s\n", __func__);

	/* Disable all interrupts, block TX tasklet */
	spin_lock_irq(&lp->lock);
	SMC_SELECT_BANK(lp, 2);
	SMC_SET_INT_MASK(lp, 0);
	pending_skb = lp->pending_tx_skb;
	lp->pending_tx_skb = NULL;
	spin_unlock_irq(&lp->lock);

	/* free any pending tx skb */
	if (pending_skb) {
		dev_kfree_skb(pending_skb);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	}

	/*
	 * This resets the registers mostly to defaults, but doesn't
	 * affect EEPROM.  That seems unnecessary
	 */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_RCR(lp, RCR_SOFTRST);

	/*
	 * Setup the Configuration Register
	 * This is necessary because the CONFIG_REG is not affected
	 * by a soft reset
	 */
	SMC_SELECT_BANK(lp, 1);

	cfg = CONFIG_DEFAULT;

	/*
	 * Setup for fast accesses if requested.  If the card/system
	 * can't handle it then there will be no recovery except for
	 * a hard reset or power cycle
	 */
	if (lp->cfg.flags & SMC91X_NOWAIT)
		cfg |= CONFIG_NO_WAIT;

	/*
	 * Release from possible power-down state
	 * Configuration register is not affected by Soft Reset
	 */
	cfg |= CONFIG_EPH_POWER_EN;

	SMC_SET_CONFIG(lp, cfg);

	/* this should pause enough for the chip to be happy */
	/*
	 * elaborate?  What does the chip _need_? --jgarzik
	 *
	 * This seems to be undocumented, but something the original
	 * driver(s) have always done.  Suspect undocumented timing
	 * info/determined empirically. --rmk
	 */
	udelay(1);

	/* Disable transmit and receive functionality */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_RCR(lp, RCR_CLEAR);
	SMC_SET_TCR(lp, TCR_CLEAR);

	SMC_SELECT_BANK(lp, 1);
	ctl = SMC_GET_CTL(lp) | CTL_LE_ENABLE;

	/*
	 * Set the control register to automatically release successfully
	 * transmitted packets, to make the best use out of our limited
	 * memory
	 */
	if (!THROTTLE_TX_PKTS)
		ctl |= CTL_AUTO_RELEASE;
	else
		ctl &= ~CTL_AUTO_RELEASE;
	SMC_SET_CTL(lp, ctl);

	/* Reset the MMU */
	SMC_SELECT_BANK(lp, 2);
	SMC_SET_MMU_CMD(lp, MC_RESET);
	SMC_WAIT_MMU_BUSY(lp);
}

/*
 * Enable Interrupts, Receive, and Transmit
 */
static void smc_enable(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	int mask;

	DBG(2, dev, "%s\n", __func__);

	/* see the header file for options in TCR/RCR DEFAULT */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_TCR(lp, lp->tcr_cur_mode);
	SMC_SET_RCR(lp, lp->rcr_cur_mode);

	SMC_SELECT_BANK(lp, 1);
	SMC_SET_MAC_ADDR(lp, dev->dev_addr);

	/* now, enable interrupts */
	mask = IM_EPH_INT|IM_RX_OVRN_INT|IM_RCV_INT;
	if (lp->version >= (CHIP_91100 << 4))
		mask |= IM_MDINT;
	SMC_SELECT_BANK(lp, 2);
	SMC_SET_INT_MASK(lp, mask);

	/*
	 * From this point the register bank must _NOT_ be switched away
	 * to anything other than bank 2 without proper locking against
	 * races with any tasklet or interrupt handlers until smc_shutdown()
	 * or smc_reset() is called.
	 */
}

/*
 * this puts the device in an inactive state
 */
static void smc_shutdown(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	struct sk_buff *pending_skb;

	DBG(2, dev, "%s: %s\n", CARDNAME, __func__);

	/* no more interrupts for me */
	spin_lock_irq(&lp->lock);
	SMC_SELECT_BANK(lp, 2);
	SMC_SET_INT_MASK(lp, 0);
	pending_skb = lp->pending_tx_skb;
	lp->pending_tx_skb = NULL;
	spin_unlock_irq(&lp->lock);
	if (pending_skb)
		dev_kfree_skb(pending_skb);

	/* and tell the card to stay away from that nasty outside world */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_RCR(lp, RCR_CLEAR);
	SMC_SET_TCR(lp, TCR_CLEAR);

#ifdef POWER_DOWN
	/* finally, shut the chip down */
	SMC_SELECT_BANK(lp, 1);
	SMC_SET_CONFIG(lp, SMC_GET_CONFIG(lp) & ~CONFIG_EPH_POWER_EN);
#endif
}

/*
 * This is the procedure to handle the receipt of a packet.
 */
static inline void smc_rcv(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int packet_number, status, packet_len;

	DBG(3, dev, "%s\n", __func__);

	packet_number = SMC_GET_RXFIFO(lp);
	if (unlikely(packet_number & RXFIFO_REMPTY)) {
		PRINTK(dev, "smc_rcv with nothing on FIFO.\n");
		return;
	}

	/* read from start of packet */
	SMC_SET_PTR(lp, PTR_READ | PTR_RCV | PTR_AUTOINC);

	/* First two words are status and packet length */
	SMC_GET_PKT_HDR(lp, status, packet_len);
	packet_len &= 0x07ff;  /* mask off top bits */
	DBG(2, dev, "RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n",
	    packet_number, status, packet_len, packet_len);

back:
	if (unlikely(packet_len < 6 || status & RS_ERRORS)) {
		if (status & RS_TOOLONG && packet_len <= (1514 + 4 + 6)) {
			/* accept VLAN packets */
			status &= ~RS_TOOLONG;
			goto back;
		}
		if (packet_len < 6) {
			/* bloody hardware */
			netdev_err(dev, "fubar (rxlen %u status %x\n",
				   packet_len, status);
			status |= RS_TOOSHORT;
		}
		SMC_WAIT_MMU_BUSY(lp);
		SMC_SET_MMU_CMD(lp, MC_RELEASE);
		dev->stats.rx_errors++;
		if (status & RS_ALGNERR)
			dev->stats.rx_frame_errors++;
		if (status & (RS_TOOSHORT | RS_TOOLONG))
			dev->stats.rx_length_errors++;
		if (status & RS_BADCRC)
			dev->stats.rx_crc_errors++;
	} else {
		struct sk_buff *skb;
		unsigned char *data;
		unsigned int data_len;

		/* set multicast stats */
		if (status & RS_MULTICAST)
			dev->stats.multicast++;

		/*
		 * Actual payload is packet_len - 6 (or 5 if odd byte).
		 * We want skb_reserve(2) and the final ctrl word
		 * (2 bytes, possibly containing the payload odd byte).
		 * Furthermore, we add 2 bytes to allow rounding up to
		 * multiple of 4 bytes on 32 bit buses.
		 * Hence packet_len - 6 + 2 + 2 + 2.
		 */
		skb = netdev_alloc_skb(dev, packet_len);
		if (unlikely(skb == NULL)) {
			SMC_WAIT_MMU_BUSY(lp);
			SMC_SET_MMU_CMD(lp, MC_RELEASE);
			dev->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 32 bits */
		skb_reserve(skb, 2);

		/* BUG: the LAN91C111 rev A never sets this bit. Force it. */
		if (lp->version == 0x90)
			status |= RS_ODDFRAME;

		/*
		 * If odd length: packet_len - 5,
		 * otherwise packet_len - 6.
		 * With the trailing ctrl byte it's packet_len - 4.
		 */
		data_len = packet_len - ((status & RS_ODDFRAME) ? 5 : 6);
		data = skb_put(skb, data_len);
		SMC_PULL_DATA(lp, data, packet_len - 4);
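
		/*
		 * For example (illustration only): a 64-byte payload arrives
		 * with packet_len = 70 (status word + length word + payload +
		 * control word), so data_len above is 70 - 6 = 64 and the
		 * SMC_PULL_DATA() call copies 66 bytes, i.e. the payload plus
		 * the trailing control word.
		 */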

		SMC_WAIT_MMU_BUSY(lp);
		SMC_SET_MMU_CMD(lp, MC_RELEASE);

		PRINT_PKT(data, packet_len - 4);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += data_len;
	}
}

#ifdef CONFIG_SMP
/*
 * On SMP we have the following problem:
 *
 *	A = smc_hardware_send_pkt()
 *	B = smc_hard_start_xmit()
 *	C = smc_interrupt()
 *
 * A and B can never be executed simultaneously.  However, at least on UP,
 * it is possible (and even desirable) for C to interrupt execution of
 * A or B in order to have better RX reliability and avoid overruns.
 * C, just like A and B, must have exclusive access to the chip and
 * each of them must lock against any other concurrent access.
 * Unfortunately it is not possible to have C suspend execution of A or
 * B taking place on another CPU.  On UP this is not an issue since A and B
 * are run from softirq context and C from hard IRQ context, and there is
 * no other CPU where concurrent access can happen.
 * If ever there is a way to force at least B and C to always be executed
 * on the same CPU then we could use read/write locks to protect against
 * any other concurrent access and C would always interrupt B.  But life
 * isn't that easy in an SMP world...
 */
#define smc_special_trylock(lock, flags)				\
({									\
	int __ret;							\
	local_irq_save(flags);						\
	__ret = spin_trylock(lock);					\
	if (!__ret)							\
		local_irq_restore(flags);				\
	__ret;								\
})
#define smc_special_lock(lock, flags)	spin_lock_irqsave(lock, flags)
#define smc_special_unlock(lock, flags)	spin_unlock_irqrestore(lock, flags)
#else
#define smc_special_trylock(lock, flags)	(flags == flags)
#define smc_special_lock(lock, flags)		do { flags = 0; } while (0)
#define smc_special_unlock(lock, flags)		do { flags = 0; } while (0)
#endif

/*
 * This is called to actually send a packet to the chip.
 */
static void smc_hardware_send_pkt(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	struct sk_buff *skb;
	unsigned int packet_no, len;
	unsigned char *buf;
	unsigned long flags;

	DBG(3, dev, "%s\n", __func__);

	if (!smc_special_trylock(&lp->lock, flags)) {
		netif_stop_queue(dev);
		tasklet_schedule(&lp->tx_task);
		return;
	}

	skb = lp->pending_tx_skb;
	if (unlikely(!skb)) {
		smc_special_unlock(&lp->lock, flags);
		return;
	}
	lp->pending_tx_skb = NULL;

	packet_no = SMC_GET_AR(lp);
	if (unlikely(packet_no & AR_FAILED)) {
		netdev_err(dev, "Memory allocation failed.\n");
		dev->stats.tx_errors++;
		dev->stats.tx_fifo_errors++;
		smc_special_unlock(&lp->lock, flags);
		goto done;
	}

	/* point to the beginning of the packet */
	SMC_SET_PN(lp, packet_no);
	SMC_SET_PTR(lp, PTR_AUTOINC);

	buf = skb->data;
	len = skb->len;
	DBG(2, dev, "TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n",
	    packet_no, len, len, buf);
	PRINT_PKT(buf, len);

	/*
	 * Send the packet length (+6 for status words, length, and ctl).
	 * The card will pad to 64 bytes with zeroes if packet is too small.
	 */
	SMC_PUT_PKT_HDR(lp, 0, len + 6);

	/* send the actual data */
	SMC_PUSH_DATA(lp, buf, len & ~1);

	/* Send final ctl word with the last byte if there is one */
	SMC_outw(((len & 1) ? (0x2000 | buf[len-1]) : 0), ioaddr, DATA_REG(lp));
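
	/*
	 * For example (illustration only): with len = 61, the push above
	 * transfers 60 bytes and the control word just written is
	 * 0x2000 | buf[60], i.e. the odd-byte flag plus the last data byte;
	 * for an even len the final control word is simply 0.
	 */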

	/*
	 * If THROTTLE_TX_PKTS is set, we stop the queue here. This will
	 * have the effect of having at most one packet queued for TX
	 * in the chip's memory at all times.
	 *
	 * If THROTTLE_TX_PKTS is not set then the queue is stopped only
	 * when memory allocation (MC_ALLOC) does not succeed right away.
	 */
	if (THROTTLE_TX_PKTS)
		netif_stop_queue(dev);

	/* queue the packet for TX */
	SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
	smc_special_unlock(&lp->lock, flags);

	dev->trans_start = jiffies;
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	SMC_ENABLE_INT(lp, IM_TX_INT | IM_TX_EMPTY_INT);

done:	if (!THROTTLE_TX_PKTS)
		netif_wake_queue(dev);

	dev_kfree_skb(skb);
}

/*
 * Since I am not sure if I will have enough room in the chip's ram
 * to store the packet, I call this routine which either sends it
 * now, or sets the card up to generate an interrupt when it is ready
 * for the packet.
 */
static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int numPages, poll_count, status;
	unsigned long flags;

	DBG(3, dev, "%s\n", __func__);

	BUG_ON(lp->pending_tx_skb != NULL);

	/*
	 * The MMU wants the number of pages to be the number of 256-byte
	 * 'pages', minus 1 (since a packet can't ever have 0 pages :))
	 *
	 * The 91C111 ignores the size bits, but earlier models don't.
	 *
	 * Pkt size for allocating is data length +6 (for additional status
	 * words, length and ctl)
	 *
	 * If odd size then last byte is included in ctl word.
	 */
	numPages = ((skb->len & ~1) + (6 - 1)) >> 8;
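	/*
	 * For example (illustration only): a full-sized 1514-byte frame gives
	 * numPages = ((1514 & ~1) + 5) >> 8 = 5, i.e. a request for six
	 * 256-byte pages (the field is "pages minus one"), enough for the
	 * 1514 + 6 bytes of payload plus header/control overhead.
	 */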
	if (unlikely(numPages > 7)) {
		netdev_warn(dev, "Far too big packet error.\n");
		dev->stats.tx_errors++;
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	smc_special_lock(&lp->lock, flags);

	/* now, try to allocate the memory */
	SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);

	/*
	 * Poll the chip for a short amount of time in case the
	 * allocation succeeds quickly.
	 */
	poll_count = MEMORY_WAIT_TIME;
	do {
		status = SMC_GET_INT(lp);
		if (status & IM_ALLOC_INT) {
			SMC_ACK_INT(lp, IM_ALLOC_INT);
			break;
		}
	} while (--poll_count);

	smc_special_unlock(&lp->lock, flags);

	lp->pending_tx_skb = skb;
	if (!poll_count) {
		/* oh well, wait until the chip finds memory later */
		netif_stop_queue(dev);
		DBG(2, dev, "TX memory allocation deferred.\n");
		SMC_ENABLE_INT(lp, IM_ALLOC_INT);
	} else {
		/*
		 * Allocation succeeded: push packet to the chip's own memory
		 * immediately.
		 */
		smc_hardware_send_pkt((unsigned long)dev);
	}

	return NETDEV_TX_OK;
}

/*
 * This handles a TX interrupt, which is only called when:
 *  - a TX error occurred, or
 *  - CTL_AUTO_RELEASE is not set and TX of a packet completed.
 */
static void smc_tx(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int saved_packet, packet_no, tx_status, pkt_len;

	DBG(3, dev, "%s\n", __func__);

	/* If the TX FIFO is empty then nothing to do */
	packet_no = SMC_GET_TXFIFO(lp);
	if (unlikely(packet_no & TXFIFO_TEMPTY)) {
		PRINTK(dev, "smc_tx with nothing on FIFO.\n");
		return;
	}

	/* select packet to read from */
	saved_packet = SMC_GET_PN(lp);
	SMC_SET_PN(lp, packet_no);

	/* read the first word (status word) from this packet */
	SMC_SET_PTR(lp, PTR_AUTOINC | PTR_READ);
	SMC_GET_PKT_HDR(lp, tx_status, pkt_len);
	DBG(2, dev, "TX STATUS 0x%04x PNR 0x%02x\n",
	    tx_status, packet_no);

	if (!(tx_status & ES_TX_SUC))
		dev->stats.tx_errors++;

	if (tx_status & ES_LOSTCARR)
		dev->stats.tx_carrier_errors++;

	if (tx_status & (ES_LATCOL | ES_16COL)) {
		PRINTK(dev, "%s occurred on last xmit\n",
		       (tx_status & ES_LATCOL) ?
			"late collision" : "too many collisions");
		dev->stats.tx_window_errors++;
		if (!(dev->stats.tx_window_errors & 63) && net_ratelimit()) {
			netdev_info(dev, "unexpectedly large number of bad collisions. Please check duplex setting.\n");
		}
	}

	/* kill the packet */
	SMC_WAIT_MMU_BUSY(lp);
	SMC_SET_MMU_CMD(lp, MC_FREEPKT);

	/* Don't restore Packet Number Reg until busy bit is cleared */
	SMC_WAIT_MMU_BUSY(lp);
	SMC_SET_PN(lp, saved_packet);

	/* re-enable transmit */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_TCR(lp, lp->tcr_cur_mode);
	SMC_SELECT_BANK(lp, 2);
}


/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/

static void smc_mii_out(struct net_device *dev, unsigned int val, int bits)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int mii_reg, mask;

	mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO);
	mii_reg |= MII_MDOE;

	for (mask = 1 << (bits - 1); mask; mask >>= 1) {
		if (val & mask)
			mii_reg |= MII_MDO;
		else
			mii_reg &= ~MII_MDO;

		SMC_SET_MII(lp, mii_reg);
		udelay(MII_DELAY);
		SMC_SET_MII(lp, mii_reg | MII_MCLK);
		udelay(MII_DELAY);
	}
}

static unsigned int smc_mii_in(struct net_device *dev, int bits)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int mii_reg, mask, val;

	mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO);
	SMC_SET_MII(lp, mii_reg);

	for (mask = 1 << (bits - 1), val = 0; mask; mask >>= 1) {
		if (SMC_GET_MII(lp) & MII_MDI)
			val |= mask;

		SMC_SET_MII(lp, mii_reg);
		udelay(MII_DELAY);
		SMC_SET_MII(lp, mii_reg | MII_MCLK);
		udelay(MII_DELAY);
	}

	return val;
}

/*
 * Reads a register from the MII Management serial interface
 */
static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int phydata;

	SMC_SELECT_BANK(lp, 3);

	/* Idle - 32 ones */
	smc_mii_out(dev, 0xffffffff, 32);

	/* Start code (01) + read (10) + phyaddr + phyreg */
	smc_mii_out(dev, 6 << 10 | phyaddr << 5 | phyreg, 14);

	/* Turnaround (2bits) + phydata */
	phydata = smc_mii_in(dev, 18);

	/* Return to idle state */
	SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));

	DBG(3, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
	    __func__, phyaddr, phyreg, phydata);

	SMC_SELECT_BANK(lp, 2);
	return phydata;
}
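
/*
 * For example (illustration only): reading register 1 (MII_BMSR) of the PHY
 * at address 1 shifts out the 14-bit command 6 << 10 | 1 << 5 | 1 = 0x1821,
 * i.e. 01 (start) 10 (read) 00001 (phyaddr) 00001 (phyreg), and then clocks
 * in 18 bits: the 2-bit turnaround followed by the 16-bit register value.
 */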

/*
 * Writes a register to the MII Management serial interface
 */
static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg,
			  int phydata)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	SMC_SELECT_BANK(lp, 3);

	/* Idle - 32 ones */
	smc_mii_out(dev, 0xffffffff, 32);

	/* Start code (01) + write (01) + phyaddr + phyreg + turnaround + phydata */
	smc_mii_out(dev, 5 << 28 | phyaddr << 23 | phyreg << 18 | 2 << 16 | phydata, 32);

	/* Return to idle state */
	SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));

	DBG(3, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
	    __func__, phyaddr, phyreg, phydata);

	SMC_SELECT_BANK(lp, 2);
}

/*
 * Finds and reports the PHY address
 */
static void smc_phy_detect(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	int phyaddr;

	DBG(2, dev, "%s\n", __func__);

	lp->phy_type = 0;

	/*
	 * Scan all 32 PHY addresses if necessary, starting at
	 * PHY#1 to PHY#31, and then PHY#0 last.
	 */
	for (phyaddr = 1; phyaddr < 33; ++phyaddr) {
		unsigned int id1, id2;

		/* Read the PHY identifiers */
		id1 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID1);
		id2 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID2);

		DBG(3, dev, "phy_id1=0x%x, phy_id2=0x%x\n",
		    id1, id2);

		/* Make sure it is a valid identifier */
		if (id1 != 0x0000 && id1 != 0xffff && id1 != 0x8000 &&
		    id2 != 0x0000 && id2 != 0xffff && id2 != 0x8000) {
			/* Save the PHY's address */
			lp->mii.phy_id = phyaddr & 31;
			lp->phy_type = id1 << 16 | id2;
			break;
		}
	}
}

/*
 * Sets the PHY to a configuration as determined by the user
 */
static int smc_phy_fixed(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	int phyaddr = lp->mii.phy_id;
	int bmcr, cfg1;

	DBG(3, dev, "%s\n", __func__);

	/* Enter Link Disable state */
	cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG);
	cfg1 |= PHY_CFG1_LNKDIS;
	smc_phy_write(dev, phyaddr, PHY_CFG1_REG, cfg1);

	/*
	 * Set our fixed capabilities
	 * Disable auto-negotiation
	 */
	bmcr = 0;

	if (lp->ctl_rfduplx)
		bmcr |= BMCR_FULLDPLX;

	if (lp->ctl_rspeed == 100)
		bmcr |= BMCR_SPEED100;

	/* Write our capabilities to the phy control register */
	smc_phy_write(dev, phyaddr, MII_BMCR, bmcr);

	/* Re-Configure the Receive/Phy Control register */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_RPC(lp, lp->rpc_cur_mode);
	SMC_SELECT_BANK(lp, 2);

	return 1;
}

/**
 * smc_phy_reset - reset the phy
 * @dev: net device
 * @phy: phy address
 *
 * Issue a software reset for the specified PHY and
 * wait up to 100ms for the reset to complete.  We should
 * not access the PHY for 50ms after issuing the reset.
 *
 * The time to wait appears to be dependent on the PHY.
 *
 * Must be called with lp->lock locked.
 */
static int smc_phy_reset(struct net_device *dev, int phy)
{
	struct smc_local *lp = netdev_priv(dev);
	unsigned int bmcr;
	int timeout;

	smc_phy_write(dev, phy, MII_BMCR, BMCR_RESET);

	for (timeout = 2; timeout; timeout--) {
		spin_unlock_irq(&lp->lock);
		msleep(50);
		spin_lock_irq(&lp->lock);

		bmcr = smc_phy_read(dev, phy, MII_BMCR);
		if (!(bmcr & BMCR_RESET))
			break;
	}

	return bmcr & BMCR_RESET;
}

/**
 * smc_phy_powerdown - powerdown phy
 * @dev: net device
 *
 * Power down the specified PHY
 */
static void smc_phy_powerdown(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	unsigned int bmcr;
	int phy = lp->mii.phy_id;

	if (lp->phy_type == 0)
		return;

	/* We need to ensure that no calls to smc_phy_configure are
	   pending.
	*/
	cancel_work_sync(&lp->phy_configure);

	bmcr = smc_phy_read(dev, phy, MII_BMCR);
	smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN);
}

/**
 * smc_phy_check_media - check the media status and adjust TCR
 * @dev: net device
 * @init: set true for initialisation
 *
 * Select duplex mode depending on negotiation state.  This
 * also updates our carrier state.
 */
static void smc_phy_check_media(struct net_device *dev, int init)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
		/* duplex state has changed */
		if (lp->mii.full_duplex) {
			lp->tcr_cur_mode |= TCR_SWFDUP;
		} else {
			lp->tcr_cur_mode &= ~TCR_SWFDUP;
		}

		SMC_SELECT_BANK(lp, 0);
		SMC_SET_TCR(lp, lp->tcr_cur_mode);
	}
}

/*
 * Configures the specified PHY through the MII management interface
 * using Autonegotiation.
 * Calls smc_phy_fixed() if the user has requested a certain config.
 * If RPC ANEG bit is set, the media selection is dependent purely on
 * the selection by the MII (either in the MII BMCR reg or the result
 * of autonegotiation.)  If the RPC ANEG bit is cleared, the selection
 * is controlled by the RPC SPEED and RPC DPLX bits.
 */
static void smc_phy_configure(struct work_struct *work)
{
	struct smc_local *lp =
		container_of(work, struct smc_local, phy_configure);
	struct net_device *dev = lp->dev;
	void __iomem *ioaddr = lp->base;
	int phyaddr = lp->mii.phy_id;
	int my_phy_caps; /* My PHY capabilities */
	int my_ad_caps; /* My Advertised capabilities */
	int status;

	DBG(3, dev, "smc_program_phy()\n");

	spin_lock_irq(&lp->lock);

	/*
	 * We should not be called if phy_type is zero.
	 */
	if (lp->phy_type == 0)
		goto smc_phy_configure_exit;

	if (smc_phy_reset(dev, phyaddr)) {
		netdev_info(dev, "PHY reset timed out\n");
		goto smc_phy_configure_exit;
	}

	/*
	 * Enable PHY Interrupts (for register 18)
	 * Interrupts listed here are disabled
	 */
	smc_phy_write(dev, phyaddr, PHY_MASK_REG,
		      PHY_INT_LOSSSYNC | PHY_INT_CWRD | PHY_INT_SSD |
		      PHY_INT_ESD | PHY_INT_RPOL | PHY_INT_JAB |
		      PHY_INT_SPDDET | PHY_INT_DPLXDET);

	/* Configure the Receive/Phy Control register */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_RPC(lp, lp->rpc_cur_mode);

	/* If the user requested no auto neg, then go set his request */
	if (lp->mii.force_media) {
		smc_phy_fixed(dev);
		goto smc_phy_configure_exit;
	}

	/* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
	my_phy_caps = smc_phy_read(dev, phyaddr, MII_BMSR);

	if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
		netdev_info(dev, "Auto negotiation NOT supported\n");
		smc_phy_fixed(dev);
		goto smc_phy_configure_exit;
	}

	my_ad_caps = ADVERTISE_CSMA; /* I am CSMA capable */

	if (my_phy_caps & BMSR_100BASE4)
		my_ad_caps |= ADVERTISE_100BASE4;
	if (my_phy_caps & BMSR_100FULL)
		my_ad_caps |= ADVERTISE_100FULL;
	if (my_phy_caps & BMSR_100HALF)
		my_ad_caps |= ADVERTISE_100HALF;
	if (my_phy_caps & BMSR_10FULL)
		my_ad_caps |= ADVERTISE_10FULL;
	if (my_phy_caps & BMSR_10HALF)
		my_ad_caps |= ADVERTISE_10HALF;

	/* Disable capabilities not selected by our user */
	if (lp->ctl_rspeed != 100)
		my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);

	if (!lp->ctl_rfduplx)
		my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);

	/* Update our Auto-Neg Advertisement Register */
	smc_phy_write(dev, phyaddr, MII_ADVERTISE, my_ad_caps);
	lp->mii.advertising = my_ad_caps;

	/*
	 * Read the register back.  Without this, it appears that when
	 * auto-negotiation is restarted, sometimes it isn't ready and
	 * the link does not come up.
	 */
	status = smc_phy_read(dev, phyaddr, MII_ADVERTISE);

	DBG(2, dev, "phy caps=%x\n", my_phy_caps);
	DBG(2, dev, "phy advertised caps=%x\n", my_ad_caps);

	/* Restart auto-negotiation process in order to advertise my caps */
	smc_phy_write(dev, phyaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);

	smc_phy_check_media(dev, 1);

smc_phy_configure_exit:
	SMC_SELECT_BANK(lp, 2);
	spin_unlock_irq(&lp->lock);
}

/*
 * smc_phy_interrupt
 *
 * Purpose:  Handle interrupts relating to PHY register 18.  This is
 * called from the "hard" interrupt handler under our private spinlock.
 */
static void smc_phy_interrupt(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	int phyaddr = lp->mii.phy_id;
	int phy18;

	DBG(2, dev, "%s\n", __func__);

	if (lp->phy_type == 0)
		return;

	for (;;) {
		smc_phy_check_media(dev, 0);

		/* Read PHY Register 18, Status Output */
		phy18 = smc_phy_read(dev, phyaddr, PHY_INT_REG);
		if ((phy18 & PHY_INT_INT) == 0)
			break;
	}
}

/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/

static void smc_10bt_check_media(struct net_device *dev, int init)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int old_carrier, new_carrier;

	old_carrier = netif_carrier_ok(dev) ? 1 : 0;

	SMC_SELECT_BANK(lp, 0);
	new_carrier = (SMC_GET_EPH_STATUS(lp) & ES_LINK_OK) ? 1 : 0;
	SMC_SELECT_BANK(lp, 2);

	if (init || (old_carrier != new_carrier)) {
		if (!new_carrier) {
			netif_carrier_off(dev);
		} else {
			netif_carrier_on(dev);
		}
		if (netif_msg_link(lp))
			netdev_info(dev, "link %s\n",
				    new_carrier ? "up" : "down");
	}
}

static void smc_eph_interrupt(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int ctl;

	smc_10bt_check_media(dev, 0);

	SMC_SELECT_BANK(lp, 1);
	ctl = SMC_GET_CTL(lp);
	SMC_SET_CTL(lp, ctl & ~CTL_LE_ENABLE);
	SMC_SET_CTL(lp, ctl);
	SMC_SELECT_BANK(lp, 2);
}

/*
 * This is the main routine of the driver, to handle the device when
 * it needs some attention.
 */
static irqreturn_t smc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	int status, mask, timeout, card_stats;
	int saved_pointer;

	DBG(3, dev, "%s\n", __func__);

	spin_lock(&lp->lock);

	/* A preamble may be used when there is a potential race
	 * between the interruptible transmit functions and this
	 * ISR. */
	SMC_INTERRUPT_PREAMBLE;

	saved_pointer = SMC_GET_PTR(lp);
	mask = SMC_GET_INT_MASK(lp);
	SMC_SET_INT_MASK(lp, 0);

	/* set a timeout value, so I don't stay here forever */
	timeout = MAX_IRQ_LOOPS;

	do {
		status = SMC_GET_INT(lp);

		DBG(2, dev, "INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
		    status, mask,
		    ({ int meminfo; SMC_SELECT_BANK(lp, 0);
		       meminfo = SMC_GET_MIR(lp);
		       SMC_SELECT_BANK(lp, 2); meminfo; }),
		    SMC_GET_FIFO(lp));

		status &= mask;
		if (!status)
			break;

		if (status & IM_TX_INT) {
			/* do this before RX as it will free memory quickly */
			DBG(3, dev, "TX int\n");
			smc_tx(dev);
			SMC_ACK_INT(lp, IM_TX_INT);
			if (THROTTLE_TX_PKTS)
				netif_wake_queue(dev);
		} else if (status & IM_RCV_INT) {
			DBG(3, dev, "RX irq\n");
			smc_rcv(dev);
		} else if (status & IM_ALLOC_INT) {
			DBG(3, dev, "Allocation irq\n");
			tasklet_hi_schedule(&lp->tx_task);
			mask &= ~IM_ALLOC_INT;
		} else if (status & IM_TX_EMPTY_INT) {
			DBG(3, dev, "TX empty\n");
			mask &= ~IM_TX_EMPTY_INT;

			/* update stats */
			SMC_SELECT_BANK(lp, 0);
			card_stats = SMC_GET_COUNTER(lp);
			SMC_SELECT_BANK(lp, 2);

			/* single collisions */
			dev->stats.collisions += card_stats & 0xF;
			card_stats >>= 4;

			/* multiple collisions */
			dev->stats.collisions += card_stats & 0xF;
		} else if (status & IM_RX_OVRN_INT) {
			DBG(1, dev, "RX overrun (EPH_ST 0x%04x)\n",
			    ({ int eph_st; SMC_SELECT_BANK(lp, 0);
			       eph_st = SMC_GET_EPH_STATUS(lp);
			       SMC_SELECT_BANK(lp, 2); eph_st; }));
			SMC_ACK_INT(lp, IM_RX_OVRN_INT);
			dev->stats.rx_errors++;
			dev->stats.rx_fifo_errors++;
		} else if (status & IM_EPH_INT) {
			smc_eph_interrupt(dev);
		} else if (status & IM_MDINT) {
			SMC_ACK_INT(lp, IM_MDINT);
			smc_phy_interrupt(dev);
		} else if (status & IM_ERCV_INT) {
			SMC_ACK_INT(lp, IM_ERCV_INT);
			PRINTK(dev, "UNSUPPORTED: ERCV INTERRUPT\n");
		}
	} while (--timeout);

	/* restore register states */
	SMC_SET_PTR(lp, saved_pointer);
	SMC_SET_INT_MASK(lp, mask);
	spin_unlock(&lp->lock);

#ifndef CONFIG_NET_POLL_CONTROLLER
	if (timeout == MAX_IRQ_LOOPS)
		PRINTK(dev, "spurious interrupt (mask = 0x%02x)\n",
		       mask);
#endif
	DBG(3, dev, "Interrupt done (%d loops)\n",
	    MAX_IRQ_LOOPS - timeout);

	/*
	 * We return IRQ_HANDLED unconditionally here even if there was
	 * nothing to do.  There is a possibility that a packet might
	 * get enqueued into the chip right after TX_EMPTY_INT is raised
	 * but just before the CPU acknowledges the IRQ.
	 * Better to take an unneeded IRQ on some occasions than to
	 * complicate the code for all cases.
	 */
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void smc_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	smc_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/* Our watchdog timed out. Called by the networking layer */
static void smc_timeout(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	int status, mask, eph_st, meminfo, fifo;

	DBG(2, dev, "%s\n", __func__);

	spin_lock_irq(&lp->lock);
	status = SMC_GET_INT(lp);
	mask = SMC_GET_INT_MASK(lp);
	fifo = SMC_GET_FIFO(lp);
	SMC_SELECT_BANK(lp, 0);
	eph_st = SMC_GET_EPH_STATUS(lp);
	meminfo = SMC_GET_MIR(lp);
	SMC_SELECT_BANK(lp, 2);
	spin_unlock_irq(&lp->lock);
	PRINTK(dev, "TX timeout (INT 0x%02x INTMASK 0x%02x MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
	       status, mask, meminfo, fifo, eph_st);

	smc_reset(dev);
	smc_enable(dev);

	/*
	 * Reconfiguring the PHY doesn't seem like a bad idea here, but
	 * smc_phy_configure() calls msleep() which calls schedule_timeout()
	 * which calls schedule().  Hence we use a work queue.
	 */
	if (lp->phy_type != 0)
		schedule_work(&lp->phy_configure);

	/* We can accept TX packets again */
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}

/*
 * This routine will, depending on the values passed to it,
 * either make it accept multicast packets, go into
 * promiscuous mode (for TCPDUMP and cousins) or accept
 * a select set of multicast packets
 */
static void smc_set_multicast_list(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned char multicast_table[8];
	int update_multicast = 0;

	DBG(2, dev, "%s\n", __func__);

	if (dev->flags & IFF_PROMISC) {
		DBG(2, dev, "RCR_PRMS\n");
		lp->rcr_cur_mode |= RCR_PRMS;
	}

/* BUG? I never disable promiscuous mode if multicasting was turned on.
   Now, I turn off promiscuous mode, but I don't do anything to multicasting
   when promiscuous mode is turned on.
*/

	/*
	 * Here, I am setting this to accept all multicast packets.
	 * I don't need to zero the multicast table, because the flag is
	 * checked before the table is used.
	 */
	else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
		DBG(2, dev, "RCR_ALMUL\n");
		lp->rcr_cur_mode |= RCR_ALMUL;
	}

	/*
	 * This sets the internal hardware table to filter out unwanted
	 * multicast packets before they take up memory.
	 *
	 * The SMC chip uses a hash table where the high 6 bits of the CRC of
	 * address are the offset into the table.  If that bit is 1, then the
	 * multicast packet is accepted.  Otherwise, it's dropped silently.
	 *
	 * To use the 6 bits as an offset into the table, the high 3 bits are
	 * the number of the 8 bit register, while the low 3 bits are the bit
	 * within that register.
	 */
	else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		/* table for flipping the order of 3 bits */
		static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};

		/* start with a table of all zeros: reject all */
		memset(multicast_table, 0, sizeof(multicast_table));

		netdev_for_each_mc_addr(ha, dev) {
			int position;

			/* only use the low order bits */
			position = crc32_le(~0, ha->addr, 6) & 0x3f;

			/* do some messy swapping to put the bit in the right spot */
			multicast_table[invert3[position&7]] |=
				(1<<invert3[(position>>3)&7]);
		}
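
		/*
		 * For example (illustration only): if the low six CRC bits of
		 * an address are 0x2b (0b101011), then position & 7 = 3 and
		 * (position >> 3) & 7 = 5, so the loop above sets
		 * multicast_table[invert3[3]] |= 1 << invert3[5], i.e. bit 5
		 * of table byte 6.
		 */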

		/* be sure I get rid of flags I might have set */
		lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);

		/* now, the table can be loaded into the chipset */
		update_multicast = 1;
	} else {
		DBG(2, dev, "~(RCR_PRMS|RCR_ALMUL)\n");
		lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);

		/*
		 * since I'm disabling all multicast entirely, I need to
		 * clear the multicast list
		 */
		memset(multicast_table, 0, sizeof(multicast_table));
		update_multicast = 1;
	}

	spin_lock_irq(&lp->lock);
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_RCR(lp, lp->rcr_cur_mode);
	if (update_multicast) {
		SMC_SELECT_BANK(lp, 3);
		SMC_SET_MCAST(lp, multicast_table);
	}
	SMC_SELECT_BANK(lp, 2);
	spin_unlock_irq(&lp->lock);
}


/*
 * Open and Initialize the board
 *
 * Set up everything, reset the card, etc..
 */
static int
smc_open(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);

	DBG(2, dev, "%s\n", __func__);

	/* Setup the default Register Modes */
	lp->tcr_cur_mode = TCR_DEFAULT;
	lp->rcr_cur_mode = RCR_DEFAULT;
	lp->rpc_cur_mode = RPC_DEFAULT |
				lp->cfg.leda << RPC_LSXA_SHFT |
				lp->cfg.ledb << RPC_LSXB_SHFT;

	/*
	 * If we are not using a MII interface, we need to
	 * monitor our own carrier signal to detect faults.
	 */
	if (lp->phy_type == 0)
		lp->tcr_cur_mode |= TCR_MON_CSN;

	/* reset the hardware */
	smc_reset(dev);
	smc_enable(dev);

	/* Configure the PHY, initialize the link state */
	if (lp->phy_type != 0)
		smc_phy_configure(&lp->phy_configure);
	else {
		spin_lock_irq(&lp->lock);
		smc_10bt_check_media(dev, 1);
		spin_unlock_irq(&lp->lock);
	}

	netif_start_queue(dev);
	return 0;
}

/*
 * smc_close
 *
 * this makes the board clean up everything that it can
 * and not talk to the outside world.  Caused by
 * an 'ifconfig ethX down'
 */
static int smc_close(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);

	DBG(2, dev, "%s\n", __func__);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	/* clear everything */
	smc_shutdown(dev);
	tasklet_kill(&lp->tx_task);
	smc_phy_powerdown(dev);
	return 0;
}

/*
 * Ethtool support
 */
static int
smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct smc_local *lp = netdev_priv(dev);
	int ret;

	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	if (lp->phy_type != 0) {
		spin_lock_irq(&lp->lock);
		ret = mii_ethtool_gset(&lp->mii, cmd);
		spin_unlock_irq(&lp->lock);
	} else {
		cmd->supported = SUPPORTED_10baseT_Half |
				 SUPPORTED_10baseT_Full |
				 SUPPORTED_TP | SUPPORTED_AUI;

		if (lp->ctl_rspeed == 10)
			ethtool_cmd_speed_set(cmd, SPEED_10);
		else if (lp->ctl_rspeed == 100)
			ethtool_cmd_speed_set(cmd, SPEED_100);

		cmd->autoneg = AUTONEG_DISABLE;
		cmd->transceiver = XCVR_INTERNAL;
		cmd->port = 0;
		cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? DUPLEX_FULL : DUPLEX_HALF;

		ret = 0;
	}

	return ret;
}

static int
smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct smc_local *lp = netdev_priv(dev);
	int ret;

	if (lp->phy_type != 0) {
		spin_lock_irq(&lp->lock);
		ret = mii_ethtool_sset(&lp->mii, cmd);
		spin_unlock_irq(&lp->lock);
	} else {
		if (cmd->autoneg != AUTONEG_DISABLE ||
		    cmd->speed != SPEED_10 ||
		    (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
		    (cmd->port != PORT_TP && cmd->port != PORT_AUI))
			return -EINVAL;

//		lp->port = cmd->port;
		lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;

//		if (netif_running(dev))
//			smc_set_port(dev);

		ret = 0;
	}

	return ret;
}

static void
smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, CARDNAME, sizeof(info->driver));
	strlcpy(info->version, version, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(dev->dev.parent),
		sizeof(info->bus_info));
}

static int smc_ethtool_nwayreset(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	int ret = -EINVAL;

	if (lp->phy_type != 0) {
		spin_lock_irq(&lp->lock);
		ret = mii_nway_restart(&lp->mii);
		spin_unlock_irq(&lp->lock);
	}

	return ret;
}

static u32 smc_ethtool_getmsglevel(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	return lp->msg_enable;
}

static void smc_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
	struct smc_local *lp = netdev_priv(dev);
	lp->msg_enable = level;
}

static int smc_write_eeprom_word(struct net_device *dev, u16 addr, u16 word)
{
	u16 ctl;
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	spin_lock_irq(&lp->lock);
	/* load word into GP register */
	SMC_SELECT_BANK(lp, 1);
	SMC_SET_GP(lp, word);
	/* set the address to put the data in EEPROM */
	SMC_SELECT_BANK(lp, 2);
	SMC_SET_PTR(lp, addr);
	/* tell it to write */
	SMC_SELECT_BANK(lp, 1);
	ctl = SMC_GET_CTL(lp);
	SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_STORE));
	/* wait for it to finish */
	do {
		udelay(1);
	} while (SMC_GET_CTL(lp) & CTL_STORE);
	/* clean up */
	SMC_SET_CTL(lp, ctl);
	SMC_SELECT_BANK(lp, 2);
	spin_unlock_irq(&lp->lock);
	return 0;
}

static int smc_read_eeprom_word(struct net_device *dev, u16 addr, u16 *word)
{
	u16 ctl;
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	spin_lock_irq(&lp->lock);
	/* set the EEPROM address to get the data from */
	SMC_SELECT_BANK(lp, 2);
	SMC_SET_PTR(lp, addr | PTR_READ);
	/* tell it to load */
	SMC_SELECT_BANK(lp, 1);
	SMC_SET_GP(lp, 0xffff);	/* init to known */
	ctl = SMC_GET_CTL(lp);
	SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_RELOAD));
	/* wait for it to finish */
	do {
		udelay(1);
	} while (SMC_GET_CTL(lp) & CTL_RELOAD);
	/* read word from GP register */
	*word = SMC_GET_GP(lp);
	/* clean up */
	SMC_SET_CTL(lp, ctl);
	SMC_SELECT_BANK(lp, 2);
	spin_unlock_irq(&lp->lock);
	return 0;
}

static int smc_ethtool_geteeprom_len(struct net_device *dev)
{
	return 0x23 * 2;
}

static int smc_ethtool_geteeprom(struct net_device *dev,
				 struct ethtool_eeprom *eeprom, u8 *data)
{
	int i;
	int imax;

	DBG(1, dev, "Reading %d bytes at %d(0x%x)\n",
	    eeprom->len, eeprom->offset, eeprom->offset);
	imax = smc_ethtool_geteeprom_len(dev);
	for (i = 0; i < eeprom->len; i += 2) {
		int ret;
		u16 wbuf;
		int offset = i + eeprom->offset;
		if (offset > imax)
			break;
		ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf);
		if (ret != 0)
			return ret;
		DBG(2, dev, "Read 0x%x from 0x%x\n", wbuf, offset >> 1);
		data[i] = (wbuf >> 8) & 0xff;
		data[i+1] = wbuf & 0xff;
	}
	return 0;
}

static int smc_ethtool_seteeprom(struct net_device *dev,
				 struct ethtool_eeprom *eeprom, u8 *data)
{
	int i;
	int imax;

	DBG(1, dev, "Writing %d bytes to %d(0x%x)\n",
	    eeprom->len, eeprom->offset, eeprom->offset);
	imax = smc_ethtool_geteeprom_len(dev);
	for (i = 0; i < eeprom->len; i += 2) {
		int ret;
		u16 wbuf;
		int offset = i + eeprom->offset;
		if (offset > imax)
			break;
		wbuf = (data[i] << 8) | data[i + 1];
		DBG(2, dev, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1);
		ret = smc_write_eeprom_word(dev, offset >> 1, wbuf);
		if (ret != 0)
			return ret;
	}
	return 0;
}


static const struct ethtool_ops smc_ethtool_ops = {
	.get_settings	= smc_ethtool_getsettings,
	.set_settings	= smc_ethtool_setsettings,
	.get_drvinfo	= smc_ethtool_getdrvinfo,

	.get_msglevel	= smc_ethtool_getmsglevel,
	.set_msglevel	= smc_ethtool_setmsglevel,
	.nway_reset	= smc_ethtool_nwayreset,
	.get_link	= ethtool_op_get_link,
	.get_eeprom_len	= smc_ethtool_geteeprom_len,
	.get_eeprom	= smc_ethtool_geteeprom,
	.set_eeprom	= smc_ethtool_seteeprom,
};

static const struct net_device_ops smc_netdev_ops = {
	.ndo_open		= smc_open,
	.ndo_stop		= smc_close,
	.ndo_start_xmit		= smc_hard_start_xmit,
	.ndo_tx_timeout		= smc_timeout,
	.ndo_set_rx_mode	= smc_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= smc_poll_controller,
#endif
};

1da177e4
LT
1766/*
1767 * smc_findirq
1768 *
1769 * This routine has a simple purpose -- make the SMC chip generate an
1770 * interrupt, so an auto-detect routine can detect it, and find the IRQ,
1771 */
1772/*
1773 * does this still work?
1774 *
1775 * I just deleted auto_irq.c, since it was never built...
1776 * --jgarzik
1777 */
1e48fea4 1778static int smc_findirq(struct smc_local *lp)
1da177e4 1779{
cfdfa865 1780 void __iomem *ioaddr = lp->base;
1781 int timeout = 20;
1782 unsigned long cookie;
1783
6389aa45 1784 DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
1785
1786 cookie = probe_irq_on();
1787
1788 /*
1789 * What I try to do here is trigger an ALLOC_INT. This is done
1790 * by allocating a small chunk of memory, which will give an interrupt
1791 * when done.
1792 */
1793 /* enable ALLOCation interrupts ONLY */
1794 SMC_SELECT_BANK(lp, 2);
1795 SMC_SET_INT_MASK(lp, IM_ALLOC_INT);
1796
1797 /*
1798 * Allocate 512 bytes of memory. Note that the chip was just
1799 * reset so all the memory is available
1800 */
cfdfa865 1801 SMC_SET_MMU_CMD(lp, MC_ALLOC | 1);
1802
1803 /*
1804 * Wait until positive that the interrupt has been generated
1805 */
1806 do {
1807 int int_status;
1808 udelay(10);
cfdfa865 1809 int_status = SMC_GET_INT(lp);
1810 if (int_status & IM_ALLOC_INT)
1811 break; /* got the interrupt */
1812 } while (--timeout);
1813
1814 /*
1815 * there is really nothing that I can do here if the timeout expires,
1816 * as probe_irq_off() will return 0 anyway, which is what I
1817 * want in this case. Plus, the clean up is needed in both
1818 * cases.
1819 */
1820
1821 /* and disable all interrupts again */
cfdfa865 1822 SMC_SET_INT_MASK(lp, 0);
1823
1824 /* and return what I found */
1825 return probe_irq_off(cookie);
1826}
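
/*
 * Editor's sketch (hypothetical helper, not driver code): the generic
 * auto-IRQ probing pattern smc_findirq() relies on -- arm probe_irq_on(),
 * make the hardware raise exactly one interrupt, then let probe_irq_off()
 * report which line fired (it returns 0 when nothing usable was seen).
 */
static int __maybe_unused smc_example_autoirq(void (*raise_one_irq)(void))
{
	unsigned long cookie = probe_irq_on();

	raise_one_irq();		/* e.g. the ALLOC_INT trick above */
	return probe_irq_off(cookie);
}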
1827
1828/*
1829 * Function: smc_probe(unsigned long ioaddr)
1830 *
1831 * Purpose:
1832 * Tests to see if a given ioaddr points to an SMC91x chip.
1833 * Returns a 0 on success
1834 *
1835 * Algorithm:
1836 * (1) see if the high byte of BANK_SELECT is 0x33
1837 * (2) compare the ioaddr with the base register's address
1838 * (3) see if I recognize the chip ID in the appropriate register
1839 *
1840 * Here I do typical initialization tasks.
1841 *
1842 * o Initialize the structure if needed
1843 * o print out my vanity message if not done so already
1844 * o print out what type of hardware is detected
1845 * o print out the ethernet address
1846 * o find the IRQ
1847 * o set up my private data
1848 * o configure the dev structure with my subroutines
1849 * o actually GRAB the irq.
1850 * o GRAB the region
1851 */
1e48fea4 1852static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
1dd06ae8 1853 unsigned long irq_flags)
1854{
1855 struct smc_local *lp = netdev_priv(dev);
0795af57 1856 int retval;
1857 unsigned int val, revision_register;
1858 const char *version_string;
1859
6389aa45 1860 DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
1861
1862 /* First, see if the high byte is 0x33 */
cfdfa865 1863 val = SMC_CURRENT_BANK(lp);
1864 DBG(2, dev, "%s: bank signature probe returned 0x%04x\n",
1865 CARDNAME, val);
1866 if ((val & 0xFF00) != 0x3300) {
1867 if ((val & 0xFF) == 0x33) {
1868 netdev_warn(dev,
1869 "%s: Detected possible byte-swapped interface at IOADDR %p\n",
1870 CARDNAME, ioaddr);
1871 }
1872 retval = -ENODEV;
1873 goto err_out;
1874 }
1875
1876 /*
1877 * The above MIGHT indicate a device, but I need to write to the
1878 * bank select register to test this further.
1879 */
1880 SMC_SELECT_BANK(lp, 0);
1881 val = SMC_CURRENT_BANK(lp);
1882 if ((val & 0xFF00) != 0x3300) {
1883 retval = -ENODEV;
1884 goto err_out;
1885 }
1886
1887 /*
1888 * well, we've already written once, so hopefully another
1889 * time won't hurt. This time, I need to switch the bank
1890 * register to bank 1, so I can access the base address
1891 * register
1892 */
1893 SMC_SELECT_BANK(lp, 1);
1894 val = SMC_GET_BASE(lp);
1da177e4 1895 val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
6bc21eed 1896 if (((unsigned long)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
1897 netdev_warn(dev, "%s: IOADDR %p doesn't match configuration (%x).\n",
1898 CARDNAME, ioaddr, val);
1899 }
1900
1901 /*
1902 * check if the revision register is something that I
1903 * recognize.  This list might need additions later, as future
1904 * chip revisions appear.
1905 */
1906 SMC_SELECT_BANK(lp, 3);
1907 revision_register = SMC_GET_REV(lp);
6389aa45 1908 DBG(2, dev, "%s: revision = 0x%04x\n", CARDNAME, revision_register);
1909 version_string = chip_ids[ (revision_register >> 4) & 0xF];
1910 if (!version_string || (revision_register & 0xff00) != 0x3300) {
1911 /* I don't recognize this chip, so... */
1912 netdev_warn(dev, "%s: IO %p: Unrecognized revision register 0x%04x, Contact author.\n",
1913 CARDNAME, ioaddr, revision_register);
1914
1915 retval = -ENODEV;
1916 goto err_out;
1917 }
1918
1919 /* At this point I'll assume that the chip is an SMC91x. */
6389aa45 1920 pr_info_once("%s\n", version);
1921
1922 /* fill in some of the fields */
1923 dev->base_addr = (unsigned long)ioaddr;
1924 lp->base = ioaddr;
1925 lp->version = revision_register & 0xff;
1926 spin_lock_init(&lp->lock);
1927
1928 /* Get the MAC address */
1929 SMC_SELECT_BANK(lp, 1);
1930 SMC_GET_MAC_ADDR(lp, dev->dev_addr);
1931
1932 /* now, reset the chip, and put it into a known state */
1933 smc_reset(dev);
1934
1935 /*
1936 * If dev->irq is 0, then the device has to be banged on to see
1937 * what the IRQ is.
6389aa45 1938 *
1939 * This banging doesn't always detect the IRQ for unknown reasons;
1940 * a workaround is to reset the chip and try again.
1941 *
1942 * Interestingly, the DOS packet driver *SETS* the IRQ on the card to
1943 * be what is requested on the command line. I don't do that, mostly
1944 * because the card that I have uses a non-standard method of accessing
1945 * the IRQs, and because this _should_ work in most configurations.
1946 *
1947 * Specifying an IRQ is done with the assumption that the user knows
1948 * what (s)he is doing. No checking is done!!!!
1949 */
1950 if (dev->irq < 1) {
1951 int trials;
1952
1953 trials = 3;
1954 while (trials--) {
cfdfa865 1955 dev->irq = smc_findirq(lp);
1956 if (dev->irq)
1957 break;
1958 /* kick the card and try again */
1959 smc_reset(dev);
1960 }
1961 }
1962 if (dev->irq == 0) {
6389aa45 1963 netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n");
1964 retval = -ENODEV;
1965 goto err_out;
1966 }
1967 dev->irq = irq_canonicalize(dev->irq);
1968
1969 /* Fill in the fields of the device structure with ethernet values. */
1970 ether_setup(dev);
1971
1da177e4 1972 dev->watchdog_timeo = msecs_to_jiffies(watchdog);
a528079e 1973 dev->netdev_ops = &smc_netdev_ops;
1da177e4 1974 dev->ethtool_ops = &smc_ethtool_ops;
1975
1976 tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev);
1977 INIT_WORK(&lp->phy_configure, smc_phy_configure);
1978 lp->dev = dev;
1979 lp->mii.phy_id_mask = 0x1f;
1980 lp->mii.reg_num_mask = 0x1f;
1981 lp->mii.force_media = 0;
1982 lp->mii.full_duplex = 0;
1983 lp->mii.dev = dev;
1984 lp->mii.mdio_read = smc_phy_read;
1985 lp->mii.mdio_write = smc_phy_write;
1986
1987 /*
1988 * Locate the phy, if any.
1989 */
1990 if (lp->version >= (CHIP_91100 << 4))
1991 smc_phy_detect(dev);
1992
1993 /* then shut everything down to save power */
1994 smc_shutdown(dev);
1995 smc_phy_powerdown(dev);
1996
1997 /* Set default parameters */
1998 lp->msg_enable = NETIF_MSG_LINK;
1999 lp->ctl_rfduplx = 0;
2000 lp->ctl_rspeed = 10;
2001
2002 if (lp->version >= (CHIP_91100 << 4)) {
2003 lp->ctl_rfduplx = 1;
2004 lp->ctl_rspeed = 100;
2005 }
2006
2007 /* Grab the IRQ */
a0607fd3 2008 retval = request_irq(dev->irq, smc_interrupt, irq_flags, dev->name, dev);
2009 if (retval)
2010 goto err_out;
2011
2012#ifdef CONFIG_ARCH_PXA
2013# ifdef SMC_USE_PXA_DMA
2014 lp->cfg.flags |= SMC91X_USE_DMA;
2015# endif
2016 if (lp->cfg.flags & SMC91X_USE_DMA) {
2017 int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW,
2018 smc_pxa_dma_irq, NULL);
2019 if (dma >= 0)
2020 dev->dma = dma;
2021 }
2022#endif
2023
2024 retval = register_netdev(dev);
2025 if (retval == 0) {
2026 /* now, print out the card info, in a short format.. */
2027 netdev_info(dev, "%s (rev %d) at %p IRQ %d",
2028 version_string, revision_register & 0x0f,
2029 lp->base, dev->irq);
2030
2031 if (dev->dma != (unsigned char)-1)
6389aa45 2032 pr_cont(" DMA %d", dev->dma);
1da177e4 2033
6389aa45 2034 pr_cont("%s%s\n",
d6bc372e 2035 lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "",
2036 THROTTLE_TX_PKTS ? " [throttle_tx]" : "");
2037
2038 if (!is_valid_ether_addr(dev->dev_addr)) {
6389aa45 2039 netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n");
2040 } else {
2041 /* Print the Ethernet address */
2042 netdev_info(dev, "Ethernet addr: %pM\n",
2043 dev->dev_addr);
2044 }
2045
2046 if (lp->phy_type == 0) {
6389aa45 2047 PRINTK(dev, "No PHY found\n");
1da177e4 2048 } else if ((lp->phy_type & 0xfffffff0) == 0x0016f840) {
6389aa45 2049 PRINTK(dev, "PHY LAN83C183 (LAN91C111 Internal)\n");
1da177e4 2050 } else if ((lp->phy_type & 0xfffffff0) == 0x02821c50) {
6389aa45 2051 PRINTK(dev, "PHY LAN83C180\n");
2052 }
2053 }
2054
2055err_out:
52256c0e 2056#ifdef CONFIG_ARCH_PXA
2057 if (retval && dev->dma != (unsigned char)-1)
2058 pxa_free_dma(dev->dma);
2059#endif
2060 return retval;
2061}
2062
2063static int smc_enable_device(struct platform_device *pdev)
2064{
2065 struct net_device *ndev = platform_get_drvdata(pdev);
2066 struct smc_local *lp = netdev_priv(ndev);
2067 unsigned long flags;
2068 unsigned char ecor, ecsr;
2069 void __iomem *addr;
2070 struct resource * res;
2071
2072 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2073 if (!res)
2074 return 0;
2075
2076 /*
2077 * Map the attribute space. This is overkill, but clean.
2078 */
2079 addr = ioremap(res->start, ATTRIB_SIZE);
2080 if (!addr)
2081 return -ENOMEM;
2082
2083 /*
2084 * Reset the device. We must disable IRQs around this
2085 * since a reset causes the IRQ line to become active.
2086 */
2087 local_irq_save(flags);
2088 ecor = readb(addr + (ECOR << SMC_IO_SHIFT)) & ~ECOR_RESET;
2089 writeb(ecor | ECOR_RESET, addr + (ECOR << SMC_IO_SHIFT));
2090 readb(addr + (ECOR << SMC_IO_SHIFT));
2091
2092 /*
2093 * Wait 100us for the chip to reset.
2094 */
2095 udelay(100);
2096
2097 /*
2098 * The device will ignore all writes to the enable bit while
2099 * reset is asserted, even if the reset bit is cleared in the
2100 * same write. Must clear reset first, then enable the device.
2101 */
2102 writeb(ecor, addr + (ECOR << SMC_IO_SHIFT));
2103 writeb(ecor | ECOR_ENABLE, addr + (ECOR << SMC_IO_SHIFT));
2104
2105 /*
2106 * Set the appropriate byte/word mode.
2107 */
2108 ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8;
3e947943 2109 if (!SMC_16BIT(lp))
09779c6d 2110 ecsr |= ECSR_IOIS8;
2111 writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT));
2112 local_irq_restore(flags);
2113
2114 iounmap(addr);
2115
2116 /*
2117 * Wait for the chip to wake up. We could poll the control
2118 * register in the main register space, but that isn't mapped
2119 * yet. We know this is going to take 750us.
2120 */
2121 msleep(1);
2122
2123 return 0;
2124}
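
/*
 * Editor's sketch (hypothetical helper): every attribute-space access in
 * smc_enable_device() above is a single byte read or write at
 * "attrib_base + (register << SMC_IO_SHIFT)", i.e. the ECOR/ECSR
 * registers are spaced out by the configured I/O shift.
 */
static inline u8 smc_example_attrib_readb(void __iomem *attrib, unsigned int reg)
{
	return readb(attrib + (reg << SMC_IO_SHIFT));
}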
2125
2126static int smc_request_attrib(struct platform_device *pdev,
2127 struct net_device *ndev)
2128{
2129 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
55c8eb6c 2130 struct smc_local *lp __maybe_unused = netdev_priv(ndev);
2131
2132 if (!res)
2133 return 0;
2134
2135 if (!request_mem_region(res->start, ATTRIB_SIZE, CARDNAME))
2136 return -EBUSY;
2137
2138 return 0;
2139}
2140
2141static void smc_release_attrib(struct platform_device *pdev,
2142 struct net_device *ndev)
2143{
2144 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
55c8eb6c 2145 struct smc_local *lp __maybe_unused = netdev_priv(ndev);
2146
2147 if (res)
2148 release_mem_region(res->start, ATTRIB_SIZE);
2149}
2150
09779c6d 2151static inline void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev)
1da177e4 2152{
2153 if (SMC_CAN_USE_DATACS) {
2154 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
2155 struct smc_local *lp = netdev_priv(ndev);
1da177e4 2156
2157 if (!res)
2158 return;
1da177e4 2159
09779c6d 2160 if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) {
2161 netdev_info(ndev, "%s: failed to request datacs memory region.\n",
2162 CARDNAME);
2163 return;
2164 }
1da177e4 2165
2166 lp->datacs = ioremap(res->start, SMC_DATA_EXTENT);
2167 }
2168}
2169
2170static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev)
2171{
2172 if (SMC_CAN_USE_DATACS) {
2173 struct smc_local *lp = netdev_priv(ndev);
2174 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
1da177e4 2175
2176 if (lp->datacs)
2177 iounmap(lp->datacs);
1da177e4 2178
09779c6d 2179 lp->datacs = NULL;
1da177e4 2180
2181 if (res)
2182 release_mem_region(res->start, SMC_DATA_EXTENT);
2183 }
1da177e4 2184}
1da177e4 2185
2186#if IS_BUILTIN(CONFIG_OF)
2187static const struct of_device_id smc91x_match[] = {
2188 { .compatible = "smsc,lan91c94", },
2189 { .compatible = "smsc,lan91c111", },
2190 {},
2191};
2192MODULE_DEVICE_TABLE(of, smc91x_match);
2193#endif
2194
2195/*
2196 * smc_init(void)
2197 * Input parameters:
2198 * dev->base_addr == 0, try to find all possible locations
2199 * dev->base_addr > 0x1ff, this is the address to check
2200 * dev->base_addr == <anything else>, return failure code
2201 *
2202 * Output:
2203 * 0 --> there is a device
2204 * anything else, error
2205 */
1e48fea4 2206static int smc_drv_probe(struct platform_device *pdev)
1da177e4 2207{
f64deaca 2208 struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);
3f823c15 2209 const struct of_device_id *match = NULL;
3e947943 2210 struct smc_local *lp;
1da177e4 2211 struct net_device *ndev;
e7b3dc7e 2212 struct resource *res, *ires;
1da177e4 2213 unsigned int __iomem *addr;
d280eadc 2214 unsigned long irq_flags = SMC_IRQ_FLAGS;
2215 int ret;
2216
2217 ndev = alloc_etherdev(sizeof(struct smc_local));
2218 if (!ndev) {
1da177e4 2219 ret = -ENOMEM;
15919886 2220 goto out;
1da177e4 2221 }
3ae5eaec 2222 SET_NETDEV_DEV(ndev, &pdev->dev);
1da177e4 2223
2224 /* get configuration from platform data, only allow use of
2225 * bus width if both SMC_CAN_USE_xxx and SMC91X_USE_xxx are set.
2226 */
2227
2228 lp = netdev_priv(ndev);
3f823c15 2229 lp->cfg.flags = 0;
3e947943 2230
15919886 2231 if (pd) {
3e947943 2232 memcpy(&lp->cfg, pd, sizeof(lp->cfg));
15919886 2233 lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
2234 }
2235
2236#if IS_BUILTIN(CONFIG_OF)
2237 match = of_match_device(of_match_ptr(smc91x_match), &pdev->dev);
2238 if (match) {
2239 struct device_node *np = pdev->dev.of_node;
2240 u32 val;
2241
2242 /* Combination of IO widths supported, default to 16-bit */
2243 if (!of_property_read_u32(np, "reg-io-width", &val)) {
2244 if (val & 1)
2245 lp->cfg.flags |= SMC91X_USE_8BIT;
2246 if ((val == 0) || (val & 2))
2247 lp->cfg.flags |= SMC91X_USE_16BIT;
2248 if (val & 4)
2249 lp->cfg.flags |= SMC91X_USE_32BIT;
2250 } else {
2251 lp->cfg.flags |= SMC91X_USE_16BIT;
2252 }
2253 }
2254#endif
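
	/*
	 * Illustrative device-tree node matching the parsing above (node
	 * name, reg and address values are examples only); "reg-io-width"
	 * is a bit mask: 1 = 8-bit, 2 = 16-bit, 4 = 32-bit bus accesses.
	 *
	 *	ethernet@4,300 {
	 *		compatible = "smsc,lan91c111";
	 *		reg = <4 0x300 0x100000>;
	 *		reg-io-width = <2>;
	 *	};
	 */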
2255
2256 if (!pd && !match) {
2257 lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0;
2258 lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0;
2259 lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0;
c4f0e767 2260 lp->cfg.flags |= (nowait) ? SMC91X_NOWAIT : 0;
2261 }
2262
2263 if (!lp->cfg.leda && !lp->cfg.ledb) {
2264 lp->cfg.leda = RPC_LSA_DEFAULT;
2265 lp->cfg.ledb = RPC_LSB_DEFAULT;
2266 }
2267
1da177e4 2268 ndev->dma = (unsigned char)-1;
e7b3dc7e 2269
2270 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
2271 if (!res)
2272 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2273 if (!res) {
2274 ret = -ENODEV;
2275 goto out_free_netdev;
2276 }
2277
2278
2279 if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) {
2280 ret = -EBUSY;
2281 goto out_free_netdev;
2282 }
2283
2284 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2285 if (!ires) {
48944738 2286 ret = -ENODEV;
15919886 2287 goto out_release_io;
48944738 2288 }
1da177e4 2289
e7b3dc7e 2290 ndev->irq = ires->start;
d280eadc 2291
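	/*
	 * A trigger type encoded in the interrupt resource overrides the
	 * platform default in SMC_IRQ_FLAGS; a default of -1 means "always
	 * take the trigger from the resource".
	 */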
d5ccd67b 2292 if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
d280eadc 2293 irq_flags = ires->flags & IRQF_TRIGGER_MASK;
e7b3dc7e 2294
15919886 2295 ret = smc_request_attrib(pdev, ndev);
1da177e4 2296 if (ret)
15919886 2297 goto out_release_io;
1da177e4 2298#if defined(CONFIG_SA1100_ASSABET)
6ad1b614 2299 neponset_ncr_set(NCR_ENET_OSC_EN);
1da177e4 2300#endif
3e947943 2301 platform_set_drvdata(pdev, ndev);
2302 ret = smc_enable_device(pdev);
2303 if (ret)
2304 goto out_release_attrib;
2305
2306 addr = ioremap(res->start, SMC_IO_EXTENT);
2307 if (!addr) {
2308 ret = -ENOMEM;
2309 goto out_release_attrib;
2310 }
2311
52256c0e 2312#ifdef CONFIG_ARCH_PXA
073ac8fd 2313 {
1da177e4 2314 struct smc_local *lp = netdev_priv(ndev);
073ac8fd 2315 lp->device = &pdev->dev;
2316 lp->physaddr = res->start;
2317 }
2318#endif
2319
d280eadc 2320 ret = smc_probe(ndev, addr, irq_flags);
2321 if (ret != 0)
2322 goto out_iounmap;
2323
2324 smc_request_datacs(pdev, ndev);
2325
2326 return 0;
2327
2328 out_iounmap:
2329 iounmap(addr);
2330 out_release_attrib:
15919886 2331 smc_release_attrib(pdev, ndev);
2332 out_release_io:
2333 release_mem_region(res->start, SMC_IO_EXTENT);
2334 out_free_netdev:
2335 free_netdev(ndev);
1da177e4 2336 out:
6389aa45 2337 pr_info("%s: not found (%d).\n", CARDNAME, ret);
2338
2339 return ret;
2340}
2341
1e48fea4 2342static int smc_drv_remove(struct platform_device *pdev)
1da177e4 2343{
3ae5eaec 2344 struct net_device *ndev = platform_get_drvdata(pdev);
2345 struct smc_local *lp = netdev_priv(ndev);
2346 struct resource *res;
2347
2348 unregister_netdev(ndev);
2349
2350 free_irq(ndev->irq, ndev);
2351
52256c0e 2352#ifdef CONFIG_ARCH_PXA
2353 if (ndev->dma != (unsigned char)-1)
2354 pxa_free_dma(ndev->dma);
2355#endif
2356 iounmap(lp->base);
2357
2358 smc_release_datacs(pdev,ndev);
15919886 2359 smc_release_attrib(pdev,ndev);
2360
2361 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
2362 if (!res)
6fc30db5 2363 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2364 release_mem_region(res->start, SMC_IO_EXTENT);
2365
2366 free_netdev(ndev);
2367
2368 return 0;
2369}
2370
9f950f72 2371static int smc_drv_suspend(struct device *dev)
1da177e4 2372{
2373 struct platform_device *pdev = to_platform_device(dev);
2374 struct net_device *ndev = platform_get_drvdata(pdev);
1da177e4 2375
9480e307 2376 if (ndev) {
2377 if (netif_running(ndev)) {
2378 netif_device_detach(ndev);
2379 smc_shutdown(ndev);
2380 smc_phy_powerdown(ndev);
2381 }
2382 }
2383 return 0;
2384}
2385
9f950f72 2386static int smc_drv_resume(struct device *dev)
1da177e4 2387{
2388 struct platform_device *pdev = to_platform_device(dev);
2389 struct net_device *ndev = platform_get_drvdata(pdev);
1da177e4 2390
9480e307 2391 if (ndev) {
1da177e4 2392 struct smc_local *lp = netdev_priv(ndev);
5fc34413 2393 smc_enable_device(pdev);
2394 if (netif_running(ndev)) {
2395 smc_reset(ndev);
2396 smc_enable(ndev);
2397 if (lp->phy_type != 0)
6d5aefb8 2398 smc_phy_configure(&lp->phy_configure);
2399 netif_device_attach(ndev);
2400 }
2401 }
2402 return 0;
2403}
2404
2405static struct dev_pm_ops smc_drv_pm_ops = {
2406 .suspend = smc_drv_suspend,
2407 .resume = smc_drv_resume,
2408};
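
/*
 * Editor's note (equivalent formulation, not what the driver does): the
 * same suspend/resume pair could also be declared with the helper macro
 *
 *	static SIMPLE_DEV_PM_OPS(smc_drv_pm_ops, smc_drv_suspend,
 *				 smc_drv_resume);
 *
 * which additionally wires the freeze/thaw/poweroff/restore hooks to the
 * same callbacks.
 */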
2409
3ae5eaec 2410static struct platform_driver smc_driver = {
1da177e4 2411 .probe = smc_drv_probe,
1e48fea4 2412 .remove = smc_drv_remove,
2413 .driver = {
2414 .name = CARDNAME,
72abb461 2415 .owner = THIS_MODULE,
9f950f72 2416 .pm = &smc_drv_pm_ops,
89ce376c 2417 .of_match_table = of_match_ptr(smc91x_match),
3ae5eaec 2418 },
2419};
2420
db62f684 2421module_platform_driver(smc_driver);