/*
	Written 1998-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210

	Support information and updates available at
	http://www.scyld.com/network/pci-skeleton.html

	Version 2.51, Nov 17, 2001 (jgarzik):
	- Replace some MII-related magic numbers with constants
*/
#define DRV_NAME	"fealnx"
#define DRV_VERSION	"2.52"
#define DRV_RELDATE	"Sep-11-2006"

static int debug;			/* 1-> print debug message */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
/* Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;
/* Used to pass the media type, etc. */
/* Both 'options[]' and 'full_duplex[]' should exist for driver */
/* interoperability. */
/* The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
/* Operational parameters that are set at compile time. */
/* Keep the ring sizes a power of two for compile efficiency. */
/* The compiler will convert <unsigned>'%'<2^N> into a bit mask. */
/* Making the Tx ring too large decreases the effectiveness of channel */
/* bonding and packet priority. */
/* There are no ill effects from too-large receive rings. */
// #define TX_RING_SIZE    16
// #define RX_RING_SIZE    32
#define TX_RING_SIZE    6
#define RX_RING_SIZE    12
#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct fealnx_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct fealnx_desc)

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2*HZ)

#define PKT_BUF_SZ      1536	/* Size of each temporary Rx buffer. */
/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/uaccess.h>
#include <asm/byteorder.h>
/* These identify the driver base version and may not be removed. */
static char version[] =
	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";


/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses. */

/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
/* This is only in the support-all-kernels source code. */

#define RUN_AT(x) (jiffies + (x))
MODULE_AUTHOR("Myson or whoever");
MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
enum {
	MIN_REGION_SIZE = 136,
};

/* A chip capabilities table, matching the entries in pci_tbl[] above. */
enum chip_capability_flags {
	HAS_MII_XCVR,
	HAS_CHIP_XCVR,
};

/* for different PHY */
enum phy_type_flags {
	MysonPHY = 1,
	AhdocPHY = 2,
	SeeqPHY = 3,
	MarvellPHY = 4,
	Myson981 = 5,
	LevelOnePHY = 6,
	OtherPHY = 10,
};

static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
	{ "100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
	{ "100/10M Ethernet PCI Adapter",	HAS_CHIP_XCVR },
	{ "1000/100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
};
/* Offsets to the Command and Status Registers. */
enum fealnx_offsets {
	PAR0 = 0x0,		/* physical address 0-3 */
	PAR1 = 0x04,		/* physical address 4-5 */
	MAR0 = 0x08,		/* multicast address 0-3 */
	MAR1 = 0x0C,		/* multicast address 4-7 */
	FAR0 = 0x10,		/* flow-control address 0-3 */
	FAR1 = 0x14,		/* flow-control address 4-5 */
	TCRRCR = 0x18,		/* receive & transmit configuration */
	BCR = 0x1C,		/* bus command */
	TXPDR = 0x20,		/* transmit polling demand */
	RXPDR = 0x24,		/* receive polling demand */
	RXCWP = 0x28,		/* receive current word pointer */
	TXLBA = 0x2C,		/* transmit list base address */
	RXLBA = 0x30,		/* receive list base address */
	ISR = 0x34,		/* interrupt status */
	IMR = 0x38,		/* interrupt mask */
	FTH = 0x3C,		/* flow control high/low threshold */
	MANAGEMENT = 0x40,	/* bootrom/eeprom and mii management */
	TALLY = 0x44,		/* tally counters for crc and mpa */
	TSR = 0x48,		/* tally counter for transmit status */
	BMCRSR = 0x4c,		/* basic mode control and status */
	PHYIDENTIFIER = 0x50,	/* phy identifier */
	ANARANLPAR = 0x54,	/* auto-negotiation advertisement and link
				   partner ability */
	ANEROCR = 0x58,		/* auto-negotiation expansion and pci conf. */
	BPREMRPSR = 0x5c,	/* bypass & receive error mask and phy status */
};
/* Bits in the interrupt status/enable registers. */
/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
enum intr_status_bits {
	RFCON = 0x00020000,	/* receive flow control xon packet */
	RFCOFF = 0x00010000,	/* receive flow control xoff packet */
	LSCStatus = 0x00008000,	/* link status change */
	ANCStatus = 0x00004000,	/* autonegotiation completed */
	FBE = 0x00002000,	/* fatal bus error */
	FBEMask = 0x00001800,	/* mask bit12-11 */
	ParityErr = 0x00000000,	/* parity error */
	TargetErr = 0x00001000,	/* target abort */
	MasterErr = 0x00000800,	/* master error */
	TUNF = 0x00000400,	/* transmit underflow */
	ROVF = 0x00000200,	/* receive overflow */
	ETI = 0x00000100,	/* transmit early int */
	ERI = 0x00000080,	/* receive early int */
	CNTOVF = 0x00000040,	/* counter overflow */
	RBU = 0x00000020,	/* receive buffer unavailable */
	TBU = 0x00000010,	/* transmit buffer unavailable */
	TI = 0x00000008,	/* transmit interrupt */
	RI = 0x00000004,	/* receive interrupt */
	RxErr = 0x00000002,	/* receive error */
};
/* Bits in the NetworkConfig register, W for writing, R for reading */
/* FIXME: some names are invented by me. Marked with (name?) */
/* If you have docs and know bit names, please fix 'em */
enum {
	CR_W_ENH = 0x02000000,		/* enhanced mode (name?) */
	CR_W_FD = 0x00100000,		/* full duplex */
	CR_W_PS10 = 0x00080000,		/* 10 mbit */
	CR_W_TXEN = 0x00040000,		/* tx enable (name?) */
	CR_W_PS1000 = 0x00010000,	/* 1000 mbit */
/*	CR_W_RXBURSTMASK = 0x00000e00,	   I'm unsure about this */
	CR_W_RXMODEMASK = 0x000000e0,
	CR_W_PROM = 0x00000080,		/* promiscuous mode */
	CR_W_AB = 0x00000040,		/* accept broadcast */
	CR_W_AM = 0x00000020,		/* accept multicast */
	CR_W_ARP = 0x00000008,		/* receive runt pkt */
	CR_W_ALP = 0x00000004,		/* receive long pkt */
	CR_W_SEP = 0x00000002,		/* receive error pkt */
	CR_W_RXEN = 0x00000001,		/* rx enable (unicast?) (name?) */

	CR_R_TXSTOP = 0x04000000,	/* tx stopped (name?) */
	CR_R_FD = 0x00100000,		/* full duplex detected */
	CR_R_PS10 = 0x00080000,		/* 10 mbit detected */
	CR_R_RXSTOP = 0x00008000,	/* rx stopped (name?) */
};
/* The Tulip Rx and Tx buffer descriptors. */
struct fealnx_desc {
	s32 status;
	s32 control;
	u32 buffer;
	u32 next_desc;
	struct fealnx_desc *next_desc_logical;
	struct sk_buff *skbuff;
};
/* Bits in network_desc.status */
enum rx_desc_status_bits {
	RXOWN = 0x80000000,	/* own bit */
	FLNGMASK = 0x0fff0000,	/* frame length */
	MARSTATUS = 0x00004000,	/* multicast address received */
	BARSTATUS = 0x00002000,	/* broadcast address received */
	PHYSTATUS = 0x00001000,	/* physical address received */
	RXFSD = 0x00000800,	/* first descriptor */
	RXLSD = 0x00000400,	/* last descriptor */
	ErrorSummary = 0x80,	/* error summary */
	RUNT = 0x40,		/* runt packet received */
	LONG = 0x20,		/* long packet received */
	FAE = 0x10,		/* frame align error */
	CRC = 0x08,		/* crc error */
	RXER = 0x04,		/* receive error */
};

enum rx_desc_control_bits {
	RXIC = 0x00800000,	/* interrupt control */
};

enum tx_desc_status_bits {
	TXOWN = 0x80000000,	/* own bit */
	JABTO = 0x00004000,	/* jabber timeout */
	CSL = 0x00002000,	/* carrier sense lost */
	LC = 0x00001000,	/* late collision */
	EC = 0x00000800,	/* excessive collision */
	UDF = 0x00000400,	/* fifo underflow */
	DFR = 0x00000200,	/* deferred */
	HF = 0x00000100,	/* heartbeat fail */
	NCRMask = 0x000000ff,	/* collision retry count */
};

enum tx_desc_control_bits {
	TXIC = 0x80000000,	/* interrupt control */
	ETIControl = 0x40000000,	/* early transmit interrupt */
	TXLD = 0x20000000,	/* last descriptor */
	TXFD = 0x10000000,	/* first descriptor */
	CRCEnable = 0x08000000,	/* crc control */
	PADEnable = 0x04000000,	/* padding control */
	RetryTxLC = 0x02000000,	/* retry late collision */
	PKTSMask = 0x3ff800,	/* packet size bit21-11 */
	TBSMask = 0x000007ff,	/* transmit buffer bit 10-0 */
};
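/*
 * Worked example (illustrative only, assuming PKTSShift == 11 and TBSShift == 0
 * as implied by PKTSMask/TBSMask): a 60-byte packet sent from a single buffer
 * would carry a descriptor control word of
 *
 *     TXIC | TXLD | TXFD | CRCEnable | PADEnable   ->  0xbc000000
 *     (60 << PKTSShift)                            ->  0x0001e000
 *     (60 << TBSShift)                             ->  0x0000003c
 *     --------------------------------------------------------------
 *     control                                      =   0xbc01e03c
 *
 * which matches the way start_tx() below fills in descriptors.
 */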
/* BootROM/EEPROM/MII Management Register */
#define MASK_MIIR_MII_READ	0x00000000
#define MASK_MIIR_MII_WRITE	0x00000008
#define MASK_MIIR_MII_MDO	0x00000004
#define MASK_MIIR_MII_MDI	0x00000002
#define MASK_MIIR_MII_MDC	0x00000001

/* ST+OP+PHYAD+REGAD+TA */
#define OP_READ		0x6000	/* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
#define OP_WRITE	0x5002	/* ST:01+OP:01+PHYAD+REGAD+TA:10 */
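/*
 * Illustrative only: the 16-bit management frame that m80x_send_cmd_to_phy()
 * shifts out over MDIO is OP_READ or OP_WRITE with the PHY address and the
 * register address merged in, e.g. reading register 1 of the PHY at address 3:
 *
 *     data = OP_READ | (3 << 7) | (1 << 2);   // 0x6000 | 0x0180 | 0x0004 = 0x6184
 *
 * i.e. ST(01) OP(10) PHYAD(00011) REGAD(00001) TA, matching the bit layout
 * named in the comment above.
 */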
/* ------------------------------------------------------------------------- */
/*      Constants for Myson PHY                                               */
/* ------------------------------------------------------------------------- */
#define MysonPHYID	0xd0000302
/* 89-7-27 add, (begin) */
#define MysonPHYID0	0x0302
#define StatusRegister	18
#define SPEED100	0x0400	// bit10
#define FULLMODE	0x0800	// bit11
/* 89-7-27 add, (end) */

/* ------------------------------------------------------------------------- */
/*      Constants for Seeq 80225 PHY                                          */
/* ------------------------------------------------------------------------- */
#define SeeqPHYID0	0x0016

#define MIIRegister18	18
#define SPD_DET_100	0x80
#define DPLX_DET_FULL	0x40

/* ------------------------------------------------------------------------- */
/*      Constants for Ahdoc 101 PHY                                           */
/* ------------------------------------------------------------------------- */
#define AhdocPHYID0	0x0022

#define DiagnosticReg	18
#define DPLX_FULL	0x0800
#define Speed_100	0x0400
/* -------------------------------------------------------------------------- */
/*      Constants for Marvell and LevelOne PHYs                                */
/* -------------------------------------------------------------------------- */
#define MarvellPHYID0		0x0141
#define LevelOnePHYID0		0x0013

#define MII1000BaseTControlReg	9
#define MII1000BaseTStatusReg	10
#define SpecificReg		17

/* for 1000BaseT Control Register */
#define PHYAbletoPerform1000FullDuplex	0x0200
#define PHYAbletoPerform1000HalfDuplex	0x0100
#define PHY1000AbilityMask		0x300

// for phy specific status register, marvell phy.
#define SpeedMask	0x0c000
#define Speed_1000M	0x08000
#define Speed_100M	0x4000

#define Full_Duplex	0x2000

// 89/12/29 add, for phy specific status register, levelone phy, (begin)
#define LXT1000_100M	0x08000
#define LXT1000_1000M	0x0c000
#define LXT1000_Full	0x200
// 89/12/29 add, for phy specific status register, levelone phy, (end)

/* for 3-in-1 case, BMCRSR register */
#define LinkIsUp2	0x00040000

#define LinkIsUp	0x0004
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct fealnx_desc *rx_ring;
	struct fealnx_desc *tx_ring;

	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	struct net_device_stats stats;

	/* Media monitoring timer. */
	struct timer_list timer;

	struct timer_list reset_timer;
	int reset_timer_armed;
	unsigned long crvalue_sv;
	unsigned long imrvalue_sv;

	/* Frequently used values: keep some adjacent for cache effect. */
	struct pci_dev *pci_dev;
	unsigned long crvalue;
	unsigned long bcrvalue;
	unsigned long imrvalue;
	struct fealnx_desc *cur_rx;
	struct fealnx_desc *lack_rxbuf;
	struct fealnx_desc *cur_tx;
	struct fealnx_desc *cur_tx_copy;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */

	/* These values keep track of the transceiver/media in use. */
	unsigned int line_speed;
	unsigned int duplexmode;
	unsigned int default_port:4;	/* Last dev->if_port value. */
	unsigned int PHYType;

	/* MII transceiver section. */
	int mii_cnt;			/* MII device addresses. */
	unsigned char phys[2];		/* MII device addresses. */
	struct mii_if_info mii;
};
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void reset_timer(unsigned long data);
static void fealnx_tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static int netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);
static void reset_rx_descriptors(struct net_device *dev);
static void reset_tx_descriptors(struct net_device *dev);
static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;

	iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ((ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
			break;
	}
}


static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;

	iowrite32(crvalue & ~(CR_W_RXEN + CR_W_TXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ((ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP + CR_R_TXSTOP))
		    == (CR_R_RXSTOP + CR_R_TXSTOP))
			break;
	}
}
static int __devinit fealnx_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct netdev_private *np;
	int i, option, err, irq;
	static int card_idx = -1;
	void __iomem *ioaddr;
	unsigned int chip_id = ent->driver_data;
	struct net_device *dev;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	sprintf(boardname, "fealnx%d", card_idx);

	option = card_idx < MAX_UNITS ? options[card_idx] : 0;

	i = pci_enable_device(pdev);
	pci_set_master(pdev);

	len = pci_resource_len(pdev, bar);
	if (len < MIN_REGION_SIZE) {
		dev_err(&pdev->dev,
			"region size %ld too small, aborting\n", len);
		return -ENODEV;
	}

	i = pci_request_regions(pdev, boardname);

	ioaddr = pci_iomap(pdev, bar, len);

	dev = alloc_etherdev(sizeof(struct netdev_private));

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* read ethernet id */
	for (i = 0; i < 6; ++i)
		dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);

	/* Reset the chip to erase previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + BCR);

	dev->base_addr = (unsigned long)ioaddr;

	/* Make certain the descriptor lists are aligned. */
	np = netdev_priv(dev);
	spin_lock_init(&np->lock);

	np->flags = skel_netdrv_tbl[chip_id].flags;
	pci_set_drvdata(pdev, dev);

	np->mii.mdio_read = mdio_read;
	np->mii.mdio_write = mdio_write;
	np->mii.phy_id_mask = 0x1f;
	np->mii.reg_num_mask = 0x1f;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_free_dev;
	np->rx_ring = (struct fealnx_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_free_rx;
	np->tx_ring = (struct fealnx_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	/* find the connected MII xcvrs */
	if (np->flags == HAS_MII_XCVR) {
		int phy, phy_idx = 0;

		for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
			int mii_status = mdio_read(dev, phy, 1);

			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					 "MII PHY found at address %d, status "
					 "0x%4.4x.\n", phy, mii_status);
			}
		}

		data = mdio_read(dev, np->phys[0], 2);
		if (data == SeeqPHYID0)
			np->PHYType = SeeqPHY;
		else if (data == AhdocPHYID0)
			np->PHYType = AhdocPHY;
		else if (data == MarvellPHYID0)
			np->PHYType = MarvellPHY;
		else if (data == MysonPHYID0)
			np->PHYType = Myson981;
		else if (data == LevelOnePHYID0)
			np->PHYType = LevelOnePHY;
		else
			np->PHYType = OtherPHY;

		np->mii_cnt = phy_idx;
		if (phy_idx == 0)
			dev_warn(&pdev->dev,
				 "MII PHY not found -- this device may "
				 "not operate correctly.\n");
	} else {
		/* 89/6/23 add, (begin) */
		if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
			np->PHYType = MysonPHY;
		else
			np->PHYType = OtherPHY;
	}
	np->mii.phy_id = np->phys[0];

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii.full_duplex = 1;
		np->default_port = option & 15;
	}

	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		np->mii.full_duplex = full_duplex[card_idx];

	if (np->mii.full_duplex) {
		dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
		/* 89/6/13 add, (begin) */
		// if (np->PHYType==MarvellPHY)
		if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
			data = mdio_read(dev, np->phys[0], 9);
			data = (data & 0xfcff) | 0x0200;
			mdio_write(dev, np->phys[0], 9, data);
		}
		/* 89/6/13 add, (end) */
		if (np->flags == HAS_MII_XCVR)
			mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
		else
			iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
		np->mii.force_media = 1;
	}

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &mii_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = &fealnx_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	err = register_netdev(dev);
	if (err)
		goto err_out_free_tx;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
	       dev->dev_addr, irq);

	return 0;

err_out_free_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_free_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_free_dev:
	pci_iounmap(pdev, ioaddr);
	pci_release_regions(pdev);
	return err;
}
static void __devexit fealnx_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
				    np->tx_ring_dma);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
				    np->rx_ring_dma);
		unregister_netdev(dev);
		pci_iounmap(pdev, np->mem);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else
		printk(KERN_ERR "fealnx: remove for unknown device\n");
}
static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
{
	unsigned int mask, data;

	/* enable MII output */
	miir = (ulong) ioread32(miiport);

	miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;

	/* send 32 1's preamble */
	for (i = 0; i < 32; i++) {
		/* low MDC; MDO is already high (miir) */
		miir &= ~MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
	}

	/* calculate ST+OP+PHYAD+REGAD+TA */
	data = opcode | (phyad << 7) | (regad << 2);

	/* send out the command, MSB first */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MASK_MIIR_MII_MDO;

		iowrite32(miir, miiport);
		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* next */
		mask >>= 1;
		if (mask == 0x2 && opcode == OP_READ)
			miir &= ~MASK_MIIR_MII_WRITE;
	}
	return miir;
}
static int mdio_read(struct net_device *dev, int phyad, int regad)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *miiport = np->mem + MANAGEMENT;
	unsigned int mask, data;

	miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);

	/* read data */
	mask = 0x8000;
	data = 0;
	while (mask) {
		/* low MDC */
		miir &= ~MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* read MDI */
		miir = ioread32(miiport);
		if (miir & MASK_MIIR_MII_MDI)
			data |= mask;

		/* high MDC, and wait */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* next */
		mask >>= 1;
	}

	/* low MDC */
	miir &= ~MASK_MIIR_MII_MDC;
	iowrite32(miir, miiport);

	return data & 0xffff;
}
static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *miiport = np->mem + MANAGEMENT;

	miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);

	/* write data */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MASK_MIIR_MII_MDO;
		iowrite32(miir, miiport);

		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* next */
		mask >>= 1;
	}

	/* low MDC */
	miir &= ~MASK_MIIR_MII_MDC;
	iowrite32(miir, miiport);
}
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	iowrite32(0x00000001, ioaddr + BCR);	/* Reset */

	if (request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev))
		return -EAGAIN;

	for (i = 0; i < 3; i++)
		iowrite16(((unsigned short*)dev->dev_addr)[i],
			  ioaddr + PAR0 + i*2);

	iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
	iowrite32(np->tx_ring_dma, ioaddr + TXLBA);

	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword burst.
	   Wait the specified 50 PCI cycles after a reset by initializing
	   Tx and Rx queues and the address filter list.
	   FIXME (Ueimor): optimistic for alpha + posted writes ? */

	np->bcrvalue = 0x10;		/* little-endian, 8 burst length */
#ifdef __BIG_ENDIAN
	np->bcrvalue |= 0x04;		/* big-endian */
#endif

#if defined(__i386__) && !defined(MODULE)
	if (boot_cpu_data.x86 <= 4)
		np->crvalue = 0xa00;
	else
#endif
		np->crvalue = 0xe00;	/* rx 128 burst length */

	// np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
	np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
	if (np->pci_dev->device == 0x891) {
		np->bcrvalue |= 0x200;	/* set PROG bit */
		np->crvalue |= CR_W_ENH;	/* set enhanced bit */
	}
	iowrite32(np->bcrvalue, ioaddr + BCR);

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	iowrite32(0, ioaddr + RXPDR);
	// np->crvalue = 0x00e40001;	/* tx store and forward, tx/rx enable */
	np->crvalue |= 0x00e40001;	/* tx store and forward, tx/rx enable */
	np->mii.full_duplex = np->mii.force_media;

	netif_start_queue(dev);

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
	iowrite32(np->imrvalue, ioaddr + IMR);

	if (debug)
		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = RUN_AT(3 * HZ);
	np->timer.data = (unsigned long) dev;
	np->timer.function = &netdev_timer;

	add_timer(&np->timer);

	init_timer(&np->reset_timer);
	np->reset_timer.data = (unsigned long) dev;
	np->reset_timer.function = &reset_timer;
	np->reset_timer_armed = 0;

	return 0;
}
static void getlinkstatus(struct net_device *dev)
/* function: Routine will read MII Status Register to get link status.       */
/* input   : dev... pointer to the adapter block.                            */
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned int i, DelayTime = 0x1000;

	np->linkok = 0;

	if (np->PHYType == MysonPHY) {
		for (i = 0; i < DelayTime; ++i) {
			if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
				np->linkok = 1;
				return;
			}
			udelay(100);
		}
	} else {
		for (i = 0; i < DelayTime; ++i) {
			if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
				np->linkok = 1;
				return;
			}
			udelay(100);
		}
	}
}
static void getlinktype(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	if (np->PHYType == MysonPHY) {	/* 3-in-1 case */
		if (ioread32(np->mem + TCRRCR) & CR_R_FD)
			np->duplexmode = 2;	/* full duplex */
		else
			np->duplexmode = 1;	/* half duplex */
		if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
			np->line_speed = 1;	/* 10M */
		else
			np->line_speed = 2;	/* 100M */
	} else {
		if (np->PHYType == SeeqPHY) {	/* this PHY is SEEQ 80225 */
			data = mdio_read(dev, np->phys[0], MIIRegister18);
			if (data & SPD_DET_100)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
			if (data & DPLX_DET_FULL)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
		} else if (np->PHYType == AhdocPHY) {
			data = mdio_read(dev, np->phys[0], DiagnosticReg);
			if (data & Speed_100)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
			if (data & DPLX_FULL)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
		}
		/* 89/6/13 add, (begin) */
		else if (np->PHYType == MarvellPHY) {
			data = mdio_read(dev, np->phys[0], SpecificReg);
			if (data & Full_Duplex)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
			data &= SpeedMask;
			if (data == Speed_1000M)
				np->line_speed = 3;	/* 1000M */
			else if (data == Speed_100M)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
		}
		/* 89/6/13 add, (end) */
		/* 89/7/27 add, (begin) */
		else if (np->PHYType == Myson981) {
			data = mdio_read(dev, np->phys[0], StatusRegister);

			if (data & SPEED100)
				np->line_speed = 2;
			else
				np->line_speed = 1;

			if (data & FULLMODE)
				np->duplexmode = 2;
			else
				np->duplexmode = 1;
		}
		/* 89/7/27 add, (end) */
		else if (np->PHYType == LevelOnePHY) {
			data = mdio_read(dev, np->phys[0], SpecificReg);
			if (data & LXT1000_Full)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
			data &= SpeedMask;
			if (data == LXT1000_1000M)
				np->line_speed = 3;	/* 1000M */
			else if (data == LXT1000_100M)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
		}

		np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
		if (np->line_speed == 1)
			np->crvalue |= CR_W_PS10;
		else if (np->line_speed == 3)
			np->crvalue |= CR_W_PS1000;
		if (np->duplexmode == 2)
			np->crvalue |= CR_W_FD;
	}
}
/* Take lock before calling this */
static void allocate_rx_buffers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* allocate skb for rx buffers */
	while (np->really_rx_count != RX_RING_SIZE) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(np->rx_buf_sz);
		if (skb == NULL)
			break;	/* Better luck next round. */

		while (np->lack_rxbuf->skbuff)
			np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;

		skb->dev = dev;	/* Mark as being used by this device. */
		np->lack_rxbuf->skbuff = skb;
		np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		np->lack_rxbuf->status = RXOWN;
		++np->really_rx_count;
	}
}
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int old_crvalue = np->crvalue;
	unsigned int old_linkok = np->linkok;
	unsigned long flags;

	if (debug)
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
		       "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
		       ioread32(ioaddr + TCRRCR));

	spin_lock_irqsave(&np->lock, flags);

	if (np->flags == HAS_MII_XCVR) {
		getlinkstatus(dev);
		if ((old_linkok == 0) && (np->linkok == 1)) {	/* we need to detect the media type again */
			getlinktype(dev);
			if (np->crvalue != old_crvalue) {
				stop_nic_rxtx(ioaddr, np->crvalue);
				iowrite32(np->crvalue, ioaddr + TCRRCR);
			}
		}
	}

	allocate_rx_buffers(dev);

	spin_unlock_irqrestore(&np->lock, flags);

	np->timer.expires = RUN_AT(10 * HZ);
	add_timer(&np->timer);
}
/* Take lock before calling */
/* Reset chip and disable rx, tx and interrupts */
static void reset_and_disable_rxtx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int delay = 51;

	/* Reset the chip's Tx and Rx processes. */
	stop_nic_rxtx(ioaddr, 0);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0, ioaddr + IMR);

	/* Reset the chip to erase previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + BCR);

	/* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
	   We surely wait too long (address+data phase). Who cares? */
	while (--delay)
		ioread32(ioaddr + BCR);
}
/* Take lock before calling */
/* Restore chip after reset */
static void enable_rxtx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	reset_rx_descriptors(dev);

	iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
		  ioaddr + TXLBA);
	iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
		  ioaddr + RXLBA);

	iowrite32(np->bcrvalue, ioaddr + BCR);

	iowrite32(0, ioaddr + RXPDR);
	__set_rx_mode(dev);	/* changes np->crvalue, writes it into TCRRCR */

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
	iowrite32(np->imrvalue, ioaddr + IMR);

	iowrite32(0, ioaddr + TXPDR);
}
static void reset_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned long flags;

	printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);

	spin_lock_irqsave(&np->lock, flags);
	np->crvalue = np->crvalue_sv;
	np->imrvalue = np->imrvalue_sv;

	reset_and_disable_rxtx(dev);
	/* works for me without this:
	   reset_tx_descriptors(dev); */
	enable_rxtx(dev);
	netif_start_queue(dev);	/* FIXME: or netif_wake_queue(dev); ? */

	np->reset_timer_armed = 0;

	spin_unlock_irqrestore(&np->lock, flags);
}
static void fealnx_tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	unsigned long flags;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
	       " resetting...\n", dev->name, ioread32(ioaddr + ISR));

	printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
	for (i = 0; i < RX_RING_SIZE; i++)
		printk(" %8.8x", (unsigned int) np->rx_ring[i].status);
	printk("\n" KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
	for (i = 0; i < TX_RING_SIZE; i++)
		printk(" %4.4x", np->tx_ring[i].status);

	spin_lock_irqsave(&np->lock, flags);

	reset_and_disable_rxtx(dev);
	reset_tx_descriptors(dev);
	enable_rxtx(dev);

	spin_unlock_irqrestore(&np->lock, flags);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	netif_wake_queue(dev);	/* or .._start_.. ?? */
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* initialize rx variables */
	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	np->cur_rx = &np->rx_ring[0];
	np->lack_rxbuf = np->rx_ring;
	np->really_rx_count = 0;

	/* initial rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
		np->rx_ring[i].next_desc = np->rx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
		np->rx_ring[i].skbuff = NULL;
	}

	/* for the last rx descriptor */
	np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
	np->rx_ring[i - 1].next_desc_logical = np->rx_ring;

	/* allocate skb for rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);

		if (skb == NULL) {
			np->lack_rxbuf = &np->rx_ring[i];
			break;
		}

		++np->really_rx_count;
		np->rx_ring[i].skbuff = skb;
		skb->dev = dev;	/* Mark as being used by this device. */
		np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		np->rx_ring[i].status = RXOWN;
		np->rx_ring[i].control |= RXIC;
	}

	/* initialize tx variables */
	np->cur_tx = &np->tx_ring[0];
	np->cur_tx_copy = &np->tx_ring[0];
	np->really_tx_count = 0;
	np->free_tx_count = TX_RING_SIZE;

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].status = 0;
		/* do we need np->tx_ring[i].control = XXX; ?? */
		np->tx_ring[i].next_desc = np->tx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
		np->tx_ring[i].skbuff = NULL;
	}

	/* for the last tx descriptor */
	np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
	np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
}
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);

	np->cur_tx_copy->skbuff = skb;

#if defined(one_buffer)
	np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
		skb->len, PCI_DMA_TODEVICE);
	np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
	np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
	np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */

	if (np->pci_dev->device == 0x891)
		np->cur_tx_copy->control |= ETIControl | RetryTxLC;
	np->cur_tx_copy->status = TXOWN;
	np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
	--np->free_tx_count;
#elif defined(two_buffer)
	if (skb->len > BPT) {
		struct fealnx_desc *next;

		/* for the first descriptor */
		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
			BPT, PCI_DMA_TODEVICE);
		np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (BPT << TBSShift);	/* buffer size */

		/* for the last descriptor */
		next = np->cur_tx_copy->next_desc_logical;
		next->control = TXIC | TXLD | CRCEnable | PADEnable;
		next->control |= (skb->len << PKTSShift);	/* pkt size */
		next->control |= ((skb->len - BPT) << TBSShift);	/* buf size */

		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		next->buffer = pci_map_single(np->pci_dev, skb->data + BPT,
			skb->len - BPT, PCI_DMA_TODEVICE);

		next->status = TXOWN;
		np->cur_tx_copy->status = TXOWN;

		np->cur_tx_copy = next->next_desc_logical;
		np->free_tx_count -= 2;
	} else {
		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
			skb->len, PCI_DMA_TODEVICE);
		np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */

		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		np->cur_tx_copy->status = TXOWN;
		np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
		--np->free_tx_count;
	}
#endif

	if (np->free_tx_count < 2)
		netif_stop_queue(dev);
	++np->really_tx_count;
	iowrite32(0, np->mem + TXPDR);
	dev->trans_start = jiffies;

	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}
/* Take lock before calling */
/* Chip probably hosed tx ring. Clean up. */
static void reset_tx_descriptors(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct fealnx_desc *cur;
	int i;

	/* initialize tx variables */
	np->cur_tx = &np->tx_ring[0];
	np->cur_tx_copy = &np->tx_ring[0];
	np->really_tx_count = 0;
	np->free_tx_count = TX_RING_SIZE;

	for (i = 0; i < TX_RING_SIZE; i++) {
		cur = &np->tx_ring[i];
		if (cur->skbuff) {
			pci_unmap_single(np->pci_dev, cur->buffer,
				cur->skbuff->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cur->skbuff);
			cur->skbuff = NULL;
		}
		cur->control = 0;	/* needed? */
		/* probably not needed. We do it for purely paranoid reasons */
		cur->next_desc = np->tx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		cur->next_desc_logical = &np->tx_ring[i + 1];
	}
	/* for the last tx descriptor */
	np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
	np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
}
/* Take lock and stop rx before calling this */
static void reset_rx_descriptors(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct fealnx_desc *cur = np->cur_rx;
	int i;

	allocate_rx_buffers(dev);

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (cur->skbuff)
			cur->status = RXOWN;
		cur = cur->next_desc_logical;
	}

	iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
		  np->mem + RXLBA);
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	long boguscnt = max_interrupt_work;
	unsigned int num_tx = 0;
	int handled = 0;

	spin_lock(&np->lock);

	iowrite32(0, ioaddr + IMR);

	do {
		u32 intr_status = ioread32(ioaddr + ISR);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status, ioaddr + ISR);

		if (debug)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
			       intr_status);

		if (!(intr_status & np->imrvalue))
			break;

		handled = 1;

//      if (intr_status & FBE)
//      {   /* fatal error */
//          stop_nic_tx(ioaddr, 0);
//          stop_nic_rx(ioaddr, 0);
//      }

		if (intr_status & TUNF)
			iowrite32(0, ioaddr + TXPDR);

		if (intr_status & CNTOVF) {
			/* missed pkts */
			np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;

			/* crc error */
			np->stats.rx_crc_errors +=
			    (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
		}

		if (intr_status & (RI | RBU)) {
			if (intr_status & RI)
				netdev_rx(dev);
			else {
				stop_nic_rx(ioaddr, np->crvalue);
				reset_rx_descriptors(dev);
				iowrite32(np->crvalue, ioaddr + TCRRCR);
			}
		}

		while (np->really_tx_count) {
			long tx_status = np->cur_tx->status;
			long tx_control = np->cur_tx->control;

			if (!(tx_control & TXLD)) {	/* this pkt is combined by two tx descriptors */
				struct fealnx_desc *next;

				next = np->cur_tx->next_desc_logical;
				tx_status = next->status;
				tx_control = next->control;
			}

			if (tx_status & TXOWN)
				break;

			if (!(np->crvalue & CR_W_ENH)) {
				if (tx_status & (CSL | LC | EC | UDF | HF)) {
					np->stats.tx_errors++;
					if (tx_status & EC)
						np->stats.tx_aborted_errors++;
					if (tx_status & CSL)
						np->stats.tx_carrier_errors++;
					if (tx_status & LC)
						np->stats.tx_window_errors++;
					if (tx_status & UDF)
						np->stats.tx_fifo_errors++;
					if ((tx_status & HF) && np->mii.full_duplex == 0)
						np->stats.tx_heartbeat_errors++;
				} else {
					np->stats.tx_bytes +=
					    ((tx_control & PKTSMask) >> PKTSShift);

					np->stats.collisions +=
					    ((tx_status & NCRMask) >> NCRShift);
					np->stats.tx_packets++;
				}
			} else {
				np->stats.tx_bytes +=
				    ((tx_control & PKTSMask) >> PKTSShift);
				np->stats.tx_packets++;
			}

			/* Free the original skb. */
			pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
				np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(np->cur_tx->skbuff);
			np->cur_tx->skbuff = NULL;
			--np->really_tx_count;
			if (np->cur_tx->control & TXLD) {
				np->cur_tx = np->cur_tx->next_desc_logical;
				++np->free_tx_count;
			} else {
				np->cur_tx = np->cur_tx->next_desc_logical;
				np->cur_tx = np->cur_tx->next_desc_logical;
				np->free_tx_count += 2;
			}
			num_tx++;
		}		/* end of for loop */

		if (num_tx && np->free_tx_count >= 2)
			netif_wake_queue(dev);

		/* read transmit status for enhanced mode only */
		if (np->crvalue & CR_W_ENH) {
			data = ioread32(ioaddr + TSR);
			np->stats.tx_errors += (data & 0xff000000) >> 24;
			np->stats.tx_aborted_errors += (data & 0xff000000) >> 24;
			np->stats.tx_window_errors += (data & 0x00ff0000) >> 16;
			np->stats.collisions += (data & 0x0000ffff);
		}

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=0x%4.4x.\n", dev->name, intr_status);
			if (!np->reset_timer_armed) {
				np->reset_timer_armed = 1;
				np->reset_timer.expires = RUN_AT(HZ/2);
				add_timer(&np->reset_timer);
				stop_nic_rxtx(ioaddr, 0);
				netif_stop_queue(dev);
				/* or netif_tx_disable(dev); ?? */
				/* Prevent other paths from enabling tx,rx,intrs */
				np->crvalue_sv = np->crvalue;
				np->imrvalue_sv = np->imrvalue;
				np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN);	/* or simply = 0? */
			}
			break;
		}
	} while (1);

	/* read the tally counters */
	/* missed pkts */
	np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;

	/* crc error */
	np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;

	if (debug)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
		       dev->name, ioread32(ioaddr + ISR));

	iowrite32(np->imrvalue, ioaddr + IMR);

	spin_unlock(&np->lock);

	return IRQ_RETVAL(handled);
}
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
		s32 rx_status = np->cur_rx->status;

		if (np->really_rx_count == 0)
			break;

		if (debug)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n", rx_status);

		if ((!((rx_status & RXFSD) && (rx_status & RXLSD)))
		    || (rx_status & ErrorSummary)) {
			if (rx_status & ErrorSummary) {	/* there was a fatal error */
				if (debug)
					printk(KERN_DEBUG
					       "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, rx_status);

				np->stats.rx_errors++;	/* end of a packet. */
				if (rx_status & (LONG | RUNT))
					np->stats.rx_length_errors++;
				if (rx_status & RXER)
					np->stats.rx_frame_errors++;
				if (rx_status & CRC)
					np->stats.rx_crc_errors++;
			} else {
				int need_to_reset = 0;
				int desno = 0;

				if (rx_status & RXFSD) {	/* this pkt is too long, over one rx buffer */
					struct fealnx_desc *cur;

					/* check whether this packet was received completely */
					cur = np->cur_rx;
					while (desno <= np->really_rx_count) {
						++desno;
						if ((!(cur->status & RXOWN))
						    && (cur->status & RXLSD))
							break;
						/* goto next rx descriptor */
						cur = cur->next_desc_logical;
					}
					if (desno > np->really_rx_count)
						need_to_reset = 1;
				} else	/* RXLSD not found, something wrong */
					need_to_reset = 1;

				if (need_to_reset == 0) {
					int i;

					np->stats.rx_length_errors++;

					/* free all rx descriptors related this long pkt */
					for (i = 0; i < desno; ++i) {
						if (!np->cur_rx->skbuff) {
							printk(KERN_DEBUG
							       "%s: I'm scared\n", dev->name);
							break;
						}
						np->cur_rx->status = RXOWN;
						np->cur_rx = np->cur_rx->next_desc_logical;
					}
					continue;
				} else {	/* rx error, need to reset this chip */
					stop_nic_rx(ioaddr, np->crvalue);
					reset_rx_descriptors(dev);
					iowrite32(np->crvalue, ioaddr + TCRRCR);
				}
				break;	/* exit the while loop */
			}
		} else {	/* this received pkt is ok */

			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;

#ifndef final_version
			if (debug)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
				       " status %x.\n", pkt_len, rx_status);
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,
							    np->cur_rx->buffer,
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				/* Call copy + cksum if available. */

#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb,
					np->cur_rx->skbuff->data, pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
					np->cur_rx->skbuff->data, pkt_len);
#endif
				pci_dma_sync_single_for_device(np->pci_dev,
							       np->cur_rx->buffer,
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(np->pci_dev,
						 np->cur_rx->buffer,
						 np->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb = np->cur_rx->skbuff, pkt_len);
				np->cur_rx->skbuff = NULL;
				--np->really_rx_count;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}

		np->cur_rx = np->cur_rx->next_desc_logical;
	}			/* end of while loop */

	/* allocate skb for rx buffers */
	allocate_rx_buffers(dev);

	return 0;
}
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	/* The chip only needs to report frames it silently dropped. */
	if (netif_running(dev)) {
		np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
		np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
	}

	return &np->stats;
}
/* for dev->set_multicast_list */
static void set_rx_mode(struct net_device *dev)
{
	spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
	unsigned long flags;

	spin_lock_irqsave(lp, flags);
	__set_rx_mode(dev);
	spin_unlock_irqrestore(lp, flags);
}
/* Take lock before calling */
static void __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	u32 mc_filter[2];	/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = CR_W_AB | CR_W_AM;
	} else {
		struct dev_mc_list *mclist;
		int i;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			unsigned int bit;
			bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
			mc_filter[bit >> 5] |= (1 << bit);
		}
		rx_mode = CR_W_AB | CR_W_AM;
	}

	stop_nic_rxtx(ioaddr, np->crvalue);

	iowrite32(mc_filter[0], ioaddr + MAR0);
	iowrite32(mc_filter[1], ioaddr + MAR1);
	np->crvalue &= ~CR_W_RXMODEMASK;
	np->crvalue |= rx_mode;
	iowrite32(np->crvalue, ioaddr + TCRRCR);
}
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}
static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii);
}
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int i;

	netif_stop_queue(dev);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0x0000, ioaddr + IMR);

	/* Stop the chip's Tx and Rx processes. */
	stop_nic_rxtx(ioaddr, 0);

	del_timer_sync(&np->timer);
	del_timer_sync(&np->reset_timer);

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = np->rx_ring[i].skbuff;

		np->rx_ring[i].status = 0;
		if (skb) {
			pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_ring[i].skbuff = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_ring[i].skbuff;

		if (skb) {
			pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_ring[i].skbuff = NULL;
		}
	}

	return 0;
}
static struct pci_device_id fealnx_pci_tbl[] = {
	{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{}			/* terminate list */
};
MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);


static struct pci_driver fealnx_driver = {
	.name		= DRV_NAME,
	.id_table	= fealnx_pci_tbl,
	.probe		= fealnx_init_one,
	.remove		= __devexit_p(fealnx_remove_one),
};

static int __init fealnx_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif

	return pci_register_driver(&fealnx_driver);
}

static void __exit fealnx_exit(void)
{
	pci_unregister_driver(&fealnx_driver);
}

module_init(fealnx_init);
module_exit(fealnx_exit);