/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
 * sungem.c: Sun GEM ethernet driver.
 *
 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
 *
 * Support for Apple GMAC and assorted PHYs, WOL, Power Management
 * (C) 2001,2002,2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 * (C) 2004,2005 Benjamin Herrenschmidt, IBM Corp.
 *
 * NAPI and NETPOLL support
 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
 *
 * TODO:
 *  - Now that the driver was significantly simplified, I need to rework
 *    the locking. I'm sure we don't need _2_ spinlocks, and we probably
 *    can avoid taking most of them for so long a period of time (and schedule
 *    instead). The main issues at this point are caused by the netdev layer:
 *
 *    gem_change_mtu() and gem_set_multicast() are called with a read_lock()
 *    held by net/core/dev.c, thus they can't schedule. That means they can't
 *    call netif_poll_disable() either, thus forcing gem_poll() to keep a
 *    spinlock it could otherwise have dropped. change_mtu especially would
 *    love to be able to msleep instead of using horrid locked delays when
 *    resetting the HW, but that read_lock() makes it impossible, unless I
 *    defer its action to the reset task, which means it'll be asynchronous
 *    (won't take effect until the system schedules a bit).
 *
 *    Also, it would probably be possible to remove most of the long-life
 *    locking in the open/resume code path (gem_reinit_chip) by being more
 *    careful about when we can start taking interrupts or get xmit() called...
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/mutex.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef __sparc__
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#endif

#ifdef CONFIG_PPC_PMAC
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif

#include "sungem_phy.h"
#include "sungem.h"
/* Stripping FCS is causing problems, disabled for now */
#undef STRIP_FCS

#define DEFAULT_MSG	(NETIF_MSG_DRV		| \
			 NETIF_MSG_PROBE	| \
			 NETIF_MSG_LINK)

#define ADVERTISE_MASK	(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
			 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
			 SUPPORTED_Pause | SUPPORTED_Autoneg)

#define DRV_NAME	"sungem"
#define DRV_VERSION	"0.98"
#define DRV_RELDATE	"8/24/03"
#define DRV_AUTHOR	"David S. Miller (davem@redhat.com)"
static char version[] __devinitdata =
        DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_LICENSE("GPL");

#define GEM_MODULE_NAME	"gem"
#define PFX GEM_MODULE_NAME ": "
static struct pci_device_id gem_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },

	/* These models only differ from the original GEM in
	 * that their tx/rx fifos are of a different size and
	 * they only support 10/100 speeds. -DaveM
	 *
	 * Apple's GMAC does support gigabit on machines with
	 * the BCM54xx PHYs. -BenH
	 */
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{0, }
};

MODULE_DEVICE_TABLE(pci, gem_pci_tbl);
static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (2 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	writel(cmd, gp->regs + MIF_FRAME);

	while (limit--) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}

	if (!limit)
		cmd = 0xffff;

	return cmd & MIF_FRAME_DATA;
}
static inline int _phy_read(struct net_device *dev, int mii_id, int reg)
{
	struct gem *gp = dev->priv;
	return __phy_read(gp, mii_id, reg);
}

static inline u16 phy_read(struct gem *gp, int reg)
{
	return __phy_read(gp, gp->mii_phy_addr, reg);
}
static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (1 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	cmd |= (val & MIF_FRAME_DATA);
	writel(cmd, gp->regs + MIF_FRAME);

	while (limit--) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}
}
static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct gem *gp = dev->priv;
	__phy_write(gp, mii_id, reg, val & 0xffff);
}

static inline void phy_write(struct gem *gp, int reg, u16 val)
{
	__phy_write(gp, gp->mii_phy_addr, reg, val);
}
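/* Added commentary (not in the original source): the MIF_FRAME accessors
 * above build a standard IEEE 802.3 clause-22 management frame, assuming
 * the usual encoding behind the MIF_FRAME_* masks:
 *
 *	cmd  = (1 << 30);                            ST:   start bits (01)
 *	cmd |= (2 << 28) or (1 << 28);               OP:   10 = read, 01 = write
 *	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;   bits 27:23, PHY address
 *	cmd |= (reg << 18) & MIF_FRAME_REGAD;        bits 22:18, register
 *	cmd |= MIF_FRAME_TAMSB;                      TA:   bits 17:16
 *	cmd |= (val & MIF_FRAME_DATA);               data: bits 15:0 (writes)
 *
 * The chip sets the turn-around LSB (MIF_FRAME_TALSB) once the frame has
 * been shifted out and, for reads, the data field is valid, which is why
 * both pollers above spin on that bit.
 */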
static inline void gem_enable_ints(struct gem *gp)
{
	/* Enable all interrupts but TXDONE */
	writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}

static inline void gem_disable_ints(struct gem *gp)
{
	/* Disable all interrupts, including TXDONE */
	writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}
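/* Added note: GREG_IMASK is a mask register, so bits that are set are the
 * interrupts that get *blocked*. gem_enable_ints() therefore masks only
 * TXDONE (TX completion is reaped from the NAPI poll loop instead), while
 * gem_disable_ints() masks the whole NAPI set plus TXDONE.
 */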
/* Turn on the chip's clock */
static void gem_get_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled < 0);
	gp->cell_enabled++;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 1) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

/* Turn off the chip's clock */
static void gem_put_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled <= 0);
	gp->cell_enabled--;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 0) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}
static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
}
static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
	u32 pcs_miistat;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
			gp->dev->name, pcs_istat);

	if (!(pcs_istat & PCS_ISTAT_LSC)) {
		printk(KERN_ERR "%s: PCS irq but no link status change???\n",
		       dev->name);
		return 0;
	}

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	pcs_miistat = readl(gp->regs + PCS_MIISTAT);
	if (!(pcs_miistat & PCS_MIISTAT_LS))
		pcs_miistat |=
			(readl(gp->regs + PCS_MIISTAT) &
			 PCS_MIISTAT_LS);

	if (pcs_miistat & PCS_MIISTAT_ANC) {
		/* The remote-fault indication is only valid
		 * when autoneg has completed.
		 */
		if (pcs_miistat & PCS_MIISTAT_RF)
			printk(KERN_INFO "%s: PCS AutoNEG complete, "
			       "RemoteFault\n", dev->name);
		else
			printk(KERN_INFO "%s: PCS AutoNEG complete.\n",
			       dev->name);
	}

	if (pcs_miistat & PCS_MIISTAT_LS) {
		printk(KERN_INFO "%s: PCS link is now up.\n",
		       dev->name);
		netif_carrier_on(gp->dev);
	} else {
		printk(KERN_INFO "%s: PCS link is now down.\n",
		       dev->name);
		netif_carrier_off(gp->dev);
		/* If this happens and the link timer is not running,
		 * reset so we re-negotiate.
		 */
		if (!timer_pending(&gp->link_timer))
			return 1;
	}

	return 0;
}
static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			gp->dev->name, txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TXSTAT_DTE) &&
	    !(txmac_stat & ~MAC_TXSTAT_DTE))
		return 0;

	if (txmac_stat & MAC_TXSTAT_URUN) {
		printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
		       dev->name);
		gp->net_stats.tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TXSTAT_MPE) {
		printk(KERN_ERR "%s: TX MAC max packet size error.\n",
		       dev->name);
		gp->net_stats.tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TXSTAT_NCE)
		gp->net_stats.collisions += 0x10000;

	if (txmac_stat & MAC_TXSTAT_ECE) {
		gp->net_stats.tx_aborted_errors += 0x10000;
		gp->net_stats.collisions += 0x10000;
	}

	if (txmac_stat & MAC_TXSTAT_LCE) {
		gp->net_stats.tx_aborted_errors += 0x10000;
		gp->net_stats.collisions += 0x10000;
	}

	/* We do not keep track of MAC_TXSTAT_FCE and
	 * MAC_TXSTAT_PCE events.
	 */
	return 0;
}
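/* Added note: the TX MAC keeps 16-bit hardware event counters; the
 * NCE/ECE/LCE status bits above fire when a counter wraps, which is why
 * each one credits the software statistics with 0x10000 events at a time.
 */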
/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int gem_rxmac_reset(struct gem *gp)
{
	struct net_device *dev = gp->dev;
	int limit, i;
	u64 desc_dma;
	u32 val;

	/* First, reset & disable MAC RX. */
	writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX MAC will not reset, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
	       gp->regs + MAC_RXCFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, gp->regs + RXDMA_CFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	udelay(5000);

	/* Execute RX reset command. */
	writel(gp->swrst_base | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX reset command will not execute, resetting "
		       "whole chip.\n", dev->name);
		return 1;
	}

	/* Refresh the RX ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd = &gp->init_block->rxd[i];

		if (gp->rx_skbs[i] == NULL) {
			printk(KERN_ERR "%s: Parts of RX ring empty, resetting "
			       "whole chip.\n", dev->name);
			return 1;
		}

		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
	}
	gp->rx_new = gp->rx_old = 0;

	/* Now we must reprogram the rest of RX unit. */
	desc_dma = (u64) gp->gblock_dvma;
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);
	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	return 0;
}
static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
	int ret = 0;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
			gp->dev->name, rxmac_stat);

	if (rxmac_stat & MAC_RXSTAT_OFLW) {
		u32 smac = readl(gp->regs + MAC_SMACHINE);

		printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n",
		       dev->name, smac);
		gp->net_stats.rx_over_errors++;
		gp->net_stats.rx_fifo_errors++;

		ret = gem_rxmac_reset(gp);
	}

	if (rxmac_stat & MAC_RXSTAT_ACE)
		gp->net_stats.rx_frame_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_CCE)
		gp->net_stats.rx_crc_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_LCE)
		gp->net_stats.rx_length_errors += 0x10000;

	/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
	 * events.
	 */
	return ret;
}
static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mac_cstat = readl(gp->regs + MAC_CSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
			gp->dev->name, mac_cstat);

	/* This interrupt is just for pause frame and pause
	 * tracking. It is useful for diagnostics and debug
	 * but probably by default we will mask these events.
	 */
	if (mac_cstat & MAC_CSTAT_PS)
		gp->pause_entered++;

	if (mac_cstat & MAC_CSTAT_PRCV)
		gp->pause_last_time_recvd = (mac_cstat >> 16);

	return 0;
}
static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mif_status = readl(gp->regs + MIF_STATUS);
	u32 reg_val, changed_bits;

	reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
	changed_bits = (mif_status & MIF_STATUS_STAT);

	gem_handle_mif_event(gp, reg_val, changed_bits);

	return 0;
}
static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		printk(KERN_ERR "%s: PCI error [%04x] ",
		       dev->name, pci_estat);

		if (pci_estat & GREG_PCIESTAT_BADACK)
			printk("<No ACK64# during ABS64 cycle> ");
		if (pci_estat & GREG_PCIESTAT_DTRTO)
			printk("<Delayed transaction timeout> ");
		if (pci_estat & GREG_PCIESTAT_OTHER)
			printk("<other>");
		printk("\n");
	} else {
		pci_estat |= GREG_PCIESTAT_OTHER;
		printk(KERN_ERR "%s: PCI error\n", dev->name);
	}

	if (pci_estat & GREG_PCIESTAT_OTHER) {
		u16 pci_cfg_stat;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_read_config_word(gp->pdev, PCI_STATUS,
				     &pci_cfg_stat);
		printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
		       dev->name, pci_cfg_stat);
		if (pci_cfg_stat & PCI_STATUS_PARITY)
			printk(KERN_ERR "%s: PCI parity error detected.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI target abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI master acks target abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
			printk(KERN_ERR "%s: PCI master abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
			printk(KERN_ERR "%s: PCI system error SERR#.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
			printk(KERN_ERR "%s: PCI parity error.\n",
			       dev->name);

		/* Write the error bits back to clear them. */
		pci_cfg_stat &= (PCI_STATUS_PARITY |
				 PCI_STATUS_SIG_TARGET_ABORT |
				 PCI_STATUS_REC_TARGET_ABORT |
				 PCI_STATUS_REC_MASTER_ABORT |
				 PCI_STATUS_SIG_SYSTEM_ERROR |
				 PCI_STATUS_DETECTED_PARITY);
		pci_write_config_word(gp->pdev,
				      PCI_STATUS, pci_cfg_stat);
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}
/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	if (gem_status & GREG_STAT_RXNOBUF) {
		/* Frame arrived, no free RX buffers available. */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: no buffer for rx frame\n",
				gp->dev->name);
		gp->net_stats.rx_dropped++;
	}

	if (gem_status & GREG_STAT_RXTAGERR) {
		/* corrupt RX tag framing */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
				gp->dev->name);
		gp->net_stats.rx_errors++;

		goto do_reset;
	}

	if (gem_status & GREG_STAT_PCS) {
		if (gem_pcs_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_TXMAC) {
		if (gem_txmac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_RXMAC) {
		if (gem_rxmac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_MAC) {
		if (gem_mac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_MIF) {
		if (gem_mif_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_PCIERR) {
		if (gem_pci_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	return 0;

do_reset:
	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);

	return 1;
}
static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	int entry, limit;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n",
			gp->dev->name, gem_status);

	entry = gp->tx_old;
	limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
	while (entry != limit) {
		struct sk_buff *skb;
		struct gem_txd *txd;
		dma_addr_t dma_addr;
		u32 dma_len;
		int frag;

		if (netif_msg_tx_done(gp))
			printk(KERN_DEBUG "%s: tx done, slot %d\n",
				gp->dev->name, entry);
		skb = gp->tx_skbs[entry];
		if (skb_shinfo(skb)->nr_frags) {
			int last = entry + skb_shinfo(skb)->nr_frags;
			int walk = entry;
			int incomplete = 0;

			last &= (TX_RING_SIZE - 1);
			for (;;) {
				walk = NEXT_TX(walk);
				if (walk == limit)
					incomplete = 1;
				if (walk == last)
					break;
			}
			if (incomplete)
				break;
		}
		gp->tx_skbs[entry] = NULL;
		gp->net_stats.tx_bytes += skb->len;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			txd = &gp->init_block->txd[entry];

			dma_addr = le64_to_cpu(txd->buffer);
			dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

			pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
			entry = NEXT_TX(entry);
		}

		gp->net_stats.tx_packets++;
		dev_kfree_skb_irq(skb);
	}
	gp->tx_old = entry;

	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(dev);
}
static __inline__ void gem_post_rxds(struct gem *gp, int limit)
{
	int cluster_start, curr, count, kick;

	cluster_start = curr = (gp->rx_new & ~(4 - 1));
	count = 0;
	kick = -1;
	wmb();
	while (curr != limit) {
		curr = NEXT_RX(curr);
		if (++count == 4) {
			struct gem_rxd *rxd =
				&gp->init_block->rxd[cluster_start];
			for (;;) {
				rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
				rxd++;
				cluster_start = NEXT_RX(cluster_start);
				if (cluster_start == curr)
					break;
			}
			kick = curr;
			count = 0;
		}
	}
	if (kick >= 0) {
		mb();
		writel(kick, gp->regs + RXDMA_KICK);
	}
}
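/* Added note: gem_post_rxds() hands descriptors back to the chip in
 * aligned clusters of four (cluster_start is rounded down with ~(4 - 1)).
 * Ownership of a whole cluster is flipped at once and RXDMA_KICK is
 * written at most once per call, keeping the MMIO traffic per received
 * frame low.
 */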
static int gem_rx(struct gem *gp, int work_to_do)
{
	int entry, drops, work_done = 0;
	u32 done;

	if (netif_msg_rx_status(gp))
		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
			gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);

	entry = gp->rx_new;
	drops = 0;
	done = readl(gp->regs + RXDMA_DONE);
	for (;;) {
		struct gem_rxd *rxd = &gp->init_block->rxd[entry];
		struct sk_buff *skb;
		u64 status = cpu_to_le64(rxd->status_word);
		dma_addr_t dma_addr;
		int len;

		if ((status & RXDCTRL_OWN) != 0)
			break;

		if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
			break;

		/* When writing back RX descriptor, GEM writes status
		 * then buffer address, possibly in separate transactions.
		 * If we don't wait for the chip to write both, we could
		 * post a new buffer to this descriptor then have GEM spam
		 * on the buffer address. We sync on the RX completion
		 * register to prevent this from happening.
		 */
		if (entry == done) {
			done = readl(gp->regs + RXDMA_DONE);
			if (entry == done)
				break;
		}

		/* We can now account for the work we're about to do */
		work_done++;

		skb = gp->rx_skbs[entry];

		len = (status & RXDCTRL_BUFSZ) >> 16;
		if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
			gp->net_stats.rx_errors++;
			if (len < ETH_ZLEN)
				gp->net_stats.rx_length_errors++;
			if (len & RXDCTRL_BAD)
				gp->net_stats.rx_crc_errors++;

			/* We'll just return it to GEM. */
		drop_it:
			gp->net_stats.rx_dropped++;
			goto next;
		}

		dma_addr = cpu_to_le64(rxd->buffer);
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			gp->rx_skbs[entry] = new_skb;
			new_skb->dev = gp->dev;
			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
			rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
							       virt_to_page(new_skb->data),
							       offset_in_page(new_skb->data),
							       RX_BUF_ALLOC_SIZE(gp),
							       PCI_DMA_FROMDEVICE));
			skb_reserve(new_skb, RX_OFFSET);

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff);
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->protocol = eth_type_trans(skb, gp->dev);

		netif_receive_skb(skb);

		gp->net_stats.rx_packets++;
		gp->net_stats.rx_bytes += len;
		gp->dev->last_rx = jiffies;

	next:
		entry = NEXT_RX(entry);
	}

	gem_post_rxds(gp, entry);

	gp->rx_new = entry;

	if (drops)
		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
		       gp->dev->name);

	return work_done;
}
static int gem_poll(struct net_device *dev, int *budget)
{
	struct gem *gp = dev->priv;
	unsigned long flags;

	/*
	 * NAPI locking nightmare: See comment at head of driver
	 */
	spin_lock_irqsave(&gp->lock, flags);

	do {
		int work_to_do, work_done;

		/* Handle anomalies */
		if (gp->status & GREG_STAT_ABNORMAL) {
			if (gem_abnormal_irq(dev, gp, gp->status))
				break;
		}

		/* Run TX completion thread */
		spin_lock(&gp->tx_lock);
		gem_tx(dev, gp, gp->status);
		spin_unlock(&gp->tx_lock);

		spin_unlock_irqrestore(&gp->lock, flags);

		/* Run RX thread. We don't use any locking here,
		 * code willing to do bad things - like cleaning the
		 * rx ring - must call netif_poll_disable(), which
		 * schedule_timeout()'s if polling is already disabled.
		 */
		work_to_do = min(*budget, dev->quota);

		work_done = gem_rx(gp, work_to_do);

		*budget -= work_done;
		dev->quota -= work_done;

		if (work_done >= work_to_do)
			return 1;

		spin_lock_irqsave(&gp->lock, flags);

		gp->status = readl(gp->regs + GREG_STAT);
	} while (gp->status & GREG_STAT_NAPI);

	__netif_rx_complete(dev);
	gem_enable_ints(gp);

	spin_unlock_irqrestore(&gp->lock, flags);
	return 0;
}
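/* Added note: the RX pass above runs with gp->lock deliberately dropped;
 * per the comment at the head of the driver, anything that wants to touch
 * the RX ring (MTU changes, resets) must serialize against gem_poll()
 * through netif_poll_disable() rather than through a spinlock.
 */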
static irqreturn_t gem_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gem *gp = dev->priv;
	unsigned long flags;

	/* Swallow interrupts when shutting the chip down, though
	 * that shouldn't happen, we should have done free_irq() at
	 * this point...
	 */
	if (!gp->running)
		return IRQ_HANDLED;

	spin_lock_irqsave(&gp->lock, flags);

	if (netif_rx_schedule_prep(dev)) {
		u32 gem_status = readl(gp->regs + GREG_STAT);

		if (gem_status == 0) {
			netif_poll_enable(dev);
			spin_unlock_irqrestore(&gp->lock, flags);
			return IRQ_NONE;
		}
		gp->status = gem_status;
		gem_disable_ints(gp);
		__netif_rx_schedule(dev);
	}

	spin_unlock_irqrestore(&gp->lock, flags);

	/* If polling was disabled at the time we received that
	 * interrupt, we may return IRQ_HANDLED here while we
	 * should return IRQ_NONE. No big deal...
	 */
	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gem_poll_controller(struct net_device *dev)
{
	/* gem_interrupt is safe to re-enter, so no need
	 * to disable_irq here.
	 */
	gem_interrupt(dev->irq, dev);
}
#endif
static void gem_tx_timeout(struct net_device *dev)
{
	struct gem *gp = dev->priv;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	if (!gp->running) {
		printk("%s: hrm.. hw not running !\n", dev->name);
		return;
	}
	printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(gp->regs + TXDMA_CFG),
	       readl(gp->regs + MAC_TXSTAT),
	       readl(gp->regs + MAC_TXCFG));
	printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(gp->regs + RXDMA_CFG),
	       readl(gp->regs + MAC_RXSTAT),
	       readl(gp->regs + MAC_RXCFG));

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);

	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}
static __inline__ int gem_intme(int entry)
{
	/* Algorithm: IRQ every 1/2 of descriptors. */
	if (!(entry & ((TX_RING_SIZE>>1)-1)))
		return 1;

	return 0;
}
static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gem *gp = dev->priv;
	int entry;
	u64 ctrl;
	unsigned long flags;

	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const u64 csum_start_off = skb_transport_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

		ctrl = (TXDCTRL_CENAB |
			(csum_start_off << 15) |
			(csum_stuff_off << 21));
	}

	local_irq_save(flags);
	if (!spin_trylock(&gp->tx_lock)) {
		/* Tell upper layer to requeue */
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}
	/* We raced with gem_do_stop() */
	if (!gp->running) {
		spin_unlock_irqrestore(&gp->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&gp->tx_lock, flags);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	entry = gp->tx_new;
	gp->tx_skbs[entry] = skb;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct gem_txd *txd = &gp->init_block->txd[entry];
		dma_addr_t mapping;
		u32 len;

		len = skb->len;
		mapping = pci_map_page(gp->pdev,
				       virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       len, PCI_DMA_TODEVICE);
		ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
		if (gem_intme(entry))
			ctrl |= TXDCTRL_INTME;
		txd->buffer = cpu_to_le64(mapping);
		wmb();
		txd->control_word = cpu_to_le64(ctrl);
		entry = NEXT_TX(entry);
	} else {
		struct gem_txd *txd;
		u32 first_len;
		u64 intme;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;

		intme = 0;
		if (gem_intme(entry))
			intme |= TXDCTRL_INTME;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_len = skb_headlen(skb);
		first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
					     offset_in_page(skb->data),
					     first_len, PCI_DMA_TODEVICE);
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			dma_addr_t mapping;
			u64 this_ctrl;

			len = this_frag->size;
			mapping = pci_map_page(gp->pdev,
					       this_frag->page,
					       this_frag->page_offset,
					       len, PCI_DMA_TODEVICE);
			this_ctrl = ctrl;
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_ctrl |= TXDCTRL_EOF;

			txd = &gp->init_block->txd[entry];
			txd->buffer = cpu_to_le64(mapping);
			wmb();
			txd->control_word = cpu_to_le64(this_ctrl | len);

			if (gem_intme(entry))
				intme |= TXDCTRL_INTME;

			entry = NEXT_TX(entry);
		}
		txd = &gp->init_block->txd[first_entry];
		txd->buffer = cpu_to_le64(first_mapping);
		wmb();
		txd->control_word =
			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
	}

	gp->tx_new = entry;
	if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	if (netif_msg_tx_queued(gp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	mb();
	writel(gp->tx_new, gp->regs + TXDMA_KICK);
	spin_unlock_irqrestore(&gp->tx_lock, flags);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
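/* Added worked example for the checksum-offload path above: for an IPv4
 * TCP frame with no options, skb_transport_offset() is 14 (ethernet) +
 * 20 (IP) = 34 and csum_offset is 16, so the descriptor carries
 * TXDCTRL_CENAB | (34 << 15) | (50 << 21), telling the chip to checksum
 * from byte 34 and stuff the result at byte 50.
 */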
#define STOP_TRIES 32

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Make sure we won't get any more interrupts */
	writel(0xffffffff, gp->regs + GREG_IMASK);

	/* Reset the chip */
	writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);

	limit = STOP_TRIES;

	do {
		udelay(20);
		val = readl(gp->regs + GREG_SWRST);
		if (limit-- <= 0)
			break;
	} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));

	if (limit <= 0)
		printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
}
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_start_dma(struct gem *gp)
{
	u32 val;

	/* We are ready to rock, turn everything on. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);
	udelay(100);

	gem_enable_ints(gp);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}
/* Must be invoked under gp->lock and gp->tx_lock. DMA won't be
 * actually stopped before about 4ms tho ...
 */
static void gem_stop_dma(struct gem *gp)
{
	u32 val;

	/* We are done rocking, turn everything off. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);

	/* Need to wait a bit ... done by the caller */
}
/* Must be invoked under gp->lock and gp->tx_lock. */
// XXX dbl check what that function should do when called on PCS PHY
static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
{
	u32 advertise, features;
	int autoneg;
	int speed;
	int duplex;

	if (gp->phy_type != phy_mii_mdio0 &&
	    gp->phy_type != phy_mii_mdio1)
		goto non_mii;

	/* Setup advertise */
	if (found_mii_phy(gp))
		features = gp->phy_mii.def->features;
	else
		features = 0;

	advertise = features & ADVERTISE_MASK;
	if (gp->phy_mii.advertising != 0)
		advertise &= gp->phy_mii.advertising;

	autoneg = gp->want_autoneg;
	speed = gp->phy_mii.speed;
	duplex = gp->phy_mii.duplex;

	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	if (ep->autoneg == AUTONEG_ENABLE) {
		advertise = ep->advertising;
		autoneg = 1;
	} else {
		autoneg = 0;
		speed = ep->speed;
		duplex = ep->duplex;
	}

start_aneg:
	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		autoneg = 0;
	if (speed == SPEED_1000 &&
	    !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
		speed = SPEED_100;
	if (speed == SPEED_100 &&
	    !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
		speed = SPEED_10;
	if (duplex == DUPLEX_FULL &&
	    !(features & (SUPPORTED_1000baseT_Full |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Full)))
		duplex = DUPLEX_HALF;
	if (speed == 0)
		speed = SPEED_10;

	/* If we are asleep, we don't try to actually setup the PHY, we
	 * just store the settings
	 */
	if (gp->asleep) {
		gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
		gp->phy_mii.speed = speed;
		gp->phy_mii.duplex = duplex;
		return;
	}

	/* Configure PHY & start aneg */
	gp->want_autoneg = autoneg;
	if (autoneg) {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
		gp->lstate = link_aneg;
	} else {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
		gp->lstate = link_force_ok;
	}

non_mii:
	gp->timer_ticks = 0;
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}
/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 *
 * Must be invoked under gp->lock and gp->tx_lock.
 */
static int gem_set_link_modes(struct gem *gp)
{
	u32 val;
	int full_duplex, speed, pause;

	full_duplex = 0;
	speed = SPEED_10;
	pause = 0;

	if (found_mii_phy(gp)) {
		if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
			return 1;
		full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
		speed = gp->phy_mii.speed;
		pause = gp->phy_mii.pause;
	} else if (gp->phy_type == phy_serialink ||
		   gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if (pcs_lpa & PCS_MIIADV_FD)
			full_duplex = 1;
		speed = SPEED_1000;
	}

	if (netif_msg_link(gp))
		printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
			gp->dev->name, speed, (full_duplex ? "full" : "half"));

	if (!gp->running)
		return 0;

	val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
	if (full_duplex) {
		val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
	} else {
		/* MAC_TXCFG_NBO must be zero. */
	}
	writel(val, gp->regs + MAC_TXCFG);

	val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
	if (!full_duplex &&
	    (gp->phy_type == phy_mii_mdio0 ||
	     gp->phy_type == phy_mii_mdio1)) {
		val |= MAC_XIFCFG_DISE;
	} else if (full_duplex) {
		val |= MAC_XIFCFG_FLED;
	}

	if (speed == SPEED_1000)
		val |= (MAC_XIFCFG_GMII);

	writel(val, gp->regs + MAC_XIFCFG);

	/* If gigabit and half-duplex, enable carrier extension
	 * mode. Else, disable it.
	 */
	if (speed == SPEED_1000 && !full_duplex) {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	} else {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	}

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
			pause = 1;
	}

	if (netif_msg_link(gp)) {
		if (pause) {
			printk(KERN_INFO "%s: Pause is enabled "
			       "(rxfifo: %d off: %d on: %d)\n",
			       gp->dev->name,
			       gp->rx_fifo_sz,
			       gp->rx_pause_off,
			       gp->rx_pause_on);
		} else {
			printk(KERN_INFO "%s: Pause is disabled\n",
			       gp->dev->name);
		}
	}

	if (!full_duplex)
		writel(512, gp->regs + MAC_STIME);
	else
		writel(64, gp->regs + MAC_STIME);
	val = readl(gp->regs + MAC_MCCFG);
	if (pause)
		val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	else
		val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	writel(val, gp->regs + MAC_MCCFG);

	gem_start_dma(gp);

	return 0;
}
/* Must be invoked under gp->lock and gp->tx_lock. */
static int gem_mdio_link_not_up(struct gem *gp)
{
	switch (gp->lstate) {
	case link_force_ret:
		if (netif_msg_link(gp))
			printk(KERN_INFO "%s: Autoneg failed again, keeping"
				" forced mode\n", gp->dev->name);
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
			gp->last_forced_speed, DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_ok;
		return 0;
	case link_aneg:
		/* We try forced modes after a failed aneg only on PHYs that don't
		 * have "magic_aneg" bit set, which means they internally do the
		 * whole forced-mode thingy. On these, we just restart aneg
		 */
		if (gp->phy_mii.def->magic_aneg)
			return 1;
		if (netif_msg_link(gp))
			printk(KERN_INFO "%s: switching to forced 100bt\n",
				gp->dev->name);
		/* Try forced modes. */
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
			DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_try;
		return 0;
	case link_force_try:
		/* Downgrade from 100 to 10 Mbps if necessary.
		 * If already at 10Mbps, warn user about the
		 * situation every 10 ticks.
		 */
		if (gp->phy_mii.speed == SPEED_100) {
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
				DUPLEX_HALF);
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: switching to forced 10bt\n",
					gp->dev->name);
			return 0;
		} else
			return 1;
	default:
		return 0;
	}
}
static void gem_link_timer(unsigned long data)
{
	struct gem *gp = (struct gem *) data;
	int restart_aneg = 0;

	if (gp->asleep)
		return;

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);
	gem_get_cell(gp);

	/* If the reset task is still pending, we just
	 * reschedule the link timer
	 */
	if (gp->reset_task_pending)
		goto restart;

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 val = readl(gp->regs + PCS_MIISTAT);

		if (!(val & PCS_MIISTAT_LS))
			val = readl(gp->regs + PCS_MIISTAT);

		if ((val & PCS_MIISTAT_LS) != 0) {
			gp->lstate = link_up;
			netif_carrier_on(gp->dev);
			(void)gem_set_link_modes(gp);
		}
		goto restart;
	}
	if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we do
		 * retry a short autoneg pass. If you know your hub is
		 * broken, use ethtool ;)
		 */
		if (gp->lstate == link_force_try && gp->want_autoneg) {
			gp->lstate = link_force_ret;
			gp->last_forced_speed = gp->phy_mii.speed;
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: Got link after fallback, retrying"
					" autoneg once...\n", gp->dev->name);
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
		} else if (gp->lstate != link_up) {
			gp->lstate = link_up;
			netif_carrier_on(gp->dev);
			if (gem_set_link_modes(gp))
				restart_aneg = 1;
		}
	} else {
		/* If the link was previously up, we restart the
		 * whole process
		 */
		if (gp->lstate == link_up) {
			gp->lstate = link_down;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: Link down\n",
					gp->dev->name);
			netif_carrier_off(gp->dev);
			gp->reset_task_pending = 1;
			schedule_work(&gp->reset_task);
			restart_aneg = 1;
		} else if (++gp->timer_ticks > 10) {
			if (found_mii_phy(gp))
				restart_aneg = gem_mdio_link_not_up(gp);
			else
				restart_aneg = 1;
		}
	}
	if (restart_aneg) {
		gem_begin_auto_negotiation(gp, NULL);
		goto out_unlock;
	}

restart:
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));

out_unlock:
	gem_put_cell(gp);
	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_clean_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct sk_buff *skb;
	int i;
	dma_addr_t dma_addr;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd;

		rxd = &gb->rxd[i];
		if (gp->rx_skbs[i] != NULL) {
			skb = gp->rx_skbs[i];
			dma_addr = le64_to_cpu(rxd->buffer);
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			gp->rx_skbs[i] = NULL;
		}
		rxd->status_word = 0;
		wmb();
		rxd->buffer = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (gp->tx_skbs[i] != NULL) {
			struct gem_txd *txd;
			int frag;

			skb = gp->tx_skbs[i];
			gp->tx_skbs[i] = NULL;

			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
				int ent = i & (TX_RING_SIZE - 1);

				txd = &gb->txd[ent];
				dma_addr = le64_to_cpu(txd->buffer);
				pci_unmap_page(gp->pdev, dma_addr,
					       le64_to_cpu(txd->control_word) &
					       TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);

				if (frag != skb_shinfo(skb)->nr_frags)
					i++;
			}
			dev_kfree_skb_any(skb);
		}
	}
}
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct net_device *dev = gp->dev;
	int i;
	dma_addr_t dma_addr;

	gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;

	gem_clean_rings(gp);

	gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
			    (unsigned)VLAN_ETH_FRAME_LEN);

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		struct gem_rxd *rxd = &gb->rxd[i];

		skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
		if (!skb) {
			rxd->buffer = 0;
			rxd->status_word = 0;
			continue;
		}

		gp->rx_skbs[i] = skb;
		skb->dev = dev;
		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
		dma_addr = pci_map_page(gp->pdev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					RX_BUF_ALLOC_SIZE(gp),
					PCI_DMA_FROMDEVICE);
		rxd->buffer = cpu_to_le64(dma_addr);
		wmb();
		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
		skb_reserve(skb, RX_OFFSET);
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct gem_txd *txd = &gb->txd[i];

		txd->control_word = 0;
		wmb();
		txd->buffer = 0;
	}
	wmb();
}
/* Init PHY interface and start link poll state machine */
static void gem_init_phy(struct gem *gp)
{
	u32 mifcfg;

	/* Revert MIF CFG setting done on stop_phy */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_BBMODE;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
		int i;

		/* Those delays suck, the HW seems to love them though, I'll
		 * seriously consider breaking some locks here to be able
		 * to schedule instead
		 */
		for (i = 0; i < 3; i++) {
#ifdef CONFIG_PPC_PMAC
			pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
			msleep(20);
#endif
			/* Some PHYs used by apple have problem getting back to us,
			 * we do an additional reset here
			 */
			phy_write(gp, MII_BMCR, BMCR_RESET);
			msleep(20);
			if (phy_read(gp, MII_BMCR) != 0xffff)
				break;
			if (i == 2)
				printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
				       gp->dev->name);
		}
	}

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		u32 val;

		/* Init datapath mode register. */
		if (gp->phy_type == phy_mii_mdio0 ||
		    gp->phy_type == phy_mii_mdio1) {
			val = PCS_DMODE_MGM;
		} else if (gp->phy_type == phy_serialink) {
			val = PCS_DMODE_SM | PCS_DMODE_GMOE;
		} else {
			val = PCS_DMODE_ESM;
		}

		writel(val, gp->regs + PCS_DMODE);
	}

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		// XXX check for errors
		mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr);

		/* Init PHY */
		if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
			gp->phy_mii.def->ops->init(&gp->phy_mii);
	} else {
		u32 val;
		int limit;

		/* Reset PCS unit. */
		val = readl(gp->regs + PCS_MIICTRL);
		val |= PCS_MIICTRL_RST;
		writel(val, gp->regs + PCS_MIICTRL);

		limit = 32;
		while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
			udelay(100);
			if (limit-- <= 0)
				break;
		}
		if (limit <= 0)
			printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
			       gp->dev->name);

		/* Make sure PCS is disabled while changing advertisement
		 * configuration.
		 */
		val = readl(gp->regs + PCS_CFG);
		val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
		writel(val, gp->regs + PCS_CFG);

		/* Advertise all capabilities except asymmetric
		 * pause.
		 */
		val = readl(gp->regs + PCS_MIIADV);
		val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
			PCS_MIIADV_SP | PCS_MIIADV_AP);
		writel(val, gp->regs + PCS_MIIADV);

		/* Enable and restart auto-negotiation, disable wrapback/loopback,
		 * and re-enable PCS.
		 */
		val = readl(gp->regs + PCS_MIICTRL);
		val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
		val &= ~PCS_MIICTRL_WB;
		writel(val, gp->regs + PCS_MIICTRL);

		val = readl(gp->regs + PCS_CFG);
		val |= PCS_CFG_ENABLE;
		writel(val, gp->regs + PCS_CFG);

		/* Make sure serialink loopback is off. The meaning
		 * of this bit is logically inverted based upon whether
		 * you are in Serialink or SERDES mode.
		 */
		val = readl(gp->regs + PCS_SCTRL);
		if (gp->phy_type == phy_serialink)
			val &= ~PCS_SCTRL_LOOP;
		else
			val |= PCS_SCTRL_LOOP;
		writel(val, gp->regs + PCS_SCTRL);
	}

	/* Default aneg parameters */
	gp->timer_ticks = 0;
	gp->lstate = link_down;
	netif_carrier_off(gp->dev);

	/* Can I advertise gigabit here ? I'd need BCM PHY docs... */
	spin_lock_irq(&gp->lock);
	gem_begin_auto_negotiation(gp, NULL);
	spin_unlock_irq(&gp->lock);
}
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_dma(struct gem *gp)
{
	u64 desc_dma = (u64) gp->gblock_dvma;
	u32 val;

	val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
	writel(val, gp->regs + TXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));

	writel(0, gp->regs + TXDMA_KICK);

	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);

	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);

	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
}
/* Must be invoked under gp->lock and gp->tx_lock. */
static u32 gem_setup_multicast(struct gem *gp)
{
	u32 rxcfg = 0;
	int i;

	if ((gp->dev->flags & IFF_ALLMULTI) ||
	    (gp->dev->mc_count > 256)) {
		for (i = 0; i < 16; i++)
			writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	} else if (gp->dev->flags & IFF_PROMISC) {
		rxcfg |= MAC_RXCFG_PROM;
	} else {
		u16 hash_table[16];
		u32 crc;
		struct dev_mc_list *dmi = gp->dev->mc_list;
		int i;

		for (i = 0; i < 16; i++)
			hash_table[i] = 0;

		for (i = 0; i < gp->dev->mc_count; i++) {
			char *addrs = dmi->dmi_addr;

			dmi = dmi->next;

			if (!(*addrs & 1))
				continue;

			crc = ether_crc_le(6, addrs);
			crc >>= 24;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		}
		for (i = 0; i < 16; i++)
			writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	}

	return rxcfg;
}
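/* Added worked example: the top 8 bits of the little-endian CRC pick one
 * of 256 filter bits spread across sixteen 16-bit MAC_HASH registers. A
 * bucket index of 0x2a, for instance, means
 * hash_table[0x2a >> 4] |= 1 << (15 - (0x2a & 0xf)), i.e. bit 5 of hash
 * register 2.
 */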
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_mac(struct gem *gp)
{
	unsigned char *e = &gp->dev->dev_addr[0];

	writel(0x1bf0, gp->regs + MAC_SNDPAUSE);

	writel(0x00, gp->regs + MAC_IPG0);
	writel(0x08, gp->regs + MAC_IPG1);
	writel(0x04, gp->regs + MAC_IPG2);
	writel(0x40, gp->regs + MAC_STIME);
	writel(0x40, gp->regs + MAC_MINFSZ);

	/* Ethernet payload + header + FCS + optional VLAN tag. */
	writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);

	writel(0x07, gp->regs + MAC_PASIZE);
	writel(0x04, gp->regs + MAC_JAMSIZE);
	writel(0x10, gp->regs + MAC_ATTLIM);
	writel(0x8808, gp->regs + MAC_MCTYPE);

	writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);

	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	writel(0, gp->regs + MAC_ADDR3);
	writel(0, gp->regs + MAC_ADDR4);
	writel(0, gp->regs + MAC_ADDR5);

	writel(0x0001, gp->regs + MAC_ADDR6);
	writel(0xc200, gp->regs + MAC_ADDR7);
	writel(0x0180, gp->regs + MAC_ADDR8);

	writel(0, gp->regs + MAC_AFILT0);
	writel(0, gp->regs + MAC_AFILT1);
	writel(0, gp->regs + MAC_AFILT2);
	writel(0, gp->regs + MAC_AF21MSK);
	writel(0, gp->regs + MAC_AF0MSK);

	gp->mac_rx_cfg = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
#endif

	writel(0, gp->regs + MAC_NCOLL);
	writel(0, gp->regs + MAC_FASUCC);
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
	writel(0, gp->regs + MAC_DTIMER);
	writel(0, gp->regs + MAC_PATMPS);
	writel(0, gp->regs + MAC_RFCTR);
	writel(0, gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_RXCVERR);

	/* Clear RX/TX/MAC/XIF config, we will set these up and enable
	 * them once a link is established.
	 */
	writel(0, gp->regs + MAC_TXCFG);
	writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
	writel(0, gp->regs + MAC_MCCFG);
	writel(0, gp->regs + MAC_XIFCFG);

	/* Setup MAC interrupts. We want to get all of the interesting
	 * counter expiration events, but we do not want to hear about
	 * normal rx/tx as the DMA engine tells us that.
	 */
	writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);

	/* Don't enable even the PAUSE interrupts for now, we
	 * make no use of those events other than to record them.
	 */
	writel(0xffffffff, gp->regs + MAC_MCMASK);

	/* Don't enable GEM's WOL in normal operations
	 */
	if (gp->has_wol)
		writel(0, gp->regs + WOL_WAKECSR);
}
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_pause_thresholds(struct gem *gp)
{
	u32 cfg;

	/* Calculate pause thresholds. Setting the OFF threshold to the
	 * full RX fifo size effectively disables PAUSE generation which
	 * is what we do for 10/100 only GEMs which have FIFOs too small
	 * to make real gains from PAUSE.
	 */
	if (gp->rx_fifo_sz <= (2 * 1024)) {
		gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
	} else {
		int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
		int off = (gp->rx_fifo_sz - (max_frame * 2));
		int on = off - max_frame;

		gp->rx_pause_off = off;
		gp->rx_pause_on = on;
	}

	/* Configure the chip "burst" DMA mode & enable some
	 * HW bug fixes on Apple version
	 */
	cfg = 0;
	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
		cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	cfg |= GREG_CFG_IBURST;
#endif
	cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
	cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
	writel(cfg, gp->regs + GREG_CFG);

	/* If Infinite Burst didn't stick, then use different
	 * thresholds (and Apple bug fixes don't exist)
	 */
	if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
		cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
		cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
		writel(cfg, gp->regs + GREG_CFG);
	}
}
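/* Added worked example (assuming a 1500-byte MTU on an original GEM with
 * its 20KB RX fifo): rx_buf_sz is 1518, so
 * max_frame = (1518 + 4 + 64) & ~63 = 1536, the OFF threshold is
 * 20480 - 2 * 1536 = 17408 bytes and the ON threshold is
 * 17408 - 1536 = 15872 bytes. Both values are later divided by 64 before
 * being programmed into RXDMA_PTHRESH.
 */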
static int gem_check_invariants(struct gem *gp)
{
	struct pci_dev *pdev = gp->pdev;
	u32 mif_cfg;

	/* On Apple's sungem, we can't rely on registers as the chip
	 * has been powered down by the firmware. The PHY is looked
	 * up later on.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
		gp->phy_type = phy_mii_mdio0;
		gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
		gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
		gp->swrst_base = 0;

		mif_cfg = readl(gp->regs + MIF_CFG);
		mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
		mif_cfg |= MIF_CFG_MDI0;
		writel(mif_cfg, gp->regs + MIF_CFG);
		writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
		writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);

		/* We hard-code the PHY address so we can properly bring it out of
		 * reset later on, we can't really probe it at this point, though
		 * that isn't an issue.
		 */
		if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
			gp->mii_phy_addr = 1;
		else
			gp->mii_phy_addr = 0;

		return 0;
	}

	mif_cfg = readl(gp->regs + MIF_CFG);

	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
		/* One of the MII PHYs _must_ be present
		 * as this chip has no gigabit PHY.
		 */
		if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
			printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n",
			       mif_cfg);
			return -1;
		}
	}

	/* Determine initial PHY interface type guess. MDIO1 is the
	 * external PHY and thus takes precedence over MDIO0.
	 */
	if (mif_cfg & MIF_CFG_MDI1) {
		gp->phy_type = phy_mii_mdio1;
		mif_cfg |= MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else if (mif_cfg & MIF_CFG_MDI0) {
		gp->phy_type = phy_mii_mdio0;
		mif_cfg &= ~MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else {
		gp->phy_type = phy_serialink;
	}
	if (gp->phy_type == phy_mii_mdio1 ||
	    gp->phy_type == phy_mii_mdio0) {
		int i;

		for (i = 0; i < 32; i++) {
			gp->mii_phy_addr = i;
			if (phy_read(gp, MII_BMCR) != 0xffff)
				break;
		}
		if (i == 32) {
			if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
				printk(KERN_ERR PFX "RIO MII phy will not respond.\n");
				return -1;
			}
			gp->phy_type = phy_serdes;
		}
	}

	/* Fetch the FIFO configurations now too. */
	gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
	gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;

	if (pdev->vendor == PCI_VENDOR_ID_SUN) {
		if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
			if (gp->tx_fifo_sz != (9 * 1024) ||
			    gp->rx_fifo_sz != (20 * 1024)) {
				printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = 0;
		} else {
			if (gp->tx_fifo_sz != (2 * 1024) ||
			    gp->rx_fifo_sz != (2 * 1024)) {
				printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
		}
	}

	return 0;
}
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_reinit_chip(struct gem *gp)
{
	/* Reset the chip */
	gem_reset(gp);

	/* Make sure ints are disabled */
	gem_disable_ints(gp);

	/* Allocate & setup ring buffers */
	gem_init_rings(gp);

	/* Configure pause thresholds */
	gem_init_pause_thresholds(gp);

	/* Init DMA & MAC engines */
	gem_init_dma(gp);
	gem_init_mac(gp);
}
/* Must be invoked with no lock held. */
static void gem_stop_phy(struct gem *gp, int wol)
{
	u32 mifcfg;
	unsigned long flags;

	/* Let the chip settle down a bit, it seems that helps
	 * for sleep mode on some models
	 */
	msleep(10);

	/* Make sure we aren't polling PHY status change. We
	 * don't currently use that feature though
	 */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_POLL;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (wol && gp->has_wol) {
		unsigned char *e = &gp->dev->dev_addr[0];
		u32 csr;

		/* Setup wake-on-lan for MAGIC packet */
		writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
		       gp->regs + MAC_RXCFG);
		writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
		writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
		writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);

		writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
		csr = WOL_WAKECSR_ENABLE;
		if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
			csr |= WOL_WAKECSR_MII;
		writel(csr, gp->regs + WOL_WAKECSR);
	} else {
		writel(0, gp->regs + MAC_RXCFG);
		(void)readl(gp->regs + MAC_RXCFG);
		/* Machine sleep will die in strange ways if we
		 * don't wait a bit here, looks like the chip takes
		 * some time to really shut down
		 */
		msleep(10);
	}

	writel(0, gp->regs + MAC_TXCFG);
	writel(0, gp->regs + MAC_XIFCFG);
	writel(0, gp->regs + TXDMA_CFG);
	writel(0, gp->regs + RXDMA_CFG);

	if (!wol) {
		spin_lock_irqsave(&gp->lock, flags);
		spin_lock(&gp->tx_lock);
		gem_reset(gp);
		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
		spin_unlock(&gp->tx_lock);
		spin_unlock_irqrestore(&gp->lock, flags);

		/* No need to take the lock here */

		if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
			gp->phy_mii.def->ops->suspend(&gp->phy_mii);

		/* According to Apple, we must set the MDIO pins to this benign
		 * state or we may 1) eat more current, 2) damage some PHYs
		 */
		writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
		writel(0, gp->regs + MIF_BBCLK);
		writel(0, gp->regs + MIF_BBDATA);
		writel(0, gp->regs + MIF_BBOENAB);
		writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
		(void) readl(gp->regs + MAC_XIFCFG);
	}
}
static int gem_do_start(struct net_device *dev)
{
	struct gem *gp = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);

	/* Enable the cell */
	gem_get_cell(gp);

	/* Init & setup chip hardware */
	gem_reinit_chip(gp);

	gp->running = 1;

	if (gp->lstate == link_up) {
		netif_carrier_on(gp->dev);
		gem_set_link_modes(gp);
	}

	netif_wake_queue(gp->dev);

	spin_unlock(&gp->tx_lock);
	spin_unlock_irqrestore(&gp->lock, flags);

	if (request_irq(gp->pdev->irq, gem_interrupt,
			IRQF_SHARED, dev->name, (void *)dev)) {
		printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);

		spin_lock_irqsave(&gp->lock, flags);
		spin_lock(&gp->tx_lock);

		gp->running = 0;
		gem_reset(gp);
		gem_clean_rings(gp);
		gem_put_cell(gp);

		spin_unlock(&gp->tx_lock);
		spin_unlock_irqrestore(&gp->lock, flags);

		return -EAGAIN;
	}

	return 0;
}
static void gem_do_stop(struct net_device *dev, int wol)
{
	struct gem *gp = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);

	gp->running = 0;

	/* Stop netif queue */
	netif_stop_queue(dev);

	/* Make sure ints are disabled */
	gem_disable_ints(gp);

	/* We can drop the lock now */
	spin_unlock(&gp->tx_lock);
	spin_unlock_irqrestore(&gp->lock, flags);

	/* If we are going to sleep with WOL */
	gem_stop_dma(gp);
	msleep(10);
	if (!wol)
		gem_reset(gp);
	msleep(10);

	/* Get rid of rings */
	gem_clean_rings(gp);

	/* No irq needed anymore */
	free_irq(gp->pdev->irq, (void *) dev);

	/* Cell not needed either if no WOL */
	if (!wol) {
		spin_lock_irqsave(&gp->lock, flags);
		gem_put_cell(gp);
		spin_unlock_irqrestore(&gp->lock, flags);
	}
}
static void gem_reset_task(struct work_struct *work)
{
	struct gem *gp = container_of(work, struct gem, reset_task);

	mutex_lock(&gp->pm_mutex);

	netif_poll_disable(gp->dev);

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	if (gp->running == 0)
		goto not_running;

	if (gp->running) {
		netif_stop_queue(gp->dev);

		/* Reset the chip & rings */
		gem_reinit_chip(gp);
		if (gp->lstate == link_up)
			gem_set_link_modes(gp);
		netif_wake_queue(gp->dev);
	}
 not_running:
	gp->reset_task_pending = 0;

	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);

	netif_poll_enable(gp->dev);

	mutex_unlock(&gp->pm_mutex);
}
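
/* Note: gem_reset_task() runs from the shared workqueue (see INIT_WORK in
 * gem_init_one() below). reset_task_pending is the flag the suspend and
 * remove paths spin on, so it must only be cleared here, after the chip has
 * actually been reinitialized.
 */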
static int gem_open(struct net_device *dev)
{
	struct gem *gp = dev->priv;
	int rc = 0;

	mutex_lock(&gp->pm_mutex);

	/* We need the cell enabled */
	if (!gp->asleep)
		rc = gem_do_start(dev);
	gp->opened = (rc == 0);

	mutex_unlock(&gp->pm_mutex);

	return rc;
}
static int gem_close(struct net_device *dev)
{
	struct gem *gp = dev->priv;

	/* Note: we don't need to call netif_poll_disable() here because
	 * our caller (dev_close) already did it for us
	 */

	mutex_lock(&gp->pm_mutex);

	gp->opened = 0;
	if (!gp->asleep)
		gem_do_stop(dev, 0);

	mutex_unlock(&gp->pm_mutex);

	return 0;
}
#ifdef CONFIG_PM
static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = dev->priv;
	unsigned long flags;

	mutex_lock(&gp->pm_mutex);

	netif_poll_disable(dev);

	printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
	       dev->name,
	       (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");

	/* Keep the cell enabled during the entire operation */
	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);
	gem_get_cell(gp);
	spin_unlock(&gp->tx_lock);
	spin_unlock_irqrestore(&gp->lock, flags);

	/* If the driver is opened, we stop the MAC */
	if (gp->opened) {
		/* Stop traffic, mark us closed */
		netif_device_detach(dev);

		/* Switch off MAC, remember WOL setting */
		gp->asleep_wol = gp->wake_on_lan;
		gem_do_stop(dev, gp->asleep_wol);
	} else
		gp->asleep_wol = 0;

	/* Mark us asleep */
	gp->asleep = 1;
	wmb();

	/* Stop the link timer */
	del_timer_sync(&gp->link_timer);

	/* Now we release the mutex so we don't block the reset task, which
	 * can take it too. We are marked asleep, so there will be no
	 * conflict here
	 */
	mutex_unlock(&gp->pm_mutex);

	/* Wait for a pending reset task to complete */
	while (gp->reset_task_pending)
		yield();
	flush_scheduled_work();

	/* Shut the PHY down eventually and setup WOL */
	gem_stop_phy(gp, gp->asleep_wol);

	/* Make sure bus master is disabled */
	pci_disable_device(gp->pdev);

	/* Release the cell, no need to take a lock at this point since
	 * nothing else can happen now
	 */
	gem_put_cell(gp);

	return 0;
}
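
/* The suspend protocol above, restated: pm_mutex serializes against open,
 * close, ioctl and the reset task, while gp->asleep (plus the wmb()) is what
 * those paths test so they never touch the powered-down chip once we return.
 * The mutex is deliberately dropped before waiting on reset_task_pending,
 * since gem_reset_task() takes it too.
 */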
static int gem_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = dev->priv;
	unsigned long flags;

	printk(KERN_INFO "%s: resuming\n", dev->name);

	mutex_lock(&gp->pm_mutex);

	/* Keep the cell enabled during the entire operation, no need to
	 * take a lock here though, since nothing else can happen while we
	 * are marked asleep
	 */
	gem_get_cell(gp);

	/* Make sure PCI access and bus master are enabled */
	if (pci_enable_device(gp->pdev)) {
		printk(KERN_ERR "%s: Can't re-enable chip !\n",
		       dev->name);
		/* Put cell and forget it for now, it will be considered
		 * still asleep, a new sleep cycle may bring it back
		 */
		gem_put_cell(gp);
		mutex_unlock(&gp->pm_mutex);
		return 0;
	}
	pci_set_master(gp->pdev);

	/* Reset everything */
	gem_reset(gp);

	/* Mark us woken up */
	gp->asleep = 0;
	wmb();

	/* Bring the PHY back. Again, the lock is useless at this point as
	 * nothing can be happening until we restart the whole thing
	 */
	gem_init_phy(gp);

	/* If we were opened, bring everything back */
	if (gp->opened) {
		/* Restart MAC */
		gem_do_start(dev);

		/* Re-attach net device */
		netif_device_attach(dev);
	}

	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);

	/* If we had WOL enabled, the cell clock was never turned off during
	 * sleep, so we end up being unbalanced. Fix that here
	 */
	if (gp->asleep_wol)
		gem_put_cell(gp);

	/* This function doesn't need to hold the cell, it will be held if the
	 * driver is open by gem_do_start().
	 */
	gem_put_cell(gp);

	spin_unlock(&gp->tx_lock);
	spin_unlock_irqrestore(&gp->lock, flags);

	netif_poll_enable(dev);

	mutex_unlock(&gp->pm_mutex);

	return 0;
}
#endif /* CONFIG_PM */
static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
	struct gem *gp = dev->priv;
	struct net_device_stats *stats = &gp->net_stats;

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	/* I have seen this being called while the PM was in progress,
	 * so we shield against this
	 */
	if (gp->running) {
		stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
		writel(0, gp->regs + MAC_FCSERR);

		stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
		writel(0, gp->regs + MAC_AERR);

		stats->rx_length_errors += readl(gp->regs + MAC_LERR);
		writel(0, gp->regs + MAC_LERR);

		stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
		stats->collisions +=
			(readl(gp->regs + MAC_ECOLL) +
			 readl(gp->regs + MAC_LCOLL));
		writel(0, gp->regs + MAC_ECOLL);
		writel(0, gp->regs + MAC_LCOLL);
	}

	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);

	return &gp->net_stats;
}
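
/* Note on the counter handling above: each MAC error register is read and
 * then explicitly written back to zero, so every call accumulates only the
 * delta since the previous call into the software net_stats. MAC_ECOLL is
 * read twice, once into tx_aborted_errors and once as part of the collisions
 * sum; the second read returns the same value only because the register is
 * not cleared until after both reads.
 */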
static int gem_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *macaddr = (struct sockaddr *) addr;
	struct gem *gp = dev->priv;
	unsigned char *e = &dev->dev_addr[0];

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	if (!netif_running(dev) || !netif_device_present(dev)) {
		/* We'll just catch it later when the
		 * device is up'd or resumed.
		 */
		memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
		return 0;
	}

	mutex_lock(&gp->pm_mutex);
	memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
	if (gp->running) {
		writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
		writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
		writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
	}
	mutex_unlock(&gp->pm_mutex);

	return 0;
}
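
/* The packing used just above (and for the WOL match registers in
 * gem_stop_phy()) is the same scheme: each 32-bit register holds two octets
 * of the address, high octet in bits 15:8, with register 0 taking the last
 * pair. Purely as an illustration, for the made-up address 08:00:20:12:34:56:
 *
 *	MAC_ADDR0 = (0x34 << 8) | 0x56 = 0x3456
 *	MAC_ADDR1 = (0x20 << 8) | 0x12 = 0x2012
 *	MAC_ADDR2 = (0x08 << 8) | 0x00 = 0x0800
 */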
static void gem_set_multicast(struct net_device *dev)
{
	struct gem *gp = dev->priv;
	u32 rxcfg, rxcfg_new;
	int limit = 10000;

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	if (!gp->running)
		goto bail;

	netif_stop_queue(dev);

	rxcfg = readl(gp->regs + MAC_RXCFG);
	rxcfg_new = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	rxcfg_new |= MAC_RXCFG_SFCS;
#endif
	gp->mac_rx_cfg = rxcfg_new;

	writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
	while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
		if (!limit--)
			break;
		udelay(10);
	}

	rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
	rxcfg |= rxcfg_new;

	writel(rxcfg, gp->regs + MAC_RXCFG);

	netif_wake_queue(dev);

 bail:
	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}
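
/* Why the dance above: the RX MAC is disabled (MAC_RXCFG_ENAB cleared) and
 * polled until the chip acknowledges the disable before the promiscuous and
 * hash-filter bits are rewritten; updating the filter while the receiver is
 * live would presumably race with frames in flight. The 10000 * 10us poll
 * limit is just a safety net so a wedged chip can't hang the caller, which
 * holds both spinlocks here.
 */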
/* Jumbo-grams don't seem to work :-( */
#define GEM_MIN_MTU	68
#if 1
#define GEM_MAX_MTU	1500
#else
#define GEM_MAX_MTU	9000
#endif

static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gem *gp = dev->priv;

	if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev) || !netif_device_present(dev)) {
		/* We'll just catch it later when the
		 * device is up'd or resumed.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	mutex_lock(&gp->pm_mutex);
	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);
	dev->mtu = new_mtu;
	if (gp->running) {
		gem_reinit_chip(gp);
		if (gp->lstate == link_up)
			gem_set_link_modes(gp);
	}
	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
	mutex_unlock(&gp->pm_mutex);

	return 0;
}
static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct gem *gp = dev->priv;

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(gp->pdev));
}
static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gem *gp = dev->priv;

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		if (gp->phy_mii.def)
			cmd->supported = gp->phy_mii.def->features;
		else
			cmd->supported = (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full);

		/* XXX hardcoded stuff for now */
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->phy_address = 0; /* XXX fixed PHYAD */

		/* Return current PHY settings */
		spin_lock_irq(&gp->lock);
		cmd->autoneg = gp->want_autoneg;
		cmd->speed = gp->phy_mii.speed;
		cmd->duplex = gp->phy_mii.duplex;
		cmd->advertising = gp->phy_mii.advertising;

		/* If we started with a forced mode, we don't have a default
		 * advertise set, we need to return something sensible so
		 * userland can re-enable autoneg properly.
		 */
		if (cmd->advertising == 0)
			cmd->advertising = cmd->supported;
		spin_unlock_irq(&gp->lock);
	} else { // XXX PCS ?
		cmd->supported =
			(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			 SUPPORTED_Autoneg);
		cmd->advertising = cmd->supported;
		cmd->speed = 0;
		cmd->duplex = cmd->port = cmd->phy_address =
			cmd->transceiver = cmd->autoneg = 0;
	}
	cmd->maxtxpkt = cmd->maxrxpkt = 0;

	return 0;
}
static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gem *gp = dev->priv;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE &&
	    cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((cmd->speed != SPEED_1000 &&
	      cmd->speed != SPEED_100 &&
	      cmd->speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Apply settings and restart link process. */
	spin_lock_irq(&gp->lock);
	gem_get_cell(gp);
	gem_begin_auto_negotiation(gp, cmd);
	gem_put_cell(gp);
	spin_unlock_irq(&gp->lock);

	return 0;
}
static int gem_nway_reset(struct net_device *dev)
{
	struct gem *gp = dev->priv;

	if (!gp->want_autoneg)
		return -EINVAL;

	/* Restart link process. */
	spin_lock_irq(&gp->lock);
	gem_get_cell(gp);
	gem_begin_auto_negotiation(gp, NULL);
	gem_put_cell(gp);
	spin_unlock_irq(&gp->lock);

	return 0;
}
static u32 gem_get_msglevel(struct net_device *dev)
{
	struct gem *gp = dev->priv;
	return gp->msg_enable;
}

static void gem_set_msglevel(struct net_device *dev, u32 value)
{
	struct gem *gp = dev->priv;
	gp->msg_enable = value;
}
/* Add more when I understand how to program the chip */
/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */

#define WOL_SUPPORTED_MASK	(WAKE_MAGIC)

static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = dev->priv;

	/* Add more when I understand how to program the chip */
	if (gp->has_wol) {
		wol->supported = WOL_SUPPORTED_MASK;
		wol->wolopts = gp->wake_on_lan;
	} else {
		wol->supported = 0;
		wol->wolopts = 0;
	}
}

static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = dev->priv;

	if (!gp->has_wol)
		return -EOPNOTSUPP;
	gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
	return 0;
}
static const struct ethtool_ops gem_ethtool_ops = {
	.get_drvinfo		= gem_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_settings		= gem_get_settings,
	.set_settings		= gem_set_settings,
	.nway_reset		= gem_nway_reset,
	.get_msglevel		= gem_get_msglevel,
	.set_msglevel		= gem_set_msglevel,
	.get_wol		= gem_get_wol,
	.set_wol		= gem_set_wol,
};
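
/* For reference, these ops back the usual ethtool(8) invocations; assuming an
 * interface named eth0, typical examples would be:
 *
 *	ethtool -i eth0			-> gem_get_drvinfo()
 *	ethtool eth0			-> gem_get_settings()
 *	ethtool -s eth0 autoneg on	-> gem_set_settings()
 *	ethtool -r eth0			-> gem_nway_reset()
 *	ethtool -s eth0 wol g		-> gem_set_wol() ('g' == WAKE_MAGIC)
 */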
static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct gem *gp = dev->priv;
	struct mii_ioctl_data *data = if_mii(ifr);
	int rc = -EOPNOTSUPP;
	unsigned long flags;

	/* Hold the PM mutex while doing ioctl's or we may collide
	 * with power management.
	 */
	mutex_lock(&gp->pm_mutex);

	spin_lock_irqsave(&gp->lock, flags);
	gem_get_cell(gp);
	spin_unlock_irqrestore(&gp->lock, flags);

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = gp->mii_phy_addr;
		/* Fallthrough... */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		if (!gp->running)
			rc = -EAGAIN;
		else {
			data->val_out = __phy_read(gp, data->phy_id & 0x1f,
						   data->reg_num & 0x1f);
			rc = 0;
		}
		break;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (!capable(CAP_NET_ADMIN))
			rc = -EPERM;
		else if (!gp->running)
			rc = -EAGAIN;
		else {
			__phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
				    data->val_in);
			rc = 0;
		}
		break;
	}

	spin_lock_irqsave(&gp->lock, flags);
	gem_put_cell(gp);
	spin_unlock_irqrestore(&gp->lock, flags);

	mutex_unlock(&gp->pm_mutex);

	return rc;
}
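
/* Illustrative only: a userland sketch of driving the MII ioctls above,
 * assuming an interface named eth0 (this is not part of the driver, and the
 * ifr_data cast is the conventional userspace idiom for these ioctls; headers
 * and error handling omitted):
 *
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = MII_BMSR;	// basic mode status register
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// PHY status now in mii->val_out
 */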
#if (!defined(__sparc__) && !defined(CONFIG_PPC_PMAC))
/* Fetch MAC address from vital product data of PCI ROM. */
static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
{
	int this_offset;

	for (this_offset = 0x20; this_offset < len; this_offset++) {
		void __iomem *p = rom_base + this_offset;
		int i;

		if (readb(p + 0) != 0x90 ||
		    readb(p + 1) != 0x00 ||
		    readb(p + 2) != 0x09 ||
		    readb(p + 3) != 0x4e ||
		    readb(p + 4) != 0x41 ||
		    readb(p + 5) != 0x06)
			continue;

		this_offset += 6;
		p += 6;

		for (i = 0; i < 6; i++)
			dev_addr[i] = readb(p + i);
		return 1;
	}
	return 0;
}

static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
{
	size_t size;
	void __iomem *p = pci_map_rom(pdev, &size);

	if (p) {
		int found;

		found = readb(p) == 0x55 &&
			readb(p + 1) == 0xaa &&
			find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
		pci_unmap_rom(pdev, p);
		if (found)
			return;
	}

	/* Sun MAC prefix then 3 random bytes. */
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(dev_addr + 3, 3);
}
#endif /* not Sparc and not PPC */
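
/* Reading of the magic numbers above, inferred from the code rather than a
 * spec: 0x55/0xaa is the standard PCI expansion ROM signature, and the
 * 0x90 0x00 0x09 0x4e 0x41 0x06 sequence looks like a VPD resource whose
 * payload is the 'N','A' (network address) keyword with a length byte of 6,
 * immediately followed by the six MAC octets that the inner loop copies out.
 */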
static int __devinit gem_get_device_address(struct gem *gp)
{
#if defined(__sparc__) || defined(CONFIG_PPC_PMAC)
	struct net_device *dev = gp->dev;
#endif

#if defined(__sparc__)
	struct pci_dev *pdev = gp->pdev;
	struct pcidev_cookie *pcp = pdev->sysdata;
	int use_idprom = 1;

	if (pcp != NULL) {
		unsigned char *addr;
		int len;

		addr = of_get_property(pcp->prom_node, "local-mac-address",
				       &len);
		if (addr && len == 6) {
			use_idprom = 0;
			memcpy(dev->dev_addr, addr, 6);
		}
	}
	if (use_idprom)
		memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
#elif defined(CONFIG_PPC_PMAC)
	const unsigned char *addr;

	addr = get_property(gp->of_node, "local-mac-address", NULL);
	if (addr == NULL) {
		printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
		return -1;
	}
	memcpy(dev->dev_addr, addr, 6);
#else
	get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
#endif
	return 0;
}
static void gem_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct gem *gp = dev->priv;

		unregister_netdev(dev);

		/* Stop the link timer */
		del_timer_sync(&gp->link_timer);

		/* We shouldn't need any locking here */
		gem_get_cell(gp);

		/* Wait for a pending reset task to complete */
		while (gp->reset_task_pending)
			yield();
		flush_scheduled_work();

		/* Shut the PHY down */
		gem_stop_phy(gp, 0);

		gem_put_cell(gp);

		/* Make sure bus master is disabled */
		pci_disable_device(gp->pdev);

		/* Free resources */
		pci_free_consistent(pdev,
				    sizeof(struct gem_init_block),
				    gp->init_block,
				    gp->gblock_dvma);
		iounmap(gp->regs);
		pci_release_regions(pdev);
		free_netdev(dev);

		pci_set_drvdata(pdev, NULL);
	}
}
static int __devinit gem_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int gem_version_printed = 0;
	unsigned long gemreg_base, gemreg_len;
	struct net_device *dev;
	struct gem *gp;
	int i, err, pci_using_dac;

	if (gem_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* Apple gmac note: during probe, the chip is powered up by
	 * the arch code to allow the code below to work (and to let
	 * the chip be probed on the config space). It won't stay powered
	 * up until the interface is brought up however, so we can't rely
	 * on register configuration done at this point.
	 */
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable MMIO operation, "
		       "aborting.\n");
		return err;
	}
	pci_set_master(pdev);

	/* Configure DMA attributes. */

	/* All of the GEM documentation states that 64-bit DMA addressing
	 * is fully supported and should work just fine. However the
	 * front end for RIO based GEMs is different and only supports
	 * 32-bit addressing.
	 *
	 * For now we assume the various PPC GEMs are 32-bit only as well.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_GEM &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_disable_device;
		}
		pci_using_dac = 0;
	}

	gemreg_base = pci_resource_start(pdev, 0);
	gemreg_len = pci_resource_len(pdev, 0);

	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_disable_device;
	}

	dev = alloc_etherdev(sizeof(*gp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_disable_device;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	gp = dev->priv;

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_free_netdev;
	}

	gp->pdev = pdev;
	dev->base_addr = (long) pdev;
	gp->dev = dev;

	gp->msg_enable = DEFAULT_MSG;

	spin_lock_init(&gp->lock);
	spin_lock_init(&gp->tx_lock);
	mutex_init(&gp->pm_mutex);

	init_timer(&gp->link_timer);
	gp->link_timer.function = gem_link_timer;
	gp->link_timer.data = (unsigned long) gp;

	INIT_WORK(&gp->reset_task, gem_reset_task);

	gp->lstate = link_down;
	gp->timer_ticks = 0;
	netif_carrier_off(dev);

	gp->regs = ioremap(gemreg_base, gemreg_len);
	if (gp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* On Apple, we want a reference to the Open Firmware device-tree
	 * node. We use it for clock control.
	 */
#ifdef CONFIG_PPC_PMAC
	gp->of_node = pci_device_to_OF_node(pdev);
#endif

	/* Only Apple version supports WOL afaik */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE)
		gp->has_wol = 1;

	/* Make sure cell is enabled */
	gem_get_cell(gp);

	/* Make sure everything is stopped and in init state */
	gem_reset(gp);

	/* Fill up the mii_phy structure (even if we won't use it) */
	gp->phy_mii.dev = dev;
	gp->phy_mii.mdio_read = _phy_read;
	gp->phy_mii.mdio_write = _phy_write;
#ifdef CONFIG_PPC_PMAC
	gp->phy_mii.platform_data = gp->of_node;
#endif
	/* By default, we start with autoneg */
	gp->want_autoneg = 1;

	/* Check fifo sizes, PHY type, etc... */
	if (gem_check_invariants(gp)) {
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* It is guaranteed that the returned buffer will be at least
	 * PAGE_SIZE aligned.
	 */
	gp->init_block = (struct gem_init_block *)
		pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
				     &gp->gblock_dvma);
	if (!gp->init_block) {
		printk(KERN_ERR PFX "Cannot allocate init block, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	if (gem_get_device_address(gp))
		goto err_out_free_consistent;

	dev->open = gem_open;
	dev->stop = gem_close;
	dev->hard_start_xmit = gem_start_xmit;
	dev->get_stats = gem_get_stats;
	dev->set_multicast_list = gem_set_multicast;
	dev->do_ioctl = gem_ioctl;
	dev->poll = gem_poll;
	dev->weight = 64;
	dev->ethtool_ops = &gem_ethtool_ops;
	dev->tx_timeout = gem_tx_timeout;
	dev->watchdog_timeo = 5 * HZ;
	dev->change_mtu = gem_change_mtu;
	dev->irq = pdev->irq;
	dev->dma = 0;
	dev->set_mac_address = gem_set_mac_address;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = gem_poll_controller;
#endif

	/* Set that now, in case PM kicks in now */
	pci_set_drvdata(pdev, dev);

	/* Detect & init PHY, start autoneg, we release the cell now
	 * too, it will be managed by whoever needs it
	 */
	gem_init_phy(gp);

	spin_lock_irq(&gp->lock);
	gem_put_cell(gp);
	spin_unlock_irq(&gp->lock);

	/* Register with kernel */
	if (register_netdev(dev)) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_consistent;
	}

	printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet ",
	       dev->name);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? ' ' : ':');
	printk("\n");

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1)
		printk(KERN_INFO "%s: Found %s PHY\n", dev->name,
		       gp->phy_mii.def ? gp->phy_mii.def->name : "no");

	/* GEM can do it all... */
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;
	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	return 0;

err_out_free_consistent:
	gem_remove_one(pdev);
err_out_iounmap:
	gem_put_cell(gp);
	iounmap(gp->regs);

err_out_free_res:
	pci_release_regions(pdev);

err_out_free_netdev:
	free_netdev(dev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}
static struct pci_driver gem_driver = {
	.name		= GEM_MODULE_NAME,
	.id_table	= gem_pci_tbl,
	.probe		= gem_init_one,
	.remove		= gem_remove_one,
#ifdef CONFIG_PM
	.suspend	= gem_suspend,
	.resume		= gem_resume,
#endif /* CONFIG_PM */
};

static int __init gem_init(void)
{
	return pci_register_driver(&gem_driver);
}

static void __exit gem_cleanup(void)
{
	pci_unregister_driver(&gem_driver);
}

module_init(gem_init);
module_exit(gem_cleanup);