/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
 * sungem.c: Sun GEM ethernet driver.
 *
 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
 *
 * Support for Apple GMAC and assorted PHYs, WOL, Power Management
 * (C) 2001,2002,2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 * (C) 2004,2005 Benjamin Herrenschmidt, IBM Corp.
 *
 * NAPI and NETPOLL support
 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif

#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif

#include <linux/sungem_phy.h>
#include "sungem.h"
#define DEFAULT_MSG	(NETIF_MSG_DRV		| \
			 NETIF_MSG_PROBE	| \
			 NETIF_MSG_LINK)

#define ADVERTISE_MASK	(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
			 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
			 SUPPORTED_Pause | SUPPORTED_Autoneg)
#define DRV_NAME	"sungem"
#define DRV_VERSION	"1.0"
#define DRV_AUTHOR	"David S. Miller <davem@redhat.com>"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n";

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_LICENSE("GPL");
#define GEM_MODULE_NAME	"gem"

static const struct pci_device_id gem_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },

	/* These models only differ from the original GEM in
	 * that their tx/rx fifos are of a different size and
	 * they only support 10/100 speeds. -DaveM
	 *
	 * Apple's GMAC does support gigabit on machines with
	 * the BCM54xx PHYs. -BenH
	 */
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{0, }
};

MODULE_DEVICE_TABLE(pci, gem_pci_tbl);
static u16 __sungem_phy_read(struct gem *gp, int phy_addr, int reg)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (2 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	writel(cmd, gp->regs + MIF_FRAME);

	while (--limit) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}

	if (!limit)
		cmd = 0xffff;

	return cmd & MIF_FRAME_DATA;
}
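/* The command word assembled above follows the usual IEEE 802.3 clause 22
 * MDIO framing: a start bit, a two-bit opcode (read here, write in
 * __sungem_phy_write() below), five bits of PHY address, five bits of
 * register address, a turnaround field and sixteen data bits. The chip
 * sets the turnaround LSB (MIF_FRAME_TALSB) once the transaction has
 * completed, which is what the polling loop waits for.
 */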
static inline int _sungem_phy_read(struct net_device *dev, int mii_id, int reg)
{
	struct gem *gp = netdev_priv(dev);
	return __sungem_phy_read(gp, mii_id, reg);
}
static inline u16 sungem_phy_read(struct gem *gp, int reg)
{
	return __sungem_phy_read(gp, gp->mii_phy_addr, reg);
}
static void __sungem_phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (1 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	cmd |= (val & MIF_FRAME_DATA);
	writel(cmd, gp->regs + MIF_FRAME);

	while (limit--) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}
}
static inline void _sungem_phy_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct gem *gp = netdev_priv(dev);
	__sungem_phy_write(gp, mii_id, reg, val & 0xffff);
}
static inline void sungem_phy_write(struct gem *gp, int reg, u16 val)
{
	__sungem_phy_write(gp, gp->mii_phy_addr, reg, val);
}
static inline void gem_enable_ints(struct gem *gp)
{
	/* Enable all interrupts but TXDONE */
	writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}
static inline void gem_disable_ints(struct gem *gp)
{
	/* Disable all interrupts, including TXDONE */
	writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
	(void)readl(gp->regs + GREG_IMASK); /* write posting */
}
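/* The dummy readl() above flushes the posted write through the PCI
 * host bridge; without it, the mask update could still sit in a write
 * buffer while the caller already assumes interrupts are off.
 */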
static void gem_get_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled < 0);
	gp->cell_enabled++;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 1) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}
/* Turn off the chip's clock */
static void gem_put_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled <= 0);
	gp->cell_enabled--;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 0) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}
static inline void gem_netif_stop(struct gem *gp)
{
	netif_trans_update(gp->dev);	/* prevent tx timeout */
	napi_disable(&gp->napi);
	netif_tx_disable(gp->dev);
}
static inline void gem_netif_start(struct gem *gp)
{
	/* NOTE: unconditional netif_wake_queue is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots.
	 */
	netif_wake_queue(gp->dev);
	napi_enable(&gp->napi);
}
static void gem_schedule_reset(struct gem *gp)
{
	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);
}
static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
}
static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
	u32 pcs_miistat;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
			gp->dev->name, pcs_istat);

	if (!(pcs_istat & PCS_ISTAT_LSC)) {
		netdev_err(dev, "PCS irq but no link status change???\n");
		return 0;
	}

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	pcs_miistat = readl(gp->regs + PCS_MIISTAT);
	if (!(pcs_miistat & PCS_MIISTAT_LS))
		pcs_miistat |=
			(readl(gp->regs + PCS_MIISTAT) &
			 PCS_MIISTAT_LS);

	if (pcs_miistat & PCS_MIISTAT_ANC) {
		/* The remote-fault indication is only valid
		 * when autoneg has completed.
		 */
		if (pcs_miistat & PCS_MIISTAT_RF)
			netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n");
		else
			netdev_info(dev, "PCS AutoNEG complete\n");
	}

	if (pcs_miistat & PCS_MIISTAT_LS) {
		netdev_info(dev, "PCS link is now up\n");
		netif_carrier_on(gp->dev);
	} else {
		netdev_info(dev, "PCS link is now down\n");
		netif_carrier_off(gp->dev);
		/* If this happens and the link timer is not running,
		 * reset so we re-negotiate.
		 */
		if (!timer_pending(&gp->link_timer))
			return 1;
	}

	return 0;
}
static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			gp->dev->name, txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TXSTAT_DTE) &&
	    !(txmac_stat & ~MAC_TXSTAT_DTE))
		return 0;

	if (txmac_stat & MAC_TXSTAT_URUN) {
		netdev_err(dev, "TX MAC xmit underrun\n");
		dev->stats.tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TXSTAT_MPE) {
		netdev_err(dev, "TX MAC max packet size error\n");
		dev->stats.tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TXSTAT_NCE)
		dev->stats.collisions += 0x10000;

	if (txmac_stat & MAC_TXSTAT_ECE) {
		dev->stats.tx_aborted_errors += 0x10000;
		dev->stats.collisions += 0x10000;
	}

	if (txmac_stat & MAC_TXSTAT_LCE) {
		dev->stats.tx_aborted_errors += 0x10000;
		dev->stats.collisions += 0x10000;
	}

	/* We do not keep track of MAC_TXSTAT_FCE and
	 * MAC_TXSTAT_PCE events.
	 */
	return 0;
}
/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int gem_rxmac_reset(struct gem *gp)
{
	struct net_device *dev = gp->dev;
	int limit, i;
	u64 desc_dma;
	u32 val;

	/* First, reset & disable MAC RX. */
	writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX MAC will not reset, resetting whole chip\n");
		return 1;
	}

	writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
	       gp->regs + MAC_RXCFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, gp->regs + RXDMA_CFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
		return 1;
	}

	mdelay(5);

	/* Execute RX reset command. */
	writel(gp->swrst_base | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
		return 1;
	}

	/* Refresh the RX ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd = &gp->init_block->rxd[i];

		if (gp->rx_skbs[i] == NULL) {
			netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n");
			return 1;
		}

		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
	}
	gp->rx_new = gp->rx_old = 0;

	/* Now we must reprogram the rest of RX unit. */
	desc_dma = (u64) gp->gblock_dvma;
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);
	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	return 0;
}
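/* Each polling loop above bounds its wait at 5000 iterations of
 * udelay(10), i.e. roughly 50ms worst case, before giving up and
 * letting the caller escalate to a full chip reset.
 */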
static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
	int ret = 0;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
			gp->dev->name, rxmac_stat);

	if (rxmac_stat & MAC_RXSTAT_OFLW) {
		u32 smac = readl(gp->regs + MAC_SMACHINE);

		netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
		dev->stats.rx_over_errors++;
		dev->stats.rx_fifo_errors++;

		ret = gem_rxmac_reset(gp);
	}

	if (rxmac_stat & MAC_RXSTAT_ACE)
		dev->stats.rx_frame_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_CCE)
		dev->stats.rx_crc_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_LCE)
		dev->stats.rx_length_errors += 0x10000;

	/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
	 * events.
	 */
	return ret;
}
static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mac_cstat = readl(gp->regs + MAC_CSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
			gp->dev->name, mac_cstat);

	/* This interrupt is just for pause frame and pause
	 * tracking. It is useful for diagnostics and debug
	 * but probably by default we will mask these events.
	 */
	if (mac_cstat & MAC_CSTAT_PS)
		gp->pause_entered++;

	if (mac_cstat & MAC_CSTAT_PRCV)
		gp->pause_last_time_recvd = (mac_cstat >> 16);

	return 0;
}
static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mif_status = readl(gp->regs + MIF_STATUS);
	u32 reg_val, changed_bits;

	reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
	changed_bits = (mif_status & MIF_STATUS_STAT);

	gem_handle_mif_event(gp, reg_val, changed_bits);

	return 0;
}
static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		netdev_err(dev, "PCI error [%04x]", pci_estat);

		if (pci_estat & GREG_PCIESTAT_BADACK)
			pr_cont(" <No ACK64# during ABS64 cycle>");
		if (pci_estat & GREG_PCIESTAT_DTRTO)
			pr_cont(" <Delayed transaction timeout>");
		if (pci_estat & GREG_PCIESTAT_OTHER)
			pr_cont(" <other>");
		pr_cont("\n");
	} else {
		pci_estat |= GREG_PCIESTAT_OTHER;
		netdev_err(dev, "PCI error\n");
	}

	if (pci_estat & GREG_PCIESTAT_OTHER) {
		u16 pci_cfg_stat;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_read_config_word(gp->pdev, PCI_STATUS,
				     &pci_cfg_stat);
		netdev_err(dev, "Read PCI cfg space status [%04x]\n",
			   pci_cfg_stat);
		if (pci_cfg_stat & PCI_STATUS_PARITY)
			netdev_err(dev, "PCI parity error detected\n");
		if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
			netdev_err(dev, "PCI target abort\n");
		if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
			netdev_err(dev, "PCI master acks target abort\n");
		if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
			netdev_err(dev, "PCI master abort\n");
		if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
			netdev_err(dev, "PCI system error SERR#\n");
		if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
			netdev_err(dev, "PCI parity error\n");

		/* Write the error bits back to clear them. */
		pci_cfg_stat &= (PCI_STATUS_PARITY |
				 PCI_STATUS_SIG_TARGET_ABORT |
				 PCI_STATUS_REC_TARGET_ABORT |
				 PCI_STATUS_REC_MASTER_ABORT |
				 PCI_STATUS_SIG_SYSTEM_ERROR |
				 PCI_STATUS_DETECTED_PARITY);
		pci_write_config_word(gp->pdev,
				      PCI_STATUS, pci_cfg_stat);
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}
/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	if (gem_status & GREG_STAT_RXNOBUF) {
		/* Frame arrived, no free RX buffers available. */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: no buffer for rx frame\n",
				gp->dev->name);
		dev->stats.rx_dropped++;
	}

	if (gem_status & GREG_STAT_RXTAGERR) {
		/* corrupt RX tag framing */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
				gp->dev->name);
		dev->stats.rx_errors++;

		return 1;
	}

	if (gem_status & GREG_STAT_PCS) {
		if (gem_pcs_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_TXMAC) {
		if (gem_txmac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_RXMAC) {
		if (gem_rxmac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_MAC) {
		if (gem_mac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_MIF) {
		if (gem_mif_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_PCIERR) {
		if (gem_pci_interrupt(dev, gp, gem_status))
			return 1;
	}

	return 0;
}
static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	int entry, limit;

	entry = gp->tx_old;
	limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
	while (entry != limit) {
		struct sk_buff *skb;
		struct gem_txd *txd;
		dma_addr_t dma_addr;
		u32 dma_len;
		int frag;

		if (netif_msg_tx_done(gp))
			printk(KERN_DEBUG "%s: tx done, slot %d\n",
				gp->dev->name, entry);
		skb = gp->tx_skbs[entry];
		if (skb_shinfo(skb)->nr_frags) {
			int last = entry + skb_shinfo(skb)->nr_frags;
			int walk = entry;
			int incomplete = 0;

			last &= (TX_RING_SIZE - 1);
			for (;;) {
				walk = NEXT_TX(walk);
				if (walk == limit)
					incomplete = 1;
				if (walk == last)
					break;
			}
			if (incomplete)
				break;
		}
		gp->tx_skbs[entry] = NULL;
		dev->stats.tx_bytes += skb->len;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			txd = &gp->init_block->txd[entry];

			dma_addr = le64_to_cpu(txd->buffer);
			dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

			pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
			entry = NEXT_TX(entry);
		}

		dev->stats.tx_packets++;
		dev_consume_skb_any(skb);
	}
	gp->tx_old = entry;

	/* Need to make the tx_old update visible to gem_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that gem_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(dev) &&
		     TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		__netif_tx_lock(txq, smp_processor_id());
		if (netif_queue_stopped(dev) &&
		    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
			netif_wake_queue(dev);
		__netif_tx_unlock(txq);
	}
}
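/* The smp_mb() above pairs with the one in gem_start_xmit(): the xmit
 * path does netif_stop_queue() and then re-checks TX_BUFFS_AVAIL(),
 * while this path updates tx_old and then re-checks
 * netif_queue_stopped(). With both barriers in place at least one side
 * must observe the other's update, so the queue cannot stay stopped
 * while free slots are available.
 */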
static __inline__ void gem_post_rxds(struct gem *gp, int limit)
{
	int cluster_start, curr, count, kick;

	cluster_start = curr = (gp->rx_new & ~(4 - 1));
	count = 0;
	kick = -1;
	dma_wmb();
	while (curr != limit) {
		curr = NEXT_RX(curr);
		if (++count == 4) {
			struct gem_rxd *rxd =
				&gp->init_block->rxd[cluster_start];
			for (;;) {
				rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
				rxd++;
				cluster_start = NEXT_RX(cluster_start);
				if (cluster_start == curr)
					break;
			}
			kick = curr;
			count = 0;
		}
	}
	if (kick >= 0) {
		mb();
		writel(kick, gp->regs + RXDMA_KICK);
	}
}
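/* Buffers are re-armed in clusters of four (note the & ~(4 - 1)
 * alignment of cluster_start above) and RXDMA_KICK is written once per
 * batch; the kick register appears to expect four-descriptor
 * granularity, and batching also keeps the refill path cheap.
 */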
#define ALIGNED_RX_SKB_ADDR(addr) \
        ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size,
						gfp_t gfp_flags)
{
	struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);

	if (likely(skb)) {
		unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
		skb_reserve(skb, offset);
	}
	return skb;
}
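/* ALIGNED_RX_SKB_ADDR() computes how many bytes to skip so that
 * skb->data lands on a 64-byte boundary; for an address ending in 0x28,
 * for example, it yields 0x18 (0x28 + 0x18 = 0x40). The extra 64 bytes
 * requested from alloc_skb() guarantee room for that skb_reserve().
 */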
static int gem_rx(struct gem *gp, int work_to_do)
{
	struct net_device *dev = gp->dev;
	int entry, drops, work_done = 0;
	u32 done;

	if (netif_msg_rx_status(gp))
		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
			gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);

	entry = gp->rx_new;
	drops = 0;
	done = readl(gp->regs + RXDMA_DONE);
	for (;;) {
		struct gem_rxd *rxd = &gp->init_block->rxd[entry];
		struct sk_buff *skb;
		u64 status = le64_to_cpu(rxd->status_word);
		dma_addr_t dma_addr;
		int len;

		if ((status & RXDCTRL_OWN) != 0)
			break;

		if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
			break;

		/* When writing back RX descriptor, GEM writes status
		 * then buffer address, possibly in separate transactions.
		 * If we don't wait for the chip to write both, we could
		 * post a new buffer to this descriptor then have GEM spam
		 * on the buffer address. We sync on the RX completion
		 * register to prevent this from happening.
		 */
		if (entry == done) {
			done = readl(gp->regs + RXDMA_DONE);
			if (entry == done)
				break;
		}

		/* We can now account for the work we're about to do */
		work_done++;

		skb = gp->rx_skbs[entry];

		len = (status & RXDCTRL_BUFSZ) >> 16;
		if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
			dev->stats.rx_errors++;
			if (len < ETH_ZLEN)
				dev->stats.rx_length_errors++;
			if (len & RXDCTRL_BAD)
				dev->stats.rx_crc_errors++;

			/* We'll just return it to GEM. */
		drop_it:
			dev->stats.rx_dropped++;
			goto next;
		}

		dma_addr = le64_to_cpu(rxd->buffer);
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			gp->rx_skbs[entry] = new_skb;
			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
			rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
							       virt_to_page(new_skb->data),
							       offset_in_page(new_skb->data),
							       RX_BUF_ALLOC_SIZE(gp),
							       PCI_DMA_FROMDEVICE));
			skb_reserve(new_skb, RX_OFFSET);

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			__sum16 csum;

			csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
			skb->csum = csum_unfold(csum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}
		skb->protocol = eth_type_trans(skb, gp->dev);

		napi_gro_receive(&gp->napi, skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;

	next:
		entry = NEXT_RX(entry);
	}

	gem_post_rxds(gp, entry);

	gp->rx_new = entry;

	if (drops)
		netdev_info(gp->dev, "Memory squeeze, deferring packet\n");

	return work_done;
}
static int gem_poll(struct napi_struct *napi, int budget)
{
	struct gem *gp = container_of(napi, struct gem, napi);
	struct net_device *dev = gp->dev;
	int work_done;

	work_done = 0;
	do {
		/* Handle anomalies */
		if (unlikely(gp->status & GREG_STAT_ABNORMAL)) {
			struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
			int reset;

			/* We run the abnormal interrupt handling code with
			 * the Tx lock. It only resets the Rx portion of the
			 * chip, but we need to guard it against DMA being
			 * restarted by the link poll timer
			 */
			__netif_tx_lock(txq, smp_processor_id());
			reset = gem_abnormal_irq(dev, gp, gp->status);
			__netif_tx_unlock(txq);
			if (reset) {
				gem_schedule_reset(gp);
				napi_complete(napi);
				return work_done;
			}
		}

		/* Run TX completion thread */
		gem_tx(dev, gp, gp->status);

		/* Run RX thread. We don't use any locking here,
		 * code willing to do bad things - like cleaning the
		 * rx ring - must call napi_disable(), which
		 * schedule_timeout()'s if polling is already disabled.
		 */
		work_done += gem_rx(gp, budget - work_done);

		if (work_done >= budget)
			return work_done;

		gp->status = readl(gp->regs + GREG_STAT);
	} while (gp->status & GREG_STAT_NAPI);

	napi_complete_done(napi, work_done);
	gem_enable_ints(gp);

	return work_done;
}
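/* Note the GREG_STAT re-read at the bottom of the loop: events that
 * arrived while we were processing are handled immediately instead of
 * waiting for a fresh interrupt, since the chip's interrupts stay
 * masked until gem_enable_ints() runs after napi_complete_done().
 */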
static irqreturn_t gem_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gem *gp = netdev_priv(dev);

	if (napi_schedule_prep(&gp->napi)) {
		u32 gem_status = readl(gp->regs + GREG_STAT);

		if (unlikely(gem_status == 0)) {
			napi_enable(&gp->napi);
			return IRQ_NONE;
		}
		if (netif_msg_intr(gp))
			printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n",
			       gp->dev->name, gem_status);

		gp->status = gem_status;
		gem_disable_ints(gp);
		__napi_schedule(&gp->napi);
	}

	/* If polling was disabled at the time we received that
	 * interrupt, we may return IRQ_HANDLED here while we
	 * should return IRQ_NONE. No big deal...
	 */
	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gem_poll_controller(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	disable_irq(gp->pdev->irq);
	gem_interrupt(gp->pdev->irq, dev);
	enable_irq(gp->pdev->irq);
}
#endif
static void gem_tx_timeout(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
		   readl(gp->regs + TXDMA_CFG),
		   readl(gp->regs + MAC_TXSTAT),
		   readl(gp->regs + MAC_TXCFG));
	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
		   readl(gp->regs + RXDMA_CFG),
		   readl(gp->regs + MAC_RXSTAT),
		   readl(gp->regs + MAC_RXCFG));

	gem_schedule_reset(gp);
}
static __inline__ int gem_intme(int entry)
{
	/* Algorithm: IRQ every 1/2 of descriptors. */
	if (!(entry & ((TX_RING_SIZE>>1)-1)))
		return 1;

	return 0;
}
static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int entry;
	u64 ctrl;

	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const u64 csum_start_off = skb_checksum_start_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

		ctrl = (TXDCTRL_CENAB |
			(csum_start_off << 15) |
			(csum_stuff_off << 21));
	}

	if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		/* This is a hard error, log it. */
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = gp->tx_new;
	gp->tx_skbs[entry] = skb;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct gem_txd *txd = &gp->init_block->txd[entry];
		dma_addr_t mapping;
		u32 len;

		len = skb->len;
		mapping = pci_map_page(gp->pdev,
				       virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       len, PCI_DMA_TODEVICE);
		ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
		if (gem_intme(entry))
			ctrl |= TXDCTRL_INTME;
		txd->buffer = cpu_to_le64(mapping);
		dma_wmb();
		txd->control_word = cpu_to_le64(ctrl);
		entry = NEXT_TX(entry);
	} else {
		struct gem_txd *txd;
		u32 first_len;
		u64 intme;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;

		intme = 0;
		if (gem_intme(entry))
			intme |= TXDCTRL_INTME;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_len = skb_headlen(skb);
		first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
					     offset_in_page(skb->data),
					     first_len, PCI_DMA_TODEVICE);
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			dma_addr_t mapping;
			u64 this_ctrl;

			len = skb_frag_size(this_frag);
			mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag,
						   0, len, DMA_TO_DEVICE);
			this_ctrl = ctrl;
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_ctrl |= TXDCTRL_EOF;

			txd = &gp->init_block->txd[entry];
			txd->buffer = cpu_to_le64(mapping);
			dma_wmb();
			txd->control_word = cpu_to_le64(this_ctrl | len);

			if (gem_intme(entry))
				intme |= TXDCTRL_INTME;

			entry = NEXT_TX(entry);
		}
		txd = &gp->init_block->txd[first_entry];
		txd->buffer = cpu_to_le64(first_mapping);
		dma_wmb();
		txd->control_word =
			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
	}

	gp->tx_new = entry;
	if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);

		/* netif_stop_queue() must be done before checking
		 * the tx index in TX_BUFFS_AVAIL() below, because
		 * in gem_tx(), we update tx_old before checking for
		 * netif_queue_stopped().
		 */
		smp_mb();
		if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
			netif_wake_queue(dev);
	}
	if (netif_msg_tx_queued(gp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	mb();
	writel(gp->tx_new, gp->regs + TXDMA_KICK);

	return NETDEV_TX_OK;
}
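/* Ordering matters when publishing descriptors to the chip: the buffer
 * address is written and a dma_wmb() issued before control_word hands
 * over ownership, and for fragmented skbs the head descriptor (the one
 * carrying TXDCTRL_SOF) is filled in last, so the device can never see
 * a partially built chain.
 */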
static void gem_pcs_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Reset PCS unit. */
	val = readl(gp->regs + PCS_MIICTRL);
	val |= PCS_MIICTRL_RST;
	writel(val, gp->regs + PCS_MIICTRL);

	limit = 32;
	while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
		udelay(100);
		if (limit-- <= 0)
			break;
	}
	if (limit < 0)
		netdev_warn(gp->dev, "PCS reset bit would not clear\n");
}
static void gem_pcs_reinit_adv(struct gem *gp)
{
	u32 val;

	/* Make sure PCS is disabled while changing advertisement
	 * configuration.
	 */
	val = readl(gp->regs + PCS_CFG);
	val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
	writel(val, gp->regs + PCS_CFG);

	/* Advertise all capabilities except asymmetric
	 * pause.
	 */
	val = readl(gp->regs + PCS_MIIADV);
	val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
		PCS_MIIADV_SP | PCS_MIIADV_AP);
	writel(val, gp->regs + PCS_MIIADV);

	/* Enable and restart auto-negotiation, disable wrapback/loopback,
	 * and re-enable PCS.
	 */
	val = readl(gp->regs + PCS_MIICTRL);
	val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
	val &= ~PCS_MIICTRL_WB;
	writel(val, gp->regs + PCS_MIICTRL);

	val = readl(gp->regs + PCS_CFG);
	val |= PCS_CFG_ENABLE;
	writel(val, gp->regs + PCS_CFG);

	/* Make sure serialink loopback is off. The meaning
	 * of this bit is logically inverted based upon whether
	 * you are in Serialink or SERDES mode.
	 */
	val = readl(gp->regs + PCS_SCTRL);
	if (gp->phy_type == phy_serialink)
		val &= ~PCS_SCTRL_LOOP;
	else
		val |= PCS_SCTRL_LOOP;
	writel(val, gp->regs + PCS_SCTRL);
}
#define STOP_TRIES 32

static void gem_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Make sure we won't get any more interrupts */
	writel(0xffffffff, gp->regs + GREG_IMASK);

	/* Reset the chip */
	writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);

	limit = STOP_TRIES;

	do {
		udelay(20);
		val = readl(gp->regs + GREG_SWRST);
		if (limit-- <= 0)
			break;
	} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));

	if (limit < 0)
		netdev_err(gp->dev, "SW reset is ghetto\n");

	if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
		gem_pcs_reinit_adv(gp);
}
static void gem_start_dma(struct gem *gp)
{
	u32 val;

	/* We are ready to rock, turn everything on. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);
	udelay(100);

	gem_enable_ints(gp);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}
/* DMA won't be actually stopped before about 4ms tho ...
 */
static void gem_stop_dma(struct gem *gp)
{
	u32 val;

	/* We are done rocking, turn everything off. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);

	/* Need to wait a bit ... done by the caller */
}
// XXX dbl check what that function should do when called on PCS PHY
static void gem_begin_auto_negotiation(struct gem *gp,
				       const struct ethtool_link_ksettings *ep)
{
	u32 advertise, features;
	int autoneg;
	int speed;
	int duplex;
	u32 advertising;

	if (ep)
		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ep->link_modes.advertising);

	if (gp->phy_type != phy_mii_mdio0 &&
	    gp->phy_type != phy_mii_mdio1)
		goto non_mii;

	/* Setup advertise */
	if (found_mii_phy(gp))
		features = gp->phy_mii.def->features;
	else
		features = 0;

	advertise = features & ADVERTISE_MASK;
	if (gp->phy_mii.advertising != 0)
		advertise &= gp->phy_mii.advertising;

	autoneg = gp->want_autoneg;
	speed = gp->phy_mii.speed;
	duplex = gp->phy_mii.duplex;

	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	if (ep->base.autoneg == AUTONEG_ENABLE) {
		advertise = advertising;
		autoneg = 1;
	} else {
		autoneg = 0;
		speed = ep->base.speed;
		duplex = ep->base.duplex;
	}

start_aneg:
	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		autoneg = 0;
	if (speed == SPEED_1000 &&
	    !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
		speed = SPEED_100;
	if (speed == SPEED_100 &&
	    !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
		speed = SPEED_10;
	if (duplex == DUPLEX_FULL &&
	    !(features & (SUPPORTED_1000baseT_Full |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Full)))
		duplex = DUPLEX_HALF;
	if (speed == 0)
		speed = SPEED_10;

	/* If we are asleep, we don't try to actually setup the PHY, we
	 * just store the settings
	 */
	if (!netif_device_present(gp->dev)) {
		gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
		gp->phy_mii.speed = speed;
		gp->phy_mii.duplex = duplex;
		return;
	}

	/* Configure PHY & start aneg */
	gp->want_autoneg = autoneg;
	if (autoneg) {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
		gp->lstate = link_aneg;
	} else {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
		gp->lstate = link_force_ok;
	}

non_mii:
	gp->timer_ticks = 0;
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}
/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 */
static int gem_set_link_modes(struct gem *gp)
{
	struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0);
	int full_duplex, speed, pause;
	u32 val;

	full_duplex = 0;
	speed = SPEED_10;
	pause = 0;

	if (found_mii_phy(gp)) {
		if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
			return 1;
		full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
		speed = gp->phy_mii.speed;
		pause = gp->phy_mii.pause;
	} else if (gp->phy_type == phy_serialink ||
		   gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
			full_duplex = 1;
		speed = SPEED_1000;
	}

	netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
		   speed, (full_duplex ? "full" : "half"));

	/* We take the tx queue lock to avoid collisions between
	 * this code, the tx path and the NAPI-driven error path
	 */
	__netif_tx_lock(txq, smp_processor_id());

	val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
	if (full_duplex) {
		val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
	} else {
		/* MAC_TXCFG_NBO must be zero. */
	}
	writel(val, gp->regs + MAC_TXCFG);

	val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
	if (!full_duplex &&
	    (gp->phy_type == phy_mii_mdio0 ||
	     gp->phy_type == phy_mii_mdio1)) {
		val |= MAC_XIFCFG_DISE;
	} else if (full_duplex) {
		val |= MAC_XIFCFG_FLED;
	}

	if (speed == SPEED_1000)
		val |= (MAC_XIFCFG_GMII);

	writel(val, gp->regs + MAC_XIFCFG);

	/* If gigabit and half-duplex, enable carrier extension
	 * mode. Else, disable it.
	 */
	if (speed == SPEED_1000 && !full_duplex) {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	} else {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	}

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
			pause = 1;
	}

	if (!full_duplex)
		writel(512, gp->regs + MAC_STIME);
	else
		writel(64, gp->regs + MAC_STIME);
	val = readl(gp->regs + MAC_MCCFG);
	if (pause)
		val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	else
		val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	writel(val, gp->regs + MAC_MCCFG);

	gem_start_dma(gp);

	__netif_tx_unlock(txq);

	if (netif_msg_link(gp)) {
		if (pause) {
			netdev_info(gp->dev,
				    "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
				    gp->rx_fifo_sz,
				    gp->rx_pause_off,
				    gp->rx_pause_on);
		} else {
			netdev_info(gp->dev, "Pause is disabled\n");
		}
	}

	return 0;
}
static int gem_mdio_link_not_up(struct gem *gp)
{
	switch (gp->lstate) {
	case link_force_ret:
		netif_info(gp, link, gp->dev,
			   "Autoneg failed again, keeping forced mode\n");
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
						   gp->last_forced_speed, DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_ok;
		return 0;
	case link_aneg:
		/* We try forced modes after a failed aneg only on PHYs that don't
		 * have "magic_aneg" bit set, which means they internally do the
		 * whole forced-mode thingy. On these, we just restart aneg
		 */
		if (gp->phy_mii.def->magic_aneg)
			return 1;
		netif_info(gp, link, gp->dev, "switching to forced 100bt\n");
		/* Try forced modes. */
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
						   DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_try;
		return 0;
	case link_force_try:
		/* Downgrade from 100 to 10 Mbps if necessary.
		 * If already at 10Mbps, warn user about the
		 * situation every 10 ticks.
		 */
		if (gp->phy_mii.speed == SPEED_100) {
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
							   DUPLEX_HALF);
			gp->timer_ticks = 5;
			netif_info(gp, link, gp->dev,
				   "switching to forced 10bt\n");
			return 0;
		} else
			return 1;
	default:
		return 0;
	}
}
static void gem_link_timer(struct timer_list *t)
{
	struct gem *gp = from_timer(gp, t, link_timer);
	struct net_device *dev = gp->dev;
	int restart_aneg = 0;

	/* There's no point doing anything if we're going to be reset */
	if (gp->reset_task_pending)
		return;

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 val = readl(gp->regs + PCS_MIISTAT);

		if (!(val & PCS_MIISTAT_LS))
			val = readl(gp->regs + PCS_MIISTAT);

		if ((val & PCS_MIISTAT_LS) != 0) {
			if (gp->lstate == link_up)
				goto restart;

			gp->lstate = link_up;
			netif_carrier_on(dev);
			(void)gem_set_link_modes(gp);
		}
		goto restart;
	}

	if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we do
		 * retry a short autoneg pass. If you know your hub is
		 * broken, use ethtool ;)
		 */
		if (gp->lstate == link_force_try && gp->want_autoneg) {
			gp->lstate = link_force_ret;
			gp->last_forced_speed = gp->phy_mii.speed;
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				netdev_info(dev,
					    "Got link after fallback, retrying autoneg once...\n");
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
		} else if (gp->lstate != link_up) {
			gp->lstate = link_up;
			netif_carrier_on(dev);
			if (gem_set_link_modes(gp))
				restart_aneg = 1;
		}
	} else {
		/* If the link was previously up, we restart the
		 * whole process
		 */
		if (gp->lstate == link_up) {
			gp->lstate = link_down;
			netif_info(gp, link, dev, "Link down\n");
			netif_carrier_off(dev);
			gem_schedule_reset(gp);
			/* The reset task will restart the timer */
			return;
		} else if (++gp->timer_ticks > 10) {
			if (found_mii_phy(gp))
				restart_aneg = gem_mdio_link_not_up(gp);
			else
				restart_aneg = 1;
		}
	}
	if (restart_aneg) {
		gem_begin_auto_negotiation(gp, NULL);
		return;
	}
restart:
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}
static void gem_clean_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct sk_buff *skb;
	int i;
	dma_addr_t dma_addr;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd;

		rxd = &gb->rxd[i];
		if (gp->rx_skbs[i] != NULL) {
			skb = gp->rx_skbs[i];
			dma_addr = le64_to_cpu(rxd->buffer);
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			gp->rx_skbs[i] = NULL;
		}
		rxd->status_word = 0;
		dma_wmb();
		rxd->buffer = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (gp->tx_skbs[i] != NULL) {
			struct gem_txd *txd;
			int frag;

			skb = gp->tx_skbs[i];
			gp->tx_skbs[i] = NULL;

			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
				int ent = i & (TX_RING_SIZE - 1);

				txd = &gb->txd[ent];
				dma_addr = le64_to_cpu(txd->buffer);
				pci_unmap_page(gp->pdev, dma_addr,
					       le64_to_cpu(txd->control_word) &
					       TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);

				if (frag != skb_shinfo(skb)->nr_frags)
					i++;
			}
			dev_kfree_skb_any(skb);
		}
	}
}
static void gem_init_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct net_device *dev = gp->dev;
	int i;
	dma_addr_t dma_addr;

	gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;

	gem_clean_rings(gp);

	gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
			    (unsigned)VLAN_ETH_FRAME_LEN);

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		struct gem_rxd *rxd = &gb->rxd[i];

		skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL);
		if (!skb) {
			rxd->buffer = 0;
			rxd->status_word = 0;
			continue;
		}

		gp->rx_skbs[i] = skb;
		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
		dma_addr = pci_map_page(gp->pdev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					RX_BUF_ALLOC_SIZE(gp),
					PCI_DMA_FROMDEVICE);
		rxd->buffer = cpu_to_le64(dma_addr);
		dma_wmb();
		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
		skb_reserve(skb, RX_OFFSET);
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct gem_txd *txd = &gb->txd[i];

		txd->control_word = 0;
		dma_wmb();
		txd->buffer = 0;
	}
	wmb();
}
/* Init PHY interface and start link poll state machine */
static void gem_init_phy(struct gem *gp)
{
	u32 mifcfg;

	/* Revert MIF CFG setting done on stop_phy */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_BBMODE;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
		int i;

		/* Those delays suck, the HW seems to love them though; I'll
		 * seriously consider breaking some locks here to be able
		 * to schedule instead
		 */
		for (i = 0; i < 3; i++) {
#ifdef CONFIG_PPC_PMAC
			pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
			msleep(20);
#endif
			/* Some PHYs used by apple have problem getting back
			 * to us, we do an additional reset here
			 */
			sungem_phy_write(gp, MII_BMCR, BMCR_RESET);
			msleep(20);
			if (sungem_phy_read(gp, MII_BMCR) != 0xffff)
				break;
			if (i == 2)
				netdev_warn(gp->dev, "GMAC PHY not responding !\n");
		}
	}

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		u32 val;

		/* Init datapath mode register. */
		if (gp->phy_type == phy_mii_mdio0 ||
		    gp->phy_type == phy_mii_mdio1) {
			val = PCS_DMODE_MGM;
		} else if (gp->phy_type == phy_serialink) {
			val = PCS_DMODE_SM | PCS_DMODE_GMOE;
		} else {
			val = PCS_DMODE_ESM;
		}

		writel(val, gp->regs + PCS_DMODE);
	}

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		/* Reset and detect MII PHY */
		sungem_phy_probe(&gp->phy_mii, gp->mii_phy_addr);

		/* Init PHY */
		if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
			gp->phy_mii.def->ops->init(&gp->phy_mii);
	} else {
		gem_pcs_reset(gp);
		gem_pcs_reinit_adv(gp);
	}

	/* Default aneg parameters */
	gp->timer_ticks = 0;
	gp->lstate = link_down;
	netif_carrier_off(gp->dev);

	/* Print things out */
	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1)
		netdev_info(gp->dev, "Found %s PHY\n",
			    gp->phy_mii.def ? gp->phy_mii.def->name : "no");

	gem_begin_auto_negotiation(gp, NULL);
}
static void gem_init_dma(struct gem *gp)
{
	u64 desc_dma = (u64) gp->gblock_dvma;
	u32 val;

	val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
	writel(val, gp->regs + TXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));

	writel(0, gp->regs + TXDMA_KICK);

	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);

	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);

	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
}
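/* The initial RXDMA_KICK of RX_RING_SIZE - 4 hands the chip every
 * descriptor except the last four-slot cluster, presumably so that a
 * completely full ring stays distinguishable from an empty one (see
 * gem_post_rxds() above for the four-descriptor kick granularity).
 */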
static u32 gem_setup_multicast(struct gem *gp)
{
	u32 rxcfg = 0;
	int i;

	if ((gp->dev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(gp->dev) > 256)) {
		for (i = 0; i < 16; i++)
			writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	} else if (gp->dev->flags & IFF_PROMISC) {
		rxcfg |= MAC_RXCFG_PROM;
	} else {
		u16 hash_table[16];
		u32 crc;
		struct netdev_hw_addr *ha;

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, gp->dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 24;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		}
		for (i = 0; i < 16; i++)
			writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	}

	return rxcfg;
}
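/* The hash filter is a 256-bit table spread across sixteen 16-bit
 * registers: the top 8 bits of the little-endian CRC of each multicast
 * address select one bit, with crc >> 4 picking the register and the
 * low nibble the bit within it.
 */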
static void gem_init_mac(struct gem *gp)
{
	unsigned char *e = &gp->dev->dev_addr[0];

	writel(0x1bf0, gp->regs + MAC_SNDPAUSE);

	writel(0x00, gp->regs + MAC_IPG0);
	writel(0x08, gp->regs + MAC_IPG1);
	writel(0x04, gp->regs + MAC_IPG2);
	writel(0x40, gp->regs + MAC_STIME);
	writel(0x40, gp->regs + MAC_MINFSZ);

	/* Ethernet payload + header + FCS + optional VLAN tag. */
	writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);

	writel(0x07, gp->regs + MAC_PASIZE);
	writel(0x04, gp->regs + MAC_JAMSIZE);
	writel(0x10, gp->regs + MAC_ATTLIM);
	writel(0x8808, gp->regs + MAC_MCTYPE);

	writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);

	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	writel(0, gp->regs + MAC_ADDR3);
	writel(0, gp->regs + MAC_ADDR4);
	writel(0, gp->regs + MAC_ADDR5);

	writel(0x0001, gp->regs + MAC_ADDR6);
	writel(0xc200, gp->regs + MAC_ADDR7);
	writel(0x0180, gp->regs + MAC_ADDR8);

	writel(0, gp->regs + MAC_AFILT0);
	writel(0, gp->regs + MAC_AFILT1);
	writel(0, gp->regs + MAC_AFILT2);
	writel(0, gp->regs + MAC_AF21MSK);
	writel(0, gp->regs + MAC_AF0MSK);

	gp->mac_rx_cfg = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
#endif
	writel(0, gp->regs + MAC_NCOLL);
	writel(0, gp->regs + MAC_FASUCC);
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
	writel(0, gp->regs + MAC_DTIMER);
	writel(0, gp->regs + MAC_PATMPS);
	writel(0, gp->regs + MAC_RFCTR);
	writel(0, gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_RXCVERR);

	/* Clear RX/TX/MAC/XIF config, we will set these up and enable
	 * them once a link is established.
	 */
	writel(0, gp->regs + MAC_TXCFG);
	writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
	writel(0, gp->regs + MAC_MCCFG);
	writel(0, gp->regs + MAC_XIFCFG);

	/* Setup MAC interrupts. We want to get all of the interesting
	 * counter expiration events, but we do not want to hear about
	 * normal rx/tx as the DMA engine tells us that.
	 */
	writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);

	/* Don't enable even the PAUSE interrupts for now, we
	 * make no use of those events other than to record them.
	 */
	writel(0xffffffff, gp->regs + MAC_MCMASK);

	/* Don't enable GEM's WOL in normal operations
	 */
	if (gp->has_wol)
		writel(0, gp->regs + WOL_WAKECSR);
}
static void gem_init_pause_thresholds(struct gem *gp)
{
	u32 cfg;

	/* Calculate pause thresholds. Setting the OFF threshold to the
	 * full RX fifo size effectively disables PAUSE generation which
	 * is what we do for 10/100 only GEMs which have FIFOs too small
	 * to make real gains from PAUSE.
	 */
	if (gp->rx_fifo_sz <= (2 * 1024)) {
		gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
	} else {
		int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
		int off = (gp->rx_fifo_sz - (max_frame * 2));
		int on = off - max_frame;

		gp->rx_pause_off = off;
		gp->rx_pause_on = on;
	}

	/* Configure the chip "burst" DMA mode & enable some
	 * HW bug fixes on Apple version
	 */
	cfg = 0;
	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
		cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	cfg |= GREG_CFG_IBURST;
#endif
	cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
	cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
	writel(cfg, gp->regs + GREG_CFG);

	/* If Infinite Burst didn't stick, then use different
	 * thresholds (and Apple bug fixes don't exist)
	 */
	if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
		cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
		cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
		writel(cfg, gp->regs + GREG_CFG);
	}
}
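/* Worked example, assuming a 20KB RX fifo and an rx_buf_sz of 1536:
 * max_frame rounds up to 1600, so rx_pause_off (which appears to be the
 * occupancy at which a pause frame is emitted, given that setting it to
 * the full fifo size disables pause) lands at 20480 - 2 * 1600 = 17280
 * bytes, and rx_pause_on (where traffic resumes) at 17280 - 1600 =
 * 15680. Both are programmed in 64-byte units in gem_init_dma().
 */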
static int gem_check_invariants(struct gem *gp)
{
	struct pci_dev *pdev = gp->pdev;
	u32 mif_cfg;

	/* On Apple's sungem, we can't rely on registers as the chip
	 * has been powered down by the firmware. The PHY is looked
	 * up later on.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
		gp->phy_type = phy_mii_mdio0;
		gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
		gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
		gp->swrst_base = 0;

		mif_cfg = readl(gp->regs + MIF_CFG);
		mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
		mif_cfg |= MIF_CFG_MDI0;
		writel(mif_cfg, gp->regs + MIF_CFG);
		writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
		writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);

		/* We hard-code the PHY address so we can properly bring it out of
		 * reset later on, we can't really probe it at this point, though
		 * that isn't an issue.
		 */
		if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
			gp->mii_phy_addr = 1;
		else
			gp->mii_phy_addr = 0;

		return 0;
	}

	mif_cfg = readl(gp->regs + MIF_CFG);

	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
		/* One of the MII PHYs _must_ be present
		 * as this chip has no gigabit PHY.
		 */
		if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
			pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n",
			       mif_cfg);
			return -1;
		}
	}

	/* Determine initial PHY interface type guess. MDIO1 is the
	 * external PHY and thus takes precedence over MDIO0.
	 */

	if (mif_cfg & MIF_CFG_MDI1) {
		gp->phy_type = phy_mii_mdio1;
		mif_cfg |= MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else if (mif_cfg & MIF_CFG_MDI0) {
		gp->phy_type = phy_mii_mdio0;
		mif_cfg &= ~MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else {
#ifdef CONFIG_SPARC
		const char *p;

		p = of_get_property(gp->of_node, "shared-pins", NULL);
		if (p && !strcmp(p, "serdes"))
			gp->phy_type = phy_serdes;
		else
#endif
			gp->phy_type = phy_serialink;
	}
	if (gp->phy_type == phy_mii_mdio1 ||
	    gp->phy_type == phy_mii_mdio0) {
		int i;

		for (i = 0; i < 32; i++) {
			gp->mii_phy_addr = i;
			if (sungem_phy_read(gp, MII_BMCR) != 0xffff)
				break;
		}
		if (i == 32) {
			if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
				pr_err("RIO MII phy will not respond\n");
				return -1;
			}
			gp->phy_type = phy_serdes;
		}
	}

	/* Fetch the FIFO configurations now too. */
	gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
	gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;

	if (pdev->vendor == PCI_VENDOR_ID_SUN) {
		if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
			if (gp->tx_fifo_sz != (9 * 1024) ||
			    gp->rx_fifo_sz != (20 * 1024)) {
				pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = 0;
		} else {
			if (gp->tx_fifo_sz != (2 * 1024) ||
			    gp->rx_fifo_sz != (2 * 1024)) {
				pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
		}
	}

	return 0;
}
static void gem_reinit_chip(struct gem *gp)
{
	/* Reset the chip */
	gem_reset(gp);

	/* Make sure ints are disabled */
	gem_disable_ints(gp);

	/* Allocate & setup ring buffers */
	gem_init_rings(gp);

	/* Configure pause thresholds */
	gem_init_pause_thresholds(gp);

	/* Init DMA & MAC engines */
	gem_init_dma(gp);
	gem_init_mac(gp);
}
static void gem_stop_phy(struct gem *gp, int wol)
{
	u32 mifcfg;

	/* Let the chip settle down a bit, it seems that helps
	 * for sleep mode on some models
	 */
	msleep(10);

	/* Make sure we aren't polling PHY status change. We
	 * don't currently use that feature though
	 */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_POLL;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (wol && gp->has_wol) {
		unsigned char *e = &gp->dev->dev_addr[0];
		u32 csr;

		/* Setup wake-on-lan for MAGIC packet */
		writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
		       gp->regs + MAC_RXCFG);
		writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
		writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
		writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);

		writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
		csr = WOL_WAKECSR_ENABLE;
		if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
			csr |= WOL_WAKECSR_MII;
		writel(csr, gp->regs + WOL_WAKECSR);
	} else {
		writel(0, gp->regs + MAC_RXCFG);
		(void)readl(gp->regs + MAC_RXCFG);
		/* Machine sleep will die in strange ways if we
		 * don't wait a bit here, looks like the chip takes
		 * some time to really shut down
		 */
		msleep(10);
	}

	writel(0, gp->regs + MAC_TXCFG);
	writel(0, gp->regs + MAC_XIFCFG);
	writel(0, gp->regs + TXDMA_CFG);
	writel(0, gp->regs + RXDMA_CFG);

	if (!wol) {
		gem_reset(gp);
		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);

		if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
			gp->phy_mii.def->ops->suspend(&gp->phy_mii);

		/* According to Apple, we must set the MDIO pins to this benign
		 * state or we may 1) eat more current, 2) damage some PHYs
		 */
		writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
		writel(0, gp->regs + MIF_BBCLK);
		writel(0, gp->regs + MIF_BBDATA);
		writel(0, gp->regs + MIF_BBOENAB);
		writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
		(void) readl(gp->regs + MAC_XIFCFG);
	}
}
static int gem_do_start(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int rc;

	/* Enable the cell */
	gem_get_cell(gp);

	/* Make sure PCI access and bus master are enabled */
	rc = pci_enable_device(gp->pdev);
	if (rc) {
		netdev_err(dev, "Failed to enable chip on PCI bus !\n");

		/* Put cell and forget it for now, it will be considered as
		 * still asleep, a new sleep cycle may bring it back
		 */
		gem_put_cell(gp);
		return -ENXIO;
	}
	pci_set_master(gp->pdev);

	/* Init & setup chip hardware */
	gem_reinit_chip(gp);

	/* An interrupt might come in handy */
	rc = request_irq(gp->pdev->irq, gem_interrupt,
			 IRQF_SHARED, dev->name, (void *)dev);
	if (rc) {
		netdev_err(dev, "failed to request irq !\n");

		gem_reset(gp);
		gem_clean_rings(gp);
		gem_put_cell(gp);
		return rc;
	}

	/* Mark us as attached again if we come from resume(), this has
	 * no effect if we weren't detached and needs to be done now.
	 */
	netif_device_attach(dev);

	/* Restart NAPI & queues */
	gem_netif_start(gp);

	/* Detect & init PHY, start autoneg etc... this will
	 * eventually result in starting DMA operations when
	 * the link is up
	 */
	gem_init_phy(gp);

	return 0;
}
static void gem_do_stop(struct net_device *dev, int wol)
{
	struct gem *gp = netdev_priv(dev);

	/* Stop NAPI and stop tx queue */
	gem_netif_stop(gp);

	/* Make sure ints are disabled. We don't care about
	 * synchronizing as NAPI is disabled, thus a stray
	 * interrupt will do nothing bad (our irq handler
	 * just schedules NAPI)
	 */
	gem_disable_ints(gp);

	/* Stop the link timer */
	del_timer_sync(&gp->link_timer);

	/* We cannot cancel the reset task while holding the
	 * rtnl lock, we'd get an A->B / B->A deadlock situation
	 * if we did. This is not an issue however as the reset
	 * task is synchronized vs. us (rtnl_lock) and will do
	 * nothing if the device is down or suspended. We do
	 * still clear reset_task_pending to avoid a spurious
	 * reset later on in case we do resume before it gets
	 * scheduled.
	 */
	gp->reset_task_pending = 0;

	/* If we are going to sleep with WOL */
	gem_stop_dma(gp);
	msleep(10);
	if (!wol)
		gem_reset(gp);
	msleep(10);

	/* Get rid of rings */
	gem_clean_rings(gp);

	/* No irq needed anymore */
	free_irq(gp->pdev->irq, (void *) dev);

	/* Shut the PHY down eventually and setup WOL */
	gem_stop_phy(gp, wol);

	/* Make sure bus master is disabled */
	pci_disable_device(gp->pdev);

	/* Cell not needed either if no WOL */
	if (!wol)
		gem_put_cell(gp);
}
static void gem_reset_task(struct work_struct *work)
{
	struct gem *gp = container_of(work, struct gem, reset_task);

	/* Lock out the network stack (essentially shield ourselves
	 * against a racing open, close, control call, or suspend
	 */
	rtnl_lock();

	/* Skip the reset task if suspended or closed, or if it's
	 * been cancelled by gem_do_stop (see comment there)
	 */
	if (!netif_device_present(gp->dev) ||
	    !netif_running(gp->dev) ||
	    !gp->reset_task_pending) {
		rtnl_unlock();
		return;
	}

	/* Stop the link timer */
	del_timer_sync(&gp->link_timer);

	/* Stop NAPI and tx */
	gem_netif_stop(gp);

	/* Reset the chip & rings */
	gem_reinit_chip(gp);
	if (gp->lstate == link_up)
		gem_set_link_modes(gp);

	/* Restart NAPI and Tx */
	gem_netif_start(gp);

	gp->reset_task_pending = 0;

	/* If the link is not up, restart autoneg, else restart the
	 * polling timer
	 */
	if (gp->lstate != link_up)
		gem_begin_auto_negotiation(gp, NULL);
	else
		mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));

	rtnl_unlock();
}

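/* Note the 12*HZ/10 above: the link timer polls at a 1.2 second interval,
 * and the reset task simply re-arms it at that period once the chip is
 * back up. gem_open()/gem_close() below are deliberately thin wrappers; a
 * suspended device (!netif_device_present) is left alone until resume().
 */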
static int gem_open(struct net_device *dev)
{
	/* We allow open while suspended, we just do nothing,
	 * the chip will be initialized in resume()
	 */
	if (netif_device_present(dev))
		return gem_do_start(dev);
	return 0;
}

static int gem_close(struct net_device *dev)
{
	if (netif_device_present(dev))
		gem_do_stop(dev, 0);
	return 0;
}

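/* Power management: these are the legacy PCI .suspend/.resume hooks wired
 * into gem_driver at the bottom of this file. Unlike the ndo handlers they
 * are not entered with rtnl_lock held, so they take it themselves to avoid
 * racing with open/close and the reset task.
 */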
#ifdef CONFIG_PM
static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);

	/* Lock the network stack first to avoid racing with open/close,
	 * reset task and setting calls
	 */
	rtnl_lock();

	/* Not running, mark ourselves non-present, no need for
	 * a lock here
	 */
	if (!netif_running(dev)) {
		netif_device_detach(dev);
		rtnl_unlock();
		return 0;
	}
	netdev_info(dev, "suspending, WakeOnLan %s\n",
		    (gp->wake_on_lan && netif_running(dev)) ?
		    "enabled" : "disabled");

	/* Tell the network stack we're gone. gem_do_stop() below will
	 * synchronize with TX, stop NAPI etc...
	 */
	netif_device_detach(dev);

	/* Switch off chip, remember WOL setting */
	gp->asleep_wol = !!gp->wake_on_lan;
	gem_do_stop(dev, gp->asleep_wol);

	/* Unlock the network stack */
	rtnl_unlock();

	return 0;
}

static int gem_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);

	/* See locking comment in gem_suspend */
	rtnl_lock();

	/* Not running, mark ourselves present, no need for
	 * a lock here
	 */
	if (!netif_running(dev)) {
		netif_device_attach(dev);
		rtnl_unlock();
		return 0;
	}

	/* Restart chip. If that fails there isn't much we can do, we
	 * leave things stopped.
	 */
	gem_do_start(dev);

	/* If we had WOL enabled, the cell clock was never turned off during
	 * sleep, so we end up being unbalanced. Fix that here
	 */
	if (gp->asleep_wol)
		gem_put_cell(gp);

	/* Unlock the network stack */
	rtnl_unlock();

	return 0;
}
#endif /* CONFIG_PM */

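/* The MAC error counters read below are accumulators that we explicitly
 * clear with writel(0, ...) after folding them into dev->stats, so each
 * call only picks up the delta since the previous one.
 */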
static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	/* I have seen this being called while the PM was in progress,
	 * so we shield against this. Let's also not poke at registers
	 * while the reset task is going on.
	 *
	 * TODO: Move stats collection elsewhere (link timer ?) and
	 * make this a nop to avoid all those synchro issues
	 */
	if (!netif_device_present(dev) || !netif_running(dev))
		goto bail;

	/* Better safe than sorry... */
	if (WARN_ON(!gp->cell_enabled))
		goto bail;

	dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_FCSERR);

	dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_AERR);

	dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_LERR);

	dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
	dev->stats.collisions +=
		(readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL));
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
bail:
	return &dev->stats;
}

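/* The station address is programmed as three 16-bit words, most
 * significant word in MAC_ADDR2. For example 08:00:20:12:34:56 ends up as
 * MAC_ADDR2 = 0x0800, MAC_ADDR1 = 0x2012, MAC_ADDR0 = 0x3456.
 */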
static int gem_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *macaddr = (struct sockaddr *) addr;
	struct gem *gp = netdev_priv(dev);
	unsigned char *e = &dev->dev_addr[0];

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);

	/* We'll just catch it later when the device is up'd or resumed */
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	/* Better safe than sorry... */
	if (WARN_ON(!gp->cell_enabled))
		return 0;

	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	return 0;
}

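/* Reprogramming the RX filter requires the receiver to be quiesced first:
 * the loop below clears MAC_RXCFG_ENAB and polls until the MAC acknowledges
 * before the new promisc/hash-filter bits are written back.
 */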
static void gem_set_multicast(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	u32 rxcfg, rxcfg_new;
	int limit = 10000;

	if (!netif_running(dev) || !netif_device_present(dev))
		return;

	/* Better safe than sorry... */
	if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled))
		return;

	rxcfg = readl(gp->regs + MAC_RXCFG);
	rxcfg_new = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	rxcfg_new |= MAC_RXCFG_SFCS;
#endif
	gp->mac_rx_cfg = rxcfg_new;

	writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
	while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
		if (!limit--)
			break;
		udelay(10);
	}

	rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
	rxcfg |= rxcfg_new;

	writel(rxcfg, gp->regs + MAC_RXCFG);
}

/* Jumbo-grams don't seem to work :-( */
#define GEM_MIN_MTU	ETH_MIN_MTU
#if 1
#define GEM_MAX_MTU	ETH_DATA_LEN
#else
#define GEM_MAX_MTU	9000
#endif

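/* Changing the MTU on a running interface goes through a full chip/ring
 * re-init, presumably because RX buffer sizing depends on it; the cheap
 * path (device down or asleep) just records the value for the next open.
 */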
static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gem *gp = netdev_priv(dev);

	dev->mtu = new_mtu;

	/* We'll just catch it later when the device is up'd or resumed */
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	/* Better safe than sorry... */
	if (WARN_ON(!gp->cell_enabled))
		return 0;

	gem_netif_stop(gp);
	gem_reinit_chip(gp);
	if (gp->lstate == link_up)
		gem_set_link_modes(gp);
	gem_netif_start(gp);

	return 0;
}

static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct gem *gp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
}

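/* The ksettings handlers below still build the legacy u32 SUPPORTED_ and
 * ADVERTISED_ bitmasks internally and convert them with the
 * ethtool_convert_legacy_u32_to_link_mode() helpers; every mode this chip
 * supports fits in 32 bits, so nothing is lost in the conversion.
 */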
static int gem_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct gem *gp = netdev_priv(dev);
	u32 supported, advertising;

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		if (gp->phy_mii.def)
			supported = gp->phy_mii.def->features;
		else
			supported = (SUPPORTED_10baseT_Half |
				     SUPPORTED_10baseT_Full);

		/* XXX hardcoded stuff for now */
		cmd->base.port = PORT_MII;
		cmd->base.phy_address = 0; /* XXX fixed PHYAD */

		/* Return current PHY settings */
		cmd->base.autoneg = gp->want_autoneg;
		cmd->base.speed = gp->phy_mii.speed;
		cmd->base.duplex = gp->phy_mii.duplex;
		advertising = gp->phy_mii.advertising;

		/* If we started with a forced mode, we don't have a default
		 * advertise set, we need to return something sensible so
		 * userland can re-enable autoneg properly.
		 */
		if (advertising == 0)
			advertising = supported;
	} else { // XXX PCS ?
		supported =
			(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			 SUPPORTED_Autoneg);
		advertising = supported;
		cmd->base.speed = 0;
		cmd->base.duplex = 0;
		cmd->base.port = 0;
		cmd->base.phy_address = 0;
		cmd->base.autoneg = 0;

		/* serdes means usually a Fibre connector, with most fixed */
		if (gp->phy_type == phy_serdes) {
			cmd->base.port = PORT_FIBRE;
			supported = (SUPPORTED_1000baseT_Half |
				     SUPPORTED_1000baseT_Full |
				     SUPPORTED_FIBRE | SUPPORTED_Autoneg |
				     SUPPORTED_Pause | SUPPORTED_Asym_Pause);
			advertising = supported;
			if (gp->lstate == link_up)
				cmd->base.speed = SPEED_1000;
			cmd->base.duplex = DUPLEX_FULL;
			cmd->base.autoneg = 1;
		}
	}

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

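/* set_link_ksettings only validates the fields this driver acts on:
 * autoneg mode, and speed/duplex (or the advertising mask when autoneg is
 * on). Anything else in the ksettings is ignored rather than rejected.
 */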
static int gem_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct gem *gp = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* Verify the settings we care about. */
	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_ENABLE &&
	    advertising == 0)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->base.duplex != DUPLEX_HALF &&
	      cmd->base.duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Apply settings and restart link process. */
	if (netif_device_present(gp->dev)) {
		del_timer_sync(&gp->link_timer);
		gem_begin_auto_negotiation(gp, cmd);
	}

	return 0;
}

static int gem_nway_reset(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	if (!gp->want_autoneg)
		return -EINVAL;

	/* Restart link process */
	if (netif_device_present(gp->dev)) {
		del_timer_sync(&gp->link_timer);
		gem_begin_auto_negotiation(gp, NULL);
	}

	return 0;
}

static u32 gem_get_msglevel(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	return gp->msg_enable;
}

static void gem_set_msglevel(struct net_device *dev, u32 value)
{
	struct gem *gp = netdev_priv(dev);
	gp->msg_enable = value;
}

/* Add more when I understand how to program the chip */
/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */

#define WOL_SUPPORTED_MASK	(WAKE_MAGIC)

static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = netdev_priv(dev);

	/* Add more when I understand how to program the chip */
	if (gp->has_wol) {
		wol->supported = WOL_SUPPORTED_MASK;
		wol->wolopts = gp->wake_on_lan;
	} else {
		wol->supported = 0;
		wol->wolopts = 0;
	}
}

static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = netdev_priv(dev);

	if (!gp->has_wol)
		return -EOPNOTSUPP;
	gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
	return 0;
}

static const struct ethtool_ops gem_ethtool_ops = {
	.get_drvinfo		= gem_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.nway_reset		= gem_nway_reset,
	.get_msglevel		= gem_get_msglevel,
	.set_msglevel		= gem_set_msglevel,
	.get_wol		= gem_get_wol,
	.set_wol		= gem_set_wol,
	.get_link_ksettings	= gem_get_link_ksettings,
	.set_link_ksettings	= gem_set_link_ksettings,
};

static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct gem *gp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	int rc = -EOPNOTSUPP;

	/* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that
	 * netif_device_present() is true and holds rtnl_lock for us
	 * so we have nothing to worry about
	 */

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = gp->mii_phy_addr;
		/* Fallthrough... */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f,
						  data->reg_num & 0x1f);
		rc = 0;
		break;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		__sungem_phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
				   data->val_in);
		rc = 0;
		break;
	}
	return rc;
}

#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC))
/* Fetch MAC address from vital product data of PCI ROM. */
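/* The byte pattern matched below appears to be a PCI VPD read-only entry
 * carrying a 6-byte "NA" (network address) keyword: 0x4e 0x41 is ASCII
 * "NA" and the 0x06 that follows is its length, so the next six bytes are
 * taken as the MAC address.
 */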
static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
{
	int this_offset;

	for (this_offset = 0x20; this_offset < len; this_offset++) {
		void __iomem *p = rom_base + this_offset;
		int i;

		if (readb(p + 0) != 0x90 ||
		    readb(p + 1) != 0x00 ||
		    readb(p + 2) != 0x09 ||
		    readb(p + 3) != 0x4e ||
		    readb(p + 4) != 0x41 ||
		    readb(p + 5) != 0x06)
			continue;

		this_offset += 6;
		p += 6;

		for (i = 0; i < 6; i++)
			dev_addr[i] = readb(p + i);
		return 1;
	}
	return 0;
}

static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
{
	size_t size;
	void __iomem *p = pci_map_rom(pdev, &size);

	if (p) {
		int found;

		found = readb(p) == 0x55 &&
			readb(p + 1) == 0xaa &&
			find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
		pci_unmap_rom(pdev, p);
		if (found)
			return;
	}

	/* Sun MAC prefix then 3 random bytes. */
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(dev_addr + 3, 3);
}
#endif /* not Sparc and not PPC */

static int gem_get_device_address(struct gem *gp)
{
#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
	struct net_device *dev = gp->dev;
	const unsigned char *addr;

	addr = of_get_property(gp->of_node, "local-mac-address", NULL);
	if (addr == NULL) {
#ifdef CONFIG_SPARC
		addr = idprom->id_ethaddr;
#else
		pr_err("%s: can't get mac-address\n", dev->name);
		return -1;
#endif
	}
	memcpy(dev->dev_addr, addr, ETH_ALEN);
#else
	get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
#endif
	return 0;
}

static void gem_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct gem *gp = netdev_priv(dev);

		unregister_netdev(dev);

		/* Ensure reset task is truly gone */
		cancel_work_sync(&gp->reset_task);

		/* Free resources */
		pci_free_consistent(pdev,
				    sizeof(struct gem_init_block),
				    gp->init_block,
				    gp->gblock_dvma);
		iounmap(gp->regs);
		pci_release_regions(pdev);
		free_netdev(dev);
	}
}

static const struct net_device_ops gem_netdev_ops = {
	.ndo_open		= gem_open,
	.ndo_stop		= gem_close,
	.ndo_start_xmit		= gem_start_xmit,
	.ndo_get_stats		= gem_get_stats,
	.ndo_set_rx_mode	= gem_set_multicast,
	.ndo_do_ioctl		= gem_ioctl,
	.ndo_tx_timeout		= gem_tx_timeout,
	.ndo_change_mtu		= gem_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= gem_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= gem_poll_controller,
#endif
};

static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long gemreg_base, gemreg_len;
	struct net_device *dev;
	struct gem *gp;
	int err, pci_using_dac;

	printk_once(KERN_INFO "%s", version);

	/* Apple gmac note: during probe, the chip is powered up by
	 * the arch code to allow the code below to work (and to let
	 * the chip be probed via the config space). It won't stay powered
	 * up until the interface is brought up however, so we can't rely
	 * on register configuration done at this point.
	 */
	err = pci_enable_device(pdev);
	if (err) {
		pr_err("Cannot enable MMIO operation, aborting\n");
		return err;
	}
	pci_set_master(pdev);

	/* Configure DMA attributes. */

	/* All of the GEM documentation states that 64-bit DMA addressing
	 * is fully supported and should work just fine. However the
	 * front end for RIO based GEMs is different and only supports
	 * 32-bit addressing.
	 *
	 * For now we assume the various PPC GEMs are 32-bit only as well.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_GEM &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA configuration, aborting\n");
			goto err_disable_device;
		}
		pci_using_dac = 0;
	}

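	/* pci_using_dac records which DMA mask was accepted; it is consulted
	 * further down to decide whether NETIF_F_HIGHDMA can be set on the
	 * netdev.
	 */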
	gemreg_base = pci_resource_start(pdev, 0);
	gemreg_len = pci_resource_len(pdev, 0);

	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
		pr_err("Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_disable_device;
	}

	dev = alloc_etherdev(sizeof(*gp));
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_device;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	gp = netdev_priv(dev);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("Cannot obtain PCI resources, aborting\n");
		goto err_out_free_netdev;
	}

	gp->pdev = pdev;
	gp->dev = dev;

	gp->msg_enable = DEFAULT_MSG;

	timer_setup(&gp->link_timer, gem_link_timer, 0);

	INIT_WORK(&gp->reset_task, gem_reset_task);

	gp->lstate = link_down;
	gp->timer_ticks = 0;
	netif_carrier_off(dev);

	gp->regs = ioremap(gemreg_base, gemreg_len);
	if (!gp->regs) {
		pr_err("Cannot map device registers, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* On Apple, we want a reference to the Open Firmware device-tree
	 * node. We use it for clock control.
	 */
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
	gp->of_node = pci_device_to_OF_node(pdev);
#endif

	/* Only Apple version supports WOL afaik */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE)
		gp->has_wol = 1;

	/* Make sure cell is enabled */
	gem_get_cell(gp);

	/* Make sure everything is stopped and in init state */
	gem_reset(gp);

	/* Fill up the mii_phy structure (even if we won't use it) */
	gp->phy_mii.dev = dev;
	gp->phy_mii.mdio_read = _sungem_phy_read;
	gp->phy_mii.mdio_write = _sungem_phy_write;
#ifdef CONFIG_PPC_PMAC
	gp->phy_mii.platform_data = gp->of_node;
#endif
	/* By default, we start with autoneg */
	gp->want_autoneg = 1;

	/* Check fifo sizes, PHY type, etc... */
	if (gem_check_invariants(gp)) {
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* It is guaranteed that the returned buffer will be at least
	 * PAGE_SIZE aligned.
	 */
	gp->init_block = (struct gem_init_block *)
		pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
				     &gp->gblock_dvma);
	if (!gp->init_block) {
		pr_err("Cannot allocate init block, aborting\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	err = gem_get_device_address(gp);
	if (err)
		goto err_out_free_consistent;

	dev->netdev_ops = &gem_netdev_ops;
	netif_napi_add(dev, &gp->napi, gem_poll, 64);
	dev->ethtool_ops = &gem_ethtool_ops;
	dev->watchdog_timeo = 5 * HZ;

	/* Set that now, in case PM kicks in now */
	pci_set_drvdata(pdev, dev);

	/* We can do scatter/gather and HW checksum */
	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->features = dev->hw_features;
	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	/* MTU range: 68 - 1500 (Jumbo mode is broken) */
	dev->min_mtu = GEM_MIN_MTU;
	dev->max_mtu = GEM_MAX_MTU;

	/* Register with kernel */
	if (register_netdev(dev)) {
		pr_err("Cannot register net device, aborting\n");
		err = -ENOMEM;
		goto err_out_free_consistent;
	}

	/* Undo the get_cell with appropriate locking (we could use
	 * ndo_init/uninit but that would be even more clumsy imho)
	 */
	rtnl_lock();
	gem_put_cell(gp);
	rtnl_unlock();

	netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
		    dev->dev_addr);
	return 0;

err_out_free_consistent:
	gem_remove_one(pdev);
err_out_iounmap:
	gem_put_cell(gp);
	iounmap(gp->regs);

err_out_free_res:
	pci_release_regions(pdev);

err_out_free_netdev:
	free_netdev(dev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}

static struct pci_driver gem_driver = {
	.name		= GEM_MODULE_NAME,
	.id_table	= gem_pci_tbl,
	.probe		= gem_init_one,
	.remove		= gem_remove_one,
#ifdef CONFIG_PM
	.suspend	= gem_suspend,
	.resume		= gem_resume,
#endif /* CONFIG_PM */
};

module_pci_driver(gem_driver);