/*
 * drivers/net/ethernet/amd/au1000_eth.c
 * (snapshot from the mirror_ubuntu-artful-kernel.git tree; git web-viewer
 * navigation header removed)
 */
1 /*
2 *
3 * Alchemy Au1x00 ethernet driver
4 *
5 * Copyright 2001-2003, 2006 MontaVista Software Inc.
6 * Copyright 2002 TimeSys Corp.
7 * Added ethtool/mii-tool support,
8 * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
9 * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
10 * or riemer@riemer-nt.de: fixed the link beat detection with
11 * ioctls (SIOCGMIIPHY)
12 * Copyright 2006 Herbert Valerio Riedel <hvr@gnu.org>
13 * converted to use linux-2.6.x's PHY framework
14 *
15 * Author: MontaVista Software, Inc.
16 * ppopov@mvista.com or source@mvista.com
17 *
18 * ########################################################################
19 *
20 * This program is free software; you can distribute it and/or modify it
21 * under the terms of the GNU General Public License (Version 2) as
22 * published by the Free Software Foundation.
23 *
24 * This program is distributed in the hope it will be useful, but WITHOUT
25 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
26 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 * for more details.
28 *
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, see <http://www.gnu.org/licenses/>.
31 *
32 * ########################################################################
33 *
34 *
35 */
36 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37
38 #include <linux/capability.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/module.h>
41 #include <linux/kernel.h>
42 #include <linux/string.h>
43 #include <linux/timer.h>
44 #include <linux/errno.h>
45 #include <linux/in.h>
46 #include <linux/ioport.h>
47 #include <linux/bitops.h>
48 #include <linux/slab.h>
49 #include <linux/interrupt.h>
50 #include <linux/netdevice.h>
51 #include <linux/etherdevice.h>
52 #include <linux/ethtool.h>
53 #include <linux/mii.h>
54 #include <linux/skbuff.h>
55 #include <linux/delay.h>
56 #include <linux/crc32.h>
57 #include <linux/phy.h>
58 #include <linux/platform_device.h>
59 #include <linux/cpu.h>
60 #include <linux/io.h>
61
62 #include <asm/mipsregs.h>
63 #include <asm/irq.h>
64 #include <asm/processor.h>
65
66 #include <au1000.h>
67 #include <au1xxx_eth.h>
68 #include <prom.h>
69
70 #include "au1000_eth.h"
71
72 #ifdef AU1000_ETH_DEBUG
73 static int au1000_debug = 5;
74 #else
75 static int au1000_debug = 3;
76 #endif
77
78 #define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
79 NETIF_MSG_PROBE | \
80 NETIF_MSG_LINK)
81
82 #define DRV_NAME "au1000_eth"
83 #define DRV_VERSION "1.7"
84 #define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
85 #define DRV_DESC "Au1xxx on-chip Ethernet driver"
86
87 MODULE_AUTHOR(DRV_AUTHOR);
88 MODULE_DESCRIPTION(DRV_DESC);
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(DRV_VERSION);
91
92 /*
93 * Theory of operation
94 *
95 * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
96 * There are four receive and four transmit descriptors. These
97 * descriptors are not in memory; rather, they are just a set of
98 * hardware registers.
99 *
100 * Since the Au1000 has a coherent data cache, the receive and
101 * transmit buffers are allocated from the KSEG0 segment. The
102 * hardware registers, however, are still mapped at KSEG1 to
103 * make sure there's no out-of-order writes, and that all writes
104 * complete immediately.
105 */
106
107 /*
108 * board-specific configurations
109 *
110 * PHY detection algorithm
111 *
112 * If phy_static_config is undefined, the PHY setup is
113 * autodetected:
114 *
115 * mii_probe() first searches the current MAC's MII bus for a PHY,
116 * selecting the first (or last, if phy_search_highest_addr is
117 * defined) PHY address not already claimed by another netdev.
118 *
119 * If nothing was found that way when searching for the 2nd ethernet
120 * controller's PHY and phy1_search_mac0 is defined, then
121 * the first MII bus is searched as well for an unclaimed PHY; this is
122 * needed in case of a dual-PHY accessible only through the MAC0's MII
123 * bus.
124 *
125 * Finally, if no PHY is found, then the corresponding ethernet
126 * controller is not registered to the network subsystem.
127 */
128
129 /* autodetection defaults: phy1_search_mac0 */
130
131 /* static PHY setup
132 *
133 * most boards PHY setup should be detectable properly with the
134 * autodetection algorithm in mii_probe(), but in some cases (e.g. if
135 * you have a switch attached, or want to use the PHY's interrupt
136 * notification capabilities) you can provide a static PHY
137 * configuration here
138 *
139 * IRQs may only be set, if a PHY address was configured
140 * If a PHY address is given, also a bus id is required to be set
141 *
142 * ps: make sure the used irqs are configured properly in the board
143 * specific irq-map
144 */
145
/*
 * Power up the MAC block: enable its clock and then release the three
 * reset lines.  A no-op if the MAC is already enabled, unless
 * @force_reset is set, in which case the sequence is re-run.
 * Serialized against other register accesses by aup->lock.
 */
static void au1000_enable_mac(struct net_device *dev, int force_reset)
{
	unsigned long flags;
	struct au1000_private *aup = netdev_priv(dev);

	spin_lock_irqsave(&aup->lock, flags);

	if (force_reset || (!aup->mac_enabled)) {
		/* clock must be running before the resets are deasserted */
		writel(MAC_EN_CLOCK_ENABLE, aup->enable);
		au_sync_delay(2);
		writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
				| MAC_EN_CLOCK_ENABLE), aup->enable);
		au_sync_delay(2);

		aup->mac_enabled = 1;
	}

	spin_unlock_irqrestore(&aup->lock, flags);
}
165
166 /*
167 * MII operations
168 */
/*
 * Read one MII register of the PHY at @phy_addr via this MAC's MDIO
 * engine.  Busy-waits (up to ~20ms, in 1ms steps) for the engine both
 * before issuing the read and while it completes.
 *
 * Returns the 16-bit register value, or -1 on a busy timeout.
 */
static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 *const mii_control_reg = &aup->mac->mii_control;
	u32 *const mii_data_reg = &aup->mac->mii_data;
	u32 timedout = 20;
	u32 mii_control;

	/* wait for any previous MII transaction to finish */
	while (readl(mii_control_reg) & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			netdev_err(dev, "read_MII busy timeout!!\n");
			return -1;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;

	writel(mii_control, mii_control_reg);

	/* now wait for our own read to complete */
	timedout = 20;
	while (readl(mii_control_reg) & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			netdev_err(dev, "mdio_read busy timeout!!\n");
			return -1;
		}
	}
	return readl(mii_data_reg);
}
200
/*
 * Write @value to one MII register of the PHY at @phy_addr.  Waits for
 * the MDIO engine to be idle first; the write itself is posted (no wait
 * for completion -- the next transaction's busy-wait covers that).
 * Errors are logged but not reported to the caller.
 */
static void au1000_mdio_write(struct net_device *dev, int phy_addr,
			      int reg, u16 value)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 *const mii_control_reg = &aup->mac->mii_control;
	u32 *const mii_data_reg = &aup->mac->mii_data;
	u32 timedout = 20;
	u32 mii_control;

	while (readl(mii_control_reg) & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			netdev_err(dev, "mdio_write busy timeout!!\n");
			return;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;

	/* data must be loaded before the control word kicks the engine */
	writel(value, mii_data_reg);
	writel(mii_control, mii_control_reg);
}
224
225 static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
226 {
227 /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does
228 * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus)
229 */
230 struct net_device *const dev = bus->priv;
231
232 /* make sure the MAC associated with this
233 * mii_bus is enabled
234 */
235 au1000_enable_mac(dev, 0);
236
237 return au1000_mdio_read(dev, phy_addr, regnum);
238 }
239
240 static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
241 u16 value)
242 {
243 struct net_device *const dev = bus->priv;
244
245 /* make sure the MAC associated with this
246 * mii_bus is enabled
247 */
248 au1000_enable_mac(dev, 0);
249
250 au1000_mdio_write(dev, phy_addr, regnum, value);
251 return 0;
252 }
253
254 static int au1000_mdiobus_reset(struct mii_bus *bus)
255 {
256 struct net_device *const dev = bus->priv;
257
258 /* make sure the MAC associated with this
259 * mii_bus is enabled
260 */
261 au1000_enable_mac(dev, 0);
262
263 return 0;
264 }
265
/*
 * Disable the receiver and transmitter in the MAC control register.
 * Caller is responsible for any locking; the RMW here is not atomic.
 */
static void au1000_hard_stop(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 reg;

	netif_dbg(aup, drv, dev, "hard stop\n");

	reg = readl(&aup->mac->control);
	reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
	writel(reg, &aup->mac->control);
	au_sync_delay(10);
}
278
/*
 * Re-enable the receiver and transmitter (inverse of au1000_hard_stop).
 * Caller is responsible for any locking; the RMW here is not atomic.
 */
static void au1000_enable_rx_tx(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 reg;

	netif_dbg(aup, hw, dev, "enable_rx_tx\n");

	reg = readl(&aup->mac->control);
	reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
	writel(reg, &aup->mac->control);
	au_sync_delay(10);
}
291
/*
 * phylib link-change callback (registered via phy_connect()).
 *
 * Tracks speed/duplex/link transitions against the cached old_* values
 * in aup and reprograms the MAC where needed.  A duplex change requires
 * RX/TX to be stopped while the control register is rewritten.  The
 * hardware/state updates run under aup->lock; logging happens after the
 * lock is dropped.
 */
static void
au1000_adjust_link(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct phy_device *phydev = aup->phy_dev;
	unsigned long flags;
	u32 reg;

	int status_change = 0;

	BUG_ON(!aup->phy_dev);

	spin_lock_irqsave(&aup->lock, flags);

	if (phydev->link && (aup->old_speed != phydev->speed)) {
		/* speed changed */

		switch (phydev->speed) {
		case SPEED_10:
		case SPEED_100:
			break;
		default:
			/* MAC only does 10/100; nothing to program here,
			 * just warn about the unexpected value
			 */
			netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
					phydev->speed);
			break;
		}

		aup->old_speed = phydev->speed;

		status_change = 1;
	}

	if (phydev->link && (aup->old_duplex != phydev->duplex)) {
		/* duplex mode changed */

		/* switching duplex mode requires to disable rx and tx! */
		au1000_hard_stop(dev);

		reg = readl(&aup->mac->control);
		if (DUPLEX_FULL == phydev->duplex) {
			reg |= MAC_FULL_DUPLEX;
			reg &= ~MAC_DISABLE_RX_OWN;
		} else {
			reg &= ~MAC_FULL_DUPLEX;
			/* half duplex: ignore frames we sent ourselves */
			reg |= MAC_DISABLE_RX_OWN;
		}
		writel(reg, &aup->mac->control);
		au_sync_delay(1);

		au1000_enable_rx_tx(dev);
		aup->old_duplex = phydev->duplex;

		status_change = 1;
	}

	if (phydev->link != aup->old_link) {
		/* link state changed */

		if (!phydev->link) {
			/* link went down; force re-programming on next up */
			aup->old_speed = 0;
			aup->old_duplex = -1;
		}

		aup->old_link = phydev->link;
		status_change = 1;
	}

	spin_unlock_irqrestore(&aup->lock, flags);

	if (status_change) {
		if (phydev->link)
			netdev_info(dev, "link up (%d/%s)\n",
			       phydev->speed,
			       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			netdev_info(dev, "link down\n");
	}
}
371
/*
 * Locate a PHY for this MAC (static config or autodetection, see the
 * "board-specific configurations" comment above), attach to it via
 * phylib, and restrict the advertised features to what the MAC supports.
 *
 * Returns 0 on success (including the PHY-less static setup), -1 if no
 * PHY was found, or the phy_connect() error.
 */
static int au1000_mii_probe(struct net_device *dev)
{
	struct au1000_private *const aup = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	int phy_addr;

	if (aup->phy_static_config) {
		BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);

		if (aup->phy_addr)
			phydev = aup->mii_bus->phy_map[aup->phy_addr];
		else
			netdev_info(dev, "using PHY-less setup\n");
		/* NOTE(review): this returns before the phy_connect() call
		 * below, so a statically configured PHY is looked up but
		 * never attached here -- confirm this is intentional
		 */
		return 0;
	}

	/* find the first (lowest address) PHY
	 * on the current MAC's MII bus
	 */
	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
		if (aup->mii_bus->phy_map[phy_addr]) {
			phydev = aup->mii_bus->phy_map[phy_addr];
			if (!aup->phy_search_highest_addr)
				/* break out with first one found */
				break;
		}

	if (aup->phy1_search_mac0) {
		/* try harder to find a PHY */
		if (!phydev && (aup->mac_id == 1)) {
			/* no PHY found, maybe we have a dual PHY? */
			dev_info(&dev->dev, ": no PHY found on MAC1, "
				"let's see if it's attached to MAC0...\n");

			/* find the first (lowest address) non-attached
			 * PHY on the MAC0 MII bus
			 */
			for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
				struct phy_device *const tmp_phydev =
					aup->mii_bus->phy_map[phy_addr];

				/* NOTE(review): mac_id == 1 is guaranteed by
				 * the enclosing if, so this breaks on the
				 * first iteration and the loop never scans
				 * anything -- looks suspicious; verify
				 * against mainline history
				 */
				if (aup->mac_id == 1)
					break;

				/* no PHY here... */
				if (!tmp_phydev)
					continue;

				/* already claimed by MAC0 */
				if (tmp_phydev->attached_dev)
					continue;

				phydev = tmp_phydev;
				break; /* found it */
			}
		}
	}

	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -1;
	}

	/* now we are supposed to have a proper phydev, to attach to... */
	BUG_ON(phydev->attached_dev);

	phydev = phy_connect(dev, dev_name(&phydev->dev),
			     &au1000_adjust_link, PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		netdev_err(dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	phydev->supported &= (SUPPORTED_10baseT_Half
			      | SUPPORTED_10baseT_Full
			      | SUPPORTED_100baseT_Half
			      | SUPPORTED_100baseT_Full
			      | SUPPORTED_Autoneg
			      /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */
			      | SUPPORTED_MII
			      | SUPPORTED_TP);

	phydev->advertising = phydev->supported;

	/* force au1000_adjust_link() to program everything on first link-up */
	aup->old_link = 0;
	aup->old_speed = 0;
	aup->old_duplex = -1;
	aup->phy_dev = phydev;

	netdev_info(dev, "attached PHY driver [%s] "
			"(mii_bus:phy_addr=%s, irq=%d)\n",
			phydev->drv->name, dev_name(&phydev->dev), phydev->irq);

	return 0;
}
469
470
471 /*
472 * Buffer allocation/deallocation routines. The buffer descriptor returned
473 * has the virtual and dma address of a buffer suitable for
474 * both, receive and transmit operations.
475 */
476 static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
477 {
478 struct db_dest *pDB;
479 pDB = aup->pDBfree;
480
481 if (pDB)
482 aup->pDBfree = pDB->pnext;
483
484 return pDB;
485 }
486
/*
 * Return a buffer descriptor to the free list.
 *
 * NOTE(review): the linkage looks inverted for a push-front -- it sets
 * the OLD head's pnext to pDB instead of pDB->pnext to the old head,
 * which would orphan the remainder of the list.  This matches the
 * long-standing upstream code and apparently works because descriptors
 * are released in the reverse order they were taken; confirm before
 * changing.
 */
void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
{
	struct db_dest *pDBfree = aup->pDBfree;
	if (pDBfree)
		pDBfree->pnext = pDB;
	aup->pDBfree = pDB;
}
494
/*
 * Stop the MAC, gate its clock, and clear the DMA ring control bits.
 * Must be called with aup->lock held (see au1000_reset_mac() for the
 * locked wrapper).
 */
static void au1000_reset_mac_unlocked(struct net_device *dev)
{
	struct au1000_private *const aup = netdev_priv(dev);
	int i;

	au1000_hard_stop(dev);

	/* clock on (resets asserted), then everything off */
	writel(MAC_EN_CLOCK_ENABLE, aup->enable);
	au_sync_delay(2);
	writel(0, aup->enable);
	au_sync_delay(2);

	aup->tx_full = 0;
	for (i = 0; i < NUM_RX_DMA; i++) {
		/* reset control bits */
		aup->rx_dma_ring[i]->buff_stat &= ~0xf;
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		/* reset control bits */
		aup->tx_dma_ring[i]->buff_stat &= ~0xf;
	}

	aup->mac_enabled = 0;

}
520
521 static void au1000_reset_mac(struct net_device *dev)
522 {
523 struct au1000_private *const aup = netdev_priv(dev);
524 unsigned long flags;
525
526 netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
527 (unsigned)aup);
528
529 spin_lock_irqsave(&aup->lock, flags);
530
531 au1000_reset_mac_unlocked(dev);
532
533 spin_unlock_irqrestore(&aup->lock, flags);
534 }
535
536 /*
537 * Setup the receive and transmit "rings". These pointers are the addresses
538 * of the rx and tx MAC DMA registers so they are fixed by the hardware --
539 * these are not descriptors sitting in memory.
540 */
/*
 * Setup the receive and transmit "rings". These pointers are the addresses
 * of the rx and tx MAC DMA registers so they are fixed by the hardware --
 * these are not descriptors sitting in memory.
 *
 * Layout: the TX descriptor registers start at @tx_base, the RX ones at
 * @tx_base + 0x100.  (Arithmetic on void __iomem * relies on the GCC
 * extension used throughout the kernel.)
 */
static void
au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
{
	int i;

	for (i = 0; i < NUM_RX_DMA; i++) {
		aup->rx_dma_ring[i] = (struct rx_dma *)
			(tx_base + 0x100 + sizeof(struct rx_dma) * i);
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		aup->tx_dma_ring[i] = (struct tx_dma *)
			(tx_base + sizeof(struct tx_dma) * i);
	}
}
555
556 /*
557 * ethtool operations
558 */
559
560 static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
561 {
562 struct au1000_private *aup = netdev_priv(dev);
563
564 if (aup->phy_dev)
565 return phy_ethtool_gset(aup->phy_dev, cmd);
566
567 return -EINVAL;
568 }
569
570 static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
571 {
572 struct au1000_private *aup = netdev_priv(dev);
573
574 if (!capable(CAP_NET_ADMIN))
575 return -EPERM;
576
577 if (aup->phy_dev)
578 return phy_ethtool_sset(aup->phy_dev, cmd);
579
580 return -EINVAL;
581 }
582
/*
 * ethtool ->get_drvinfo: report driver name/version and a synthetic
 * bus id built from the MAC index (these are platform devices, so there
 * is no real bus address to report).
 */
static void
au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct au1000_private *aup = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
		 aup->mac_id);
	info->regdump_len = 0;
}
594
595 static void au1000_set_msglevel(struct net_device *dev, u32 value)
596 {
597 struct au1000_private *aup = netdev_priv(dev);
598 aup->msg_enable = value;
599 }
600
601 static u32 au1000_get_msglevel(struct net_device *dev)
602 {
603 struct au1000_private *aup = netdev_priv(dev);
604 return aup->msg_enable;
605 }
606
/* ethtool entry points exported by this driver */
static const struct ethtool_ops au1000_ethtool_ops = {
	.get_settings = au1000_get_settings,
	.set_settings = au1000_set_settings,
	.get_drvinfo = au1000_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = au1000_get_msglevel,
	.set_msglevel = au1000_set_msglevel,
};
615
616
617 /*
618 * Initialize the interface.
619 *
620 * When the device powers up, the clocks are disabled and the
621 * mac is in reset state. When the interface is closed, we
622 * do the same -- reset the device and disable the clocks to
623 * conserve power. Thus, whenever au1000_init() is called,
624 * the device should already be in reset state.
625 */
/*
 * Initialize the interface.
 *
 * When the device powers up, the clocks are disabled and the
 * mac is in reset state. When the interface is closed, we
 * do the same -- reset the device and disable the clocks to
 * conserve power. Thus, whenever au1000_init() is called,
 * the device should already be in reset state.
 *
 * Programs the station address, re-arms the RX descriptors, and sets
 * the MAC control word from the current PHY duplex state.  Always
 * returns 0.
 */
static int au1000_init(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	unsigned long flags;
	int i;
	u32 control;

	netif_dbg(aup, hw, dev, "au1000_init\n");

	/* bring the device out of reset */
	au1000_enable_mac(dev, 1);

	spin_lock_irqsave(&aup->lock, flags);

	writel(0, &aup->mac->control);
	/* resync software head/tail with what the hardware reports */
	aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
	aup->tx_tail = aup->tx_head;
	aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;

	/* station MAC address, split across the high/low registers */
	writel(dev->dev_addr[5]<<8 | dev->dev_addr[4],
					&aup->mac->mac_addr_high);
	writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
		dev->dev_addr[1]<<8 | dev->dev_addr[0],
					&aup->mac->mac_addr_low);


	for (i = 0; i < NUM_RX_DMA; i++)
		aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;

	au_sync();

	control = MAC_RX_ENABLE | MAC_TX_ENABLE;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
	control |= MAC_BIG_ENDIAN;
#endif
	if (aup->phy_dev) {
		if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex))
			control |= MAC_FULL_DUPLEX;
		else
			control |= MAC_DISABLE_RX_OWN;
	} else { /* PHY-less op, assume full-duplex */
		control |= MAC_FULL_DUPLEX;
	}

	writel(control, &aup->mac->control);
	writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */
	au_sync();

	spin_unlock_irqrestore(&aup->lock, flags);
	return 0;
}
677
678 static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
679 {
680 struct net_device_stats *ps = &dev->stats;
681
682 ps->rx_packets++;
683 if (status & RX_MCAST_FRAME)
684 ps->multicast++;
685
686 if (status & RX_ERROR) {
687 ps->rx_errors++;
688 if (status & RX_MISSED_FRAME)
689 ps->rx_missed_errors++;
690 if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
691 ps->rx_length_errors++;
692 if (status & RX_CRC_ERROR)
693 ps->rx_crc_errors++;
694 if (status & RX_COLL)
695 ps->collisions++;
696 } else
697 ps->rx_bytes += status & RX_FRAME_LEN_MASK;
698
699 }
700
701 /*
702 * Au1000 receive routine.
703 */
704 static int au1000_rx(struct net_device *dev)
705 {
706 struct au1000_private *aup = netdev_priv(dev);
707 struct sk_buff *skb;
708 struct rx_dma *prxd;
709 u32 buff_stat, status;
710 struct db_dest *pDB;
711 u32 frmlen;
712
713 netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
714
715 prxd = aup->rx_dma_ring[aup->rx_head];
716 buff_stat = prxd->buff_stat;
717 while (buff_stat & RX_T_DONE) {
718 status = prxd->status;
719 pDB = aup->rx_db_inuse[aup->rx_head];
720 au1000_update_rx_stats(dev, status);
721 if (!(status & RX_ERROR)) {
722
723 /* good frame */
724 frmlen = (status & RX_FRAME_LEN_MASK);
725 frmlen -= 4; /* Remove FCS */
726 skb = netdev_alloc_skb(dev, frmlen + 2);
727 if (skb == NULL) {
728 dev->stats.rx_dropped++;
729 continue;
730 }
731 skb_reserve(skb, 2); /* 16 byte IP header align */
732 skb_copy_to_linear_data(skb,
733 (unsigned char *)pDB->vaddr, frmlen);
734 skb_put(skb, frmlen);
735 skb->protocol = eth_type_trans(skb, dev);
736 netif_rx(skb); /* pass the packet to upper layers */
737 } else {
738 if (au1000_debug > 4) {
739 pr_err("rx_error(s):");
740 if (status & RX_MISSED_FRAME)
741 pr_cont(" miss");
742 if (status & RX_WDOG_TIMER)
743 pr_cont(" wdog");
744 if (status & RX_RUNT)
745 pr_cont(" runt");
746 if (status & RX_OVERLEN)
747 pr_cont(" overlen");
748 if (status & RX_COLL)
749 pr_cont(" coll");
750 if (status & RX_MII_ERROR)
751 pr_cont(" mii error");
752 if (status & RX_CRC_ERROR)
753 pr_cont(" crc error");
754 if (status & RX_LEN_ERROR)
755 pr_cont(" len error");
756 if (status & RX_U_CNTRL_FRAME)
757 pr_cont(" u control frame");
758 pr_cont("\n");
759 }
760 }
761 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
762 aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
763 au_sync();
764
765 /* next descriptor */
766 prxd = aup->rx_dma_ring[aup->rx_head];
767 buff_stat = prxd->buff_stat;
768 }
769 return 0;
770 }
771
/*
 * Fold one TX descriptor status word into the netdev statistics.
 * In full-duplex (or PHY-less) operation only jabber/underrun aborts
 * are real errors; in half duplex all abort causes count, plus the
 * carrier-related ones.
 */
static void au1000_update_tx_stats(struct net_device *dev, u32 status)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct net_device_stats *ps = &dev->stats;

	if (status & TX_FRAME_ABORTED) {
		if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
			if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
				/* any other tx errors are only valid
				 * in half duplex mode
				 */
				ps->tx_errors++;
				ps->tx_aborted_errors++;
			}
		} else {
			ps->tx_errors++;
			ps->tx_aborted_errors++;
			if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
				ps->tx_carrier_errors++;
		}
	}
}
794
795 /*
796 * Called from the interrupt service routine to acknowledge
797 * the TX DONE bits. This is a must if the irq is setup as
798 * edge triggered.
799 */
/*
 * Called from the interrupt service routine to acknowledge
 * the TX DONE bits. This is a must if the irq is setup as
 * edge triggered.
 *
 * Reaps completed descriptors from tx_tail, accumulates their stats,
 * and restarts the queue if it had been stopped for a full ring.
 */
static void au1000_tx_ack(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct tx_dma *ptxd;

	ptxd = aup->tx_dma_ring[aup->tx_tail];

	while (ptxd->buff_stat & TX_T_DONE) {
		au1000_update_tx_stats(dev, ptxd->status);
		/* clearing TX_T_DONE acks the interrupt for this slot */
		ptxd->buff_stat &= ~TX_T_DONE;
		ptxd->len = 0;
		au_sync();

		aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
		ptxd = aup->tx_dma_ring[aup->tx_tail];

		if (aup->tx_full) {
			aup->tx_full = 0;
			netif_wake_queue(dev);
		}
	}
}
822
823 /*
824 * Au1000 interrupt service routine.
825 */
826 static irqreturn_t au1000_interrupt(int irq, void *dev_id)
827 {
828 struct net_device *dev = dev_id;
829
830 /* Handle RX interrupts first to minimize chance of overrun */
831
832 au1000_rx(dev);
833 au1000_tx_ack(dev);
834 return IRQ_RETVAL(1);
835 }
836
/*
 * net_device ->ndo_open: request the IRQ, (re)initialize the MAC,
 * kick the PHY state machine, and start the TX queue.
 *
 * Returns 0 on success or a negative errno (IRQ or init failure).
 */
static int au1000_open(struct net_device *dev)
{
	int retval;
	struct au1000_private *aup = netdev_priv(dev);

	netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);

	retval = request_irq(dev->irq, au1000_interrupt, 0,
			     dev->name, dev);
	if (retval) {
		netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
		return retval;
	}

	retval = au1000_init(dev);
	if (retval) {
		netdev_err(dev, "error in au1000_init\n");
		free_irq(dev->irq, dev);
		return retval;
	}

	if (aup->phy_dev) {
		/* cause the PHY state machine to schedule a link state check */
		aup->phy_dev->state = PHY_CHANGELINK;
		phy_start(aup->phy_dev);
	}

	netif_start_queue(dev);

	netif_dbg(aup, drv, dev, "open: Initialization done.\n");

	return 0;
}
870
/*
 * net_device ->ndo_close: stop the PHY, reset the MAC (which also
 * gates its clock for power saving), stop the queue, and release the
 * IRQ.  Always returns 0.
 */
static int au1000_close(struct net_device *dev)
{
	unsigned long flags;
	struct au1000_private *const aup = netdev_priv(dev);

	netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);

	if (aup->phy_dev)
		phy_stop(aup->phy_dev);

	spin_lock_irqsave(&aup->lock, flags);

	au1000_reset_mac_unlocked(dev);

	/* stop the device */
	netif_stop_queue(dev);

	/* disable the interrupt */
	/* NOTE(review): free_irq() can sleep and is called here inside a
	 * spinlock with IRQs off -- matches the existing upstream code,
	 * but worth confirming against might_sleep() diagnostics
	 */
	free_irq(dev->irq, dev);
	spin_unlock_irqrestore(&aup->lock, flags);

	return 0;
}
894
895 /*
896 * Au1000 transmit routine.
897 */
/*
 * Au1000 transmit routine (->ndo_start_xmit).
 *
 * Copies the skb into the pre-allocated DMA buffer of the descriptor at
 * tx_head, pads short frames to ETH_ZLEN, and hands the descriptor to
 * the hardware.  If the head descriptor is still owned by the MAC the
 * ring is full: the queue is stopped and NETDEV_TX_BUSY returned.
 */
static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct net_device_stats *ps = &dev->stats;
	struct tx_dma *ptxd;
	u32 buff_stat;
	struct db_dest *pDB;
	int i;

	netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
				(unsigned)aup, skb->len,
				skb->data, aup->tx_head);

	ptxd = aup->tx_dma_ring[aup->tx_head];
	buff_stat = ptxd->buff_stat;
	if (buff_stat & TX_DMA_ENABLE) {
		/* We've wrapped around and the transmitter is still busy */
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return NETDEV_TX_BUSY;
	} else if (buff_stat & TX_T_DONE) {
		/* slot completed but not yet reaped by au1000_tx_ack() */
		au1000_update_tx_stats(dev, ptxd->status);
		ptxd->len = 0;
	}

	if (aup->tx_full) {
		aup->tx_full = 0;
		netif_wake_queue(dev);
	}

	/* copy the frame into the slot's DMA buffer, zero-padding runts */
	pDB = aup->tx_db_inuse[aup->tx_head];
	skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
	if (skb->len < ETH_ZLEN) {
		for (i = skb->len; i < ETH_ZLEN; i++)
			((char *)pDB->vaddr)[i] = 0;

		ptxd->len = ETH_ZLEN;
	} else
		ptxd->len = skb->len;

	ps->tx_packets++;
	ps->tx_bytes += ptxd->len;

	/* setting TX_DMA_ENABLE hands the descriptor to the hardware */
	ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
	au_sync();
	dev_kfree_skb(skb);
	aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
	return NETDEV_TX_OK;
}
947
948 /*
949 * The Tx ring has been full longer than the watchdog timeout
950 * value. The transmitter must be hung?
951 */
952 static void au1000_tx_timeout(struct net_device *dev)
953 {
954 netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
955 au1000_reset_mac(dev);
956 au1000_init(dev);
957 dev->trans_start = jiffies; /* prevent tx timeout */
958 netif_wake_queue(dev);
959 }
960
/*
 * ->ndo_set_rx_mode: program the MAC's RX filtering from dev->flags and
 * the multicast list.  Three modes: promiscuous, pass-all-multicast
 * (also used when the list exceeds MULTICAST_FILTER_LIMIT), or a 64-bit
 * CRC-based hash filter.
 */
static void au1000_multicast_list(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 reg;

	netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags);
	reg = readl(&aup->mac->control);
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		/* NOTE(review): PASS_ALL_MULTI/HASH_MODE set earlier are not
		 * cleared here -- harmless since promiscuous supersedes them,
		 * but asymmetric with the other branches
		 */
		reg |= MAC_PROMISCUOUS;
	} else if ((dev->flags & IFF_ALLMULTI)  ||
			   netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
		reg |= MAC_PASS_ALL_MULTI;
		reg &= ~MAC_PROMISCUOUS;
		netdev_info(dev, "Pass all multicast\n");
	} else {
		struct netdev_hw_addr *ha;
		u32 mc_filter[2];	/* Multicast hash filter */

		mc_filter[1] = mc_filter[0] = 0;
		/* hash on the top 6 bits of the address CRC */
		netdev_for_each_mc_addr(ha, dev)
			set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
					(long *)mc_filter);
		writel(mc_filter[1], &aup->mac->multi_hash_high);
		writel(mc_filter[0], &aup->mac->multi_hash_low);
		reg &= ~MAC_PROMISCUOUS;
		reg |= MAC_HASH_MODE;
	}
	writel(reg, &aup->mac->control);
}
990
991 static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
992 {
993 struct au1000_private *aup = netdev_priv(dev);
994
995 if (!netif_running(dev))
996 return -EINVAL;
997
998 if (!aup->phy_dev)
999 return -EINVAL; /* PHY not controllable */
1000
1001 return phy_mii_ioctl(aup->phy_dev, rq, cmd);
1002 }
1003
/* net_device entry points; address/MTU handling uses the generic
 * ethernet helpers
 */
static const struct net_device_ops au1000_netdev_ops = {
	.ndo_open		= au1000_open,
	.ndo_stop		= au1000_close,
	.ndo_start_xmit		= au1000_tx,
	.ndo_set_rx_mode	= au1000_multicast_list,
	.ndo_do_ioctl		= au1000_ioctl,
	.ndo_tx_timeout		= au1000_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
1015
1016 static int au1000_probe(struct platform_device *pdev)
1017 {
1018 static unsigned version_printed;
1019 struct au1000_private *aup = NULL;
1020 struct au1000_eth_platform_data *pd;
1021 struct net_device *dev = NULL;
1022 struct db_dest *pDB, *pDBfree;
1023 int irq, i, err = 0;
1024 struct resource *base, *macen, *macdma;
1025
1026 base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1027 if (!base) {
1028 dev_err(&pdev->dev, "failed to retrieve base register\n");
1029 err = -ENODEV;
1030 goto out;
1031 }
1032
1033 macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1034 if (!macen) {
1035 dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
1036 err = -ENODEV;
1037 goto out;
1038 }
1039
1040 irq = platform_get_irq(pdev, 0);
1041 if (irq < 0) {
1042 dev_err(&pdev->dev, "failed to retrieve IRQ\n");
1043 err = -ENODEV;
1044 goto out;
1045 }
1046
1047 macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1048 if (!macdma) {
1049 dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n");
1050 err = -ENODEV;
1051 goto out;
1052 }
1053
1054 if (!request_mem_region(base->start, resource_size(base),
1055 pdev->name)) {
1056 dev_err(&pdev->dev, "failed to request memory region for base registers\n");
1057 err = -ENXIO;
1058 goto out;
1059 }
1060
1061 if (!request_mem_region(macen->start, resource_size(macen),
1062 pdev->name)) {
1063 dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
1064 err = -ENXIO;
1065 goto err_request;
1066 }
1067
1068 if (!request_mem_region(macdma->start, resource_size(macdma),
1069 pdev->name)) {
1070 dev_err(&pdev->dev, "failed to request MACDMA memory region\n");
1071 err = -ENXIO;
1072 goto err_macdma;
1073 }
1074
1075 dev = alloc_etherdev(sizeof(struct au1000_private));
1076 if (!dev) {
1077 err = -ENOMEM;
1078 goto err_alloc;
1079 }
1080
1081 SET_NETDEV_DEV(dev, &pdev->dev);
1082 platform_set_drvdata(pdev, dev);
1083 aup = netdev_priv(dev);
1084
1085 spin_lock_init(&aup->lock);
1086 aup->msg_enable = (au1000_debug < 4 ?
1087 AU1000_DEF_MSG_ENABLE : au1000_debug);
1088
1089 /* Allocate the data buffers
1090 * Snooping works fine with eth on all au1xxx
1091 */
1092 aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
1093 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1094 &aup->dma_addr, 0);
1095 if (!aup->vaddr) {
1096 dev_err(&pdev->dev, "failed to allocate data buffers\n");
1097 err = -ENOMEM;
1098 goto err_vaddr;
1099 }
1100
1101 /* aup->mac is the base address of the MAC's registers */
1102 aup->mac = (struct mac_reg *)
1103 ioremap_nocache(base->start, resource_size(base));
1104 if (!aup->mac) {
1105 dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
1106 err = -ENXIO;
1107 goto err_remap1;
1108 }
1109
1110 /* Setup some variables for quick register address access */
1111 aup->enable = (u32 *)ioremap_nocache(macen->start,
1112 resource_size(macen));
1113 if (!aup->enable) {
1114 dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
1115 err = -ENXIO;
1116 goto err_remap2;
1117 }
1118 aup->mac_id = pdev->id;
1119
1120 aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma));
1121 if (!aup->macdma) {
1122 dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
1123 err = -ENXIO;
1124 goto err_remap3;
1125 }
1126
1127 au1000_setup_hw_rings(aup, aup->macdma);
1128
1129 writel(0, aup->enable);
1130 aup->mac_enabled = 0;
1131
1132 pd = dev_get_platdata(&pdev->dev);
1133 if (!pd) {
1134 dev_info(&pdev->dev, "no platform_data passed,"
1135 " PHY search on MAC0\n");
1136 aup->phy1_search_mac0 = 1;
1137 } else {
1138 if (is_valid_ether_addr(pd->mac)) {
1139 memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
1140 } else {
1141 /* Set a random MAC since no valid provided by platform_data. */
1142 eth_hw_addr_random(dev);
1143 }
1144
1145 aup->phy_static_config = pd->phy_static_config;
1146 aup->phy_search_highest_addr = pd->phy_search_highest_addr;
1147 aup->phy1_search_mac0 = pd->phy1_search_mac0;
1148 aup->phy_addr = pd->phy_addr;
1149 aup->phy_busid = pd->phy_busid;
1150 aup->phy_irq = pd->phy_irq;
1151 }
1152
1153 if (aup->phy_busid && aup->phy_busid > 0) {
1154 dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n");
1155 err = -ENODEV;
1156 goto err_mdiobus_alloc;
1157 }
1158
1159 aup->mii_bus = mdiobus_alloc();
1160 if (aup->mii_bus == NULL) {
1161 dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
1162 err = -ENOMEM;
1163 goto err_mdiobus_alloc;
1164 }
1165
1166 aup->mii_bus->priv = dev;
1167 aup->mii_bus->read = au1000_mdiobus_read;
1168 aup->mii_bus->write = au1000_mdiobus_write;
1169 aup->mii_bus->reset = au1000_mdiobus_reset;
1170 aup->mii_bus->name = "au1000_eth_mii";
1171 snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1172 pdev->name, aup->mac_id);
1173 aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1174 if (aup->mii_bus->irq == NULL) {
1175 err = -ENOMEM;
1176 goto err_out;
1177 }
1178
1179 for (i = 0; i < PHY_MAX_ADDR; ++i)
1180 aup->mii_bus->irq[i] = PHY_POLL;
1181 /* if known, set corresponding PHY IRQs */
1182 if (aup->phy_static_config)
1183 if (aup->phy_irq && aup->phy_busid == aup->mac_id)
1184 aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq;
1185
1186 err = mdiobus_register(aup->mii_bus);
1187 if (err) {
1188 dev_err(&pdev->dev, "failed to register MDIO bus\n");
1189 goto err_mdiobus_reg;
1190 }
1191
1192 err = au1000_mii_probe(dev);
1193 if (err != 0)
1194 goto err_out;
1195
1196 pDBfree = NULL;
1197 /* setup the data buffer descriptors and attach a buffer to each one */
1198 pDB = aup->db;
1199 for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
1200 pDB->pnext = pDBfree;
1201 pDBfree = pDB;
1202 pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
1203 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
1204 pDB++;
1205 }
1206 aup->pDBfree = pDBfree;
1207
1208 err = -ENODEV;
1209 for (i = 0; i < NUM_RX_DMA; i++) {
1210 pDB = au1000_GetFreeDB(aup);
1211 if (!pDB)
1212 goto err_out;
1213
1214 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1215 aup->rx_db_inuse[i] = pDB;
1216 }
1217
1218 err = -ENODEV;
1219 for (i = 0; i < NUM_TX_DMA; i++) {
1220 pDB = au1000_GetFreeDB(aup);
1221 if (!pDB)
1222 goto err_out;
1223
1224 aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1225 aup->tx_dma_ring[i]->len = 0;
1226 aup->tx_db_inuse[i] = pDB;
1227 }
1228
1229 dev->base_addr = base->start;
1230 dev->irq = irq;
1231 dev->netdev_ops = &au1000_netdev_ops;
1232 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1233 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1234
1235 /*
1236 * The boot code uses the ethernet controller, so reset it to start
1237 * fresh. au1000_init() expects that the device is in reset state.
1238 */
1239 au1000_reset_mac(dev);
1240
1241 err = register_netdev(dev);
1242 if (err) {
1243 netdev_err(dev, "Cannot register net device, aborting.\n");
1244 goto err_out;
1245 }
1246
1247 netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
1248 (unsigned long)base->start, irq);
1249 if (version_printed++ == 0)
1250 pr_info("%s version %s %s\n",
1251 DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1252
1253 return 0;
1254
1255 err_out:
1256 if (aup->mii_bus != NULL)
1257 mdiobus_unregister(aup->mii_bus);
1258
1259 /* here we should have a valid dev plus aup-> register addresses
1260 * so we can reset the mac properly.
1261 */
1262 au1000_reset_mac(dev);
1263
1264 for (i = 0; i < NUM_RX_DMA; i++) {
1265 if (aup->rx_db_inuse[i])
1266 au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
1267 }
1268 for (i = 0; i < NUM_TX_DMA; i++) {
1269 if (aup->tx_db_inuse[i])
1270 au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
1271 }
1272 err_mdiobus_reg:
1273 mdiobus_free(aup->mii_bus);
1274 err_mdiobus_alloc:
1275 iounmap(aup->macdma);
1276 err_remap3:
1277 iounmap(aup->enable);
1278 err_remap2:
1279 iounmap(aup->mac);
1280 err_remap1:
1281 dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
1282 (void *)aup->vaddr, aup->dma_addr);
1283 err_vaddr:
1284 free_netdev(dev);
1285 err_alloc:
1286 release_mem_region(macdma->start, resource_size(macdma));
1287 err_macdma:
1288 release_mem_region(macen->start, resource_size(macen));
1289 err_request:
1290 release_mem_region(base->start, resource_size(base));
1291 out:
1292 return err;
1293 }
1294
1295 static int au1000_remove(struct platform_device *pdev)
1296 {
1297 struct net_device *dev = platform_get_drvdata(pdev);
1298 struct au1000_private *aup = netdev_priv(dev);
1299 int i;
1300 struct resource *base, *macen;
1301
1302 unregister_netdev(dev);
1303 mdiobus_unregister(aup->mii_bus);
1304 mdiobus_free(aup->mii_bus);
1305
1306 for (i = 0; i < NUM_RX_DMA; i++)
1307 if (aup->rx_db_inuse[i])
1308 au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
1309
1310 for (i = 0; i < NUM_TX_DMA; i++)
1311 if (aup->tx_db_inuse[i])
1312 au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
1313
1314 dma_free_noncoherent(NULL, MAX_BUF_SIZE *
1315 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1316 (void *)aup->vaddr, aup->dma_addr);
1317
1318 iounmap(aup->macdma);
1319 iounmap(aup->mac);
1320 iounmap(aup->enable);
1321
1322 base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1323 release_mem_region(base->start, resource_size(base));
1324
1325 base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1326 release_mem_region(base->start, resource_size(base));
1327
1328 macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1329 release_mem_region(macen->start, resource_size(macen));
1330
1331 free_netdev(dev);
1332
1333 return 0;
1334 }
1335
1336 static struct platform_driver au1000_eth_driver = {
1337 .probe = au1000_probe,
1338 .remove = au1000_remove,
1339 .driver = {
1340 .name = "au1000-eth",
1341 .owner = THIS_MODULE,
1342 },
1343 };
1344
/* Generates the module init/exit functions that register and
 * unregister au1000_eth_driver with the platform bus.
 */
module_platform_driver(au1000_eth_driver);

/* Allow udev/modprobe to autoload this module for "au1000-eth" devices. */
MODULE_ALIAS("platform:au1000-eth");