1 /*
2 *
3 * Alchemy Au1x00 ethernet driver
4 *
5 * Copyright 2001-2003, 2006 MontaVista Software Inc.
6 * Copyright 2002 TimeSys Corp.
7 * Added ethtool/mii-tool support,
8 * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
9 * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
10 * or riemer@riemer-nt.de: fixed the link beat detection with
11 * ioctls (SIOCGMIIPHY)
12 * Copyright 2006 Herbert Valerio Riedel <hvr@gnu.org>
13 * converted to use linux-2.6.x's PHY framework
14 *
15 * Author: MontaVista Software, Inc.
16 * ppopov@mvista.com or source@mvista.com
17 *
18 * ########################################################################
19 *
20 * This program is free software; you can distribute it and/or modify it
21 * under the terms of the GNU General Public License (Version 2) as
22 * published by the Free Software Foundation.
23 *
24 * This program is distributed in the hope it will be useful, but WITHOUT
25 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
26 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 * for more details.
28 *
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, see <http://www.gnu.org/licenses/>.
31 *
32 * ########################################################################
33 *
34 *
35 */
36 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37
38 #include <linux/capability.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/module.h>
41 #include <linux/kernel.h>
42 #include <linux/string.h>
43 #include <linux/timer.h>
44 #include <linux/errno.h>
45 #include <linux/in.h>
46 #include <linux/ioport.h>
47 #include <linux/bitops.h>
48 #include <linux/slab.h>
49 #include <linux/interrupt.h>
50 #include <linux/init.h>
51 #include <linux/netdevice.h>
52 #include <linux/etherdevice.h>
53 #include <linux/ethtool.h>
54 #include <linux/mii.h>
55 #include <linux/skbuff.h>
56 #include <linux/delay.h>
57 #include <linux/crc32.h>
58 #include <linux/phy.h>
59 #include <linux/platform_device.h>
60 #include <linux/cpu.h>
61 #include <linux/io.h>
62
63 #include <asm/mipsregs.h>
64 #include <asm/irq.h>
65 #include <asm/processor.h>
66
67 #include <au1000.h>
68 #include <au1xxx_eth.h>
69 #include <prom.h>
70
71 #include "au1000_eth.h"
72
73 #ifdef AU1000_ETH_DEBUG
74 static int au1000_debug = 5;
75 #else
76 static int au1000_debug = 3;
77 #endif
78
79 #define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
80 NETIF_MSG_PROBE | \
81 NETIF_MSG_LINK)
82
83 #define DRV_NAME "au1000_eth"
84 #define DRV_VERSION "1.7"
85 #define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
86 #define DRV_DESC "Au1xxx on-chip Ethernet driver"
87
88 MODULE_AUTHOR(DRV_AUTHOR);
89 MODULE_DESCRIPTION(DRV_DESC);
90 MODULE_LICENSE("GPL");
91 MODULE_VERSION(DRV_VERSION);
92
93 /*
94 * Theory of operation
95 *
96 * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
97 * There are four receive and four transmit descriptors. These
98 * descriptors are not in memory; rather, they are just a set of
99 * hardware registers.
100 *
101 * Since the Au1000 has a coherent data cache, the receive and
102 * transmit buffers are allocated from the KSEG0 segment. The
103 * hardware registers, however, are still mapped at KSEG1 to
104  *             make sure there are no out-of-order writes, and that all writes
105 * complete immediately.
106 */
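
/*
 * As an illustration of the descriptor scheme described above: with only
 * four descriptors per direction, the ring indices used further down
 * simply wrap with a power-of-two mask, e.g.
 *
 *	aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
 *
 * which is how au1000_rx() and au1000_tx_ack() step through the
 * hardware descriptor set.
 */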
107
108 /*
109 * board-specific configurations
110 *
111 * PHY detection algorithm
112 *
113 * If phy_static_config is undefined, the PHY setup is
114 * autodetected:
115 *
116 * mii_probe() first searches the current MAC's MII bus for a PHY,
117 * selecting the first (or last, if phy_search_highest_addr is
118 * defined) PHY address not already claimed by another netdev.
119 *
120 * If nothing was found that way when searching for the 2nd ethernet
121 * controller's PHY and phy1_search_mac0 is defined, then
122 * the first MII bus is searched as well for an unclaimed PHY; this is
123  * needed when a dual PHY is accessible only through MAC0's MII
124  * bus.
125 *
126 * Finally, if no PHY is found, then the corresponding ethernet
127 * controller is not registered to the network subsystem.
128 */
129
130 /* autodetection defaults: phy1_search_mac0 */
131
132 /* static PHY setup
133 *
134  * most boards' PHY setup should be detected properly by the
135 * autodetection algorithm in mii_probe(), but in some cases (e.g. if
136 * you have a switch attached, or want to use the PHY's interrupt
137 * notification capabilities) you can provide a static PHY
138 * configuration here
139 *
140  * IRQs may only be set if a PHY address was configured.
141  * If a PHY address is given, a bus id must also be set.
142  *
143  * PS: make sure the IRQs used are configured properly in the
144  * board-specific irq map.
145 */
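
/*
 * A minimal sketch of such a static setup, as a board file might pass it
 * in. The field names are the ones copied out of platform_data in
 * au1000_probe() below; "my_eth0_pdata", the PHY address and the IRQ are
 * only placeholders for whatever the board actually uses:
 *
 *	static struct au1000_eth_platform_data my_eth0_pdata = {
 *		.phy_static_config	= 1,
 *		.phy_addr		= 1,
 *		.phy_busid		= 0,
 *		.phy_irq		= MY_BOARD_PHY_IRQ,
 *	};
 */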
146
147 static void au1000_enable_mac(struct net_device *dev, int force_reset)
148 {
149 unsigned long flags;
150 struct au1000_private *aup = netdev_priv(dev);
151
152 spin_lock_irqsave(&aup->lock, flags);
153
154 if (force_reset || (!aup->mac_enabled)) {
155 writel(MAC_EN_CLOCK_ENABLE, aup->enable);
156 au_sync_delay(2);
157 writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
158 | MAC_EN_CLOCK_ENABLE), aup->enable);
159 au_sync_delay(2);
160
161 aup->mac_enabled = 1;
162 }
163
164 spin_unlock_irqrestore(&aup->lock, flags);
165 }
166
167 /*
168 * MII operations
169 */
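
/*
 * Each MDIO transaction below follows the same pattern: wait for
 * MAC_MII_BUSY to clear, program the PHY address, register number and
 * transfer direction into mii_control, then (for reads) wait for the
 * transfer to finish and fetch the result from mii_data. The 20 x 1ms
 * polls bound how long a hung bus can stall the caller.
 */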
170 static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
171 {
172 struct au1000_private *aup = netdev_priv(dev);
173 u32 *const mii_control_reg = &aup->mac->mii_control;
174 u32 *const mii_data_reg = &aup->mac->mii_data;
175 u32 timedout = 20;
176 u32 mii_control;
177
178 while (readl(mii_control_reg) & MAC_MII_BUSY) {
179 mdelay(1);
180 if (--timedout == 0) {
181 netdev_err(dev, "read_MII busy timeout!!\n");
182 return -1;
183 }
184 }
185
186 mii_control = MAC_SET_MII_SELECT_REG(reg) |
187 MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;
188
189 writel(mii_control, mii_control_reg);
190
191 timedout = 20;
192 while (readl(mii_control_reg) & MAC_MII_BUSY) {
193 mdelay(1);
194 if (--timedout == 0) {
195 netdev_err(dev, "mdio_read busy timeout!!\n");
196 return -1;
197 }
198 }
199 return readl(mii_data_reg);
200 }
201
202 static void au1000_mdio_write(struct net_device *dev, int phy_addr,
203 int reg, u16 value)
204 {
205 struct au1000_private *aup = netdev_priv(dev);
206 u32 *const mii_control_reg = &aup->mac->mii_control;
207 u32 *const mii_data_reg = &aup->mac->mii_data;
208 u32 timedout = 20;
209 u32 mii_control;
210
211 while (readl(mii_control_reg) & MAC_MII_BUSY) {
212 mdelay(1);
213 if (--timedout == 0) {
214 netdev_err(dev, "mdio_write busy timeout!!\n");
215 return;
216 }
217 }
218
219 mii_control = MAC_SET_MII_SELECT_REG(reg) |
220 MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;
221
222 writel(value, mii_data_reg);
223 writel(mii_control, mii_control_reg);
224 }
225
226 static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
227 {
228 /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does
229 * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus)
230 */
231 struct net_device *const dev = bus->priv;
232
233 /* make sure the MAC associated with this
234 * mii_bus is enabled
235 */
236 au1000_enable_mac(dev, 0);
237
238 return au1000_mdio_read(dev, phy_addr, regnum);
239 }
240
241 static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
242 u16 value)
243 {
244 struct net_device *const dev = bus->priv;
245
246 /* make sure the MAC associated with this
247 * mii_bus is enabled
248 */
249 au1000_enable_mac(dev, 0);
250
251 au1000_mdio_write(dev, phy_addr, regnum, value);
252 return 0;
253 }
254
255 static int au1000_mdiobus_reset(struct mii_bus *bus)
256 {
257 struct net_device *const dev = bus->priv;
258
259 /* make sure the MAC associated with this
260 * mii_bus is enabled
261 */
262 au1000_enable_mac(dev, 0);
263
264 return 0;
265 }
266
267 static void au1000_hard_stop(struct net_device *dev)
268 {
269 struct au1000_private *aup = netdev_priv(dev);
270 u32 reg;
271
272 netif_dbg(aup, drv, dev, "hard stop\n");
273
274 reg = readl(&aup->mac->control);
275 reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
276 writel(reg, &aup->mac->control);
277 au_sync_delay(10);
278 }
279
280 static void au1000_enable_rx_tx(struct net_device *dev)
281 {
282 struct au1000_private *aup = netdev_priv(dev);
283 u32 reg;
284
285 netif_dbg(aup, hw, dev, "enable_rx_tx\n");
286
287 reg = readl(&aup->mac->control);
288 reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
289 writel(reg, &aup->mac->control);
290 au_sync_delay(10);
291 }
292
293 static void
294 au1000_adjust_link(struct net_device *dev)
295 {
296 struct au1000_private *aup = netdev_priv(dev);
297 struct phy_device *phydev = aup->phy_dev;
298 unsigned long flags;
299 u32 reg;
300
301 int status_change = 0;
302
303 BUG_ON(!aup->phy_dev);
304
305 spin_lock_irqsave(&aup->lock, flags);
306
307 if (phydev->link && (aup->old_speed != phydev->speed)) {
308 /* speed changed */
309
310 switch (phydev->speed) {
311 case SPEED_10:
312 case SPEED_100:
313 break;
314 default:
315 netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
316 phydev->speed);
317 break;
318 }
319
320 aup->old_speed = phydev->speed;
321
322 status_change = 1;
323 }
324
325 if (phydev->link && (aup->old_duplex != phydev->duplex)) {
326 /* duplex mode changed */
327
328                 /* switching duplex mode requires disabling rx and tx! */
329 au1000_hard_stop(dev);
330
331 reg = readl(&aup->mac->control);
332 if (DUPLEX_FULL == phydev->duplex) {
333 reg |= MAC_FULL_DUPLEX;
334 reg &= ~MAC_DISABLE_RX_OWN;
335 } else {
336 reg &= ~MAC_FULL_DUPLEX;
337 reg |= MAC_DISABLE_RX_OWN;
338 }
339 writel(reg, &aup->mac->control);
340 au_sync_delay(1);
341
342 au1000_enable_rx_tx(dev);
343 aup->old_duplex = phydev->duplex;
344
345 status_change = 1;
346 }
347
348 if (phydev->link != aup->old_link) {
349 /* link state changed */
350
351 if (!phydev->link) {
352 /* link went down */
353 aup->old_speed = 0;
354 aup->old_duplex = -1;
355 }
356
357 aup->old_link = phydev->link;
358 status_change = 1;
359 }
360
361 spin_unlock_irqrestore(&aup->lock, flags);
362
363 if (status_change) {
364 if (phydev->link)
365 netdev_info(dev, "link up (%d/%s)\n",
366 phydev->speed,
367 DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
368 else
369 netdev_info(dev, "link down\n");
370 }
371 }
372
373 static int au1000_mii_probe(struct net_device *dev)
374 {
375 struct au1000_private *const aup = netdev_priv(dev);
376 struct phy_device *phydev = NULL;
377 int phy_addr;
378
379 if (aup->phy_static_config) {
380 BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
381
382 if (aup->phy_addr)
383 phydev = aup->mii_bus->phy_map[aup->phy_addr];
384 else
385 netdev_info(dev, "using PHY-less setup\n");
386 return 0;
387 }
388
389 /* find the first (lowest address) PHY
390 * on the current MAC's MII bus
391 */
392 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
393 if (aup->mii_bus->phy_map[phy_addr]) {
394 phydev = aup->mii_bus->phy_map[phy_addr];
395 if (!aup->phy_search_highest_addr)
396 /* break out with first one found */
397 break;
398 }
399
400 if (aup->phy1_search_mac0) {
401 /* try harder to find a PHY */
402 if (!phydev && (aup->mac_id == 1)) {
403 /* no PHY found, maybe we have a dual PHY? */
404 dev_info(&dev->dev, ": no PHY found on MAC1, "
405 "let's see if it's attached to MAC0...\n");
406
407 /* find the first (lowest address) non-attached
408 * PHY on the MAC0 MII bus
409 */
410 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
411 struct phy_device *const tmp_phydev =
412 aup->mii_bus->phy_map[phy_addr];
413
414 if (aup->mac_id == 1)
415 break;
416
417 /* no PHY here... */
418 if (!tmp_phydev)
419 continue;
420
421 /* already claimed by MAC0 */
422 if (tmp_phydev->attached_dev)
423 continue;
424
425 phydev = tmp_phydev;
426 break; /* found it */
427 }
428 }
429 }
430
431 if (!phydev) {
432 netdev_err(dev, "no PHY found\n");
433 return -1;
434 }
435
436         /* now we should have a proper phydev to attach to... */
437 BUG_ON(phydev->attached_dev);
438
439 phydev = phy_connect(dev, dev_name(&phydev->dev),
440 &au1000_adjust_link, PHY_INTERFACE_MODE_MII);
441
442 if (IS_ERR(phydev)) {
443 netdev_err(dev, "Could not attach to PHY\n");
444 return PTR_ERR(phydev);
445 }
446
447 /* mask with MAC supported features */
448 phydev->supported &= (SUPPORTED_10baseT_Half
449 | SUPPORTED_10baseT_Full
450 | SUPPORTED_100baseT_Half
451 | SUPPORTED_100baseT_Full
452 | SUPPORTED_Autoneg
453 /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */
454 | SUPPORTED_MII
455 | SUPPORTED_TP);
456
457 phydev->advertising = phydev->supported;
458
459 aup->old_link = 0;
460 aup->old_speed = 0;
461 aup->old_duplex = -1;
462 aup->phy_dev = phydev;
463
464 netdev_info(dev, "attached PHY driver [%s] "
465 "(mii_bus:phy_addr=%s, irq=%d)\n",
466 phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
467
468 return 0;
469 }
470
471
472 /*
473 * Buffer allocation/deallocation routines. The buffer descriptor returned
474 * has the virtual and dma address of a buffer suitable for
475  * both receive and transmit operations.
476 */
477 static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
478 {
479 struct db_dest *pDB;
480 pDB = aup->pDBfree;
481
482 if (pDB)
483 aup->pDBfree = pDB->pnext;
484
485 return pDB;
486 }
487
488 void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
489 {
490 struct db_dest *pDBfree = aup->pDBfree;
491 if (pDBfree)
492 pDBfree->pnext = pDB;
493 aup->pDBfree = pDB;
494 }
495
496 static void au1000_reset_mac_unlocked(struct net_device *dev)
497 {
498 struct au1000_private *const aup = netdev_priv(dev);
499 int i;
500
501 au1000_hard_stop(dev);
502
503 writel(MAC_EN_CLOCK_ENABLE, aup->enable);
504 au_sync_delay(2);
505 writel(0, aup->enable);
506 au_sync_delay(2);
507
508 aup->tx_full = 0;
509 for (i = 0; i < NUM_RX_DMA; i++) {
510 /* reset control bits */
511 aup->rx_dma_ring[i]->buff_stat &= ~0xf;
512 }
513 for (i = 0; i < NUM_TX_DMA; i++) {
514 /* reset control bits */
515 aup->tx_dma_ring[i]->buff_stat &= ~0xf;
516 }
517
518 aup->mac_enabled = 0;
519
520 }
521
522 static void au1000_reset_mac(struct net_device *dev)
523 {
524 struct au1000_private *const aup = netdev_priv(dev);
525 unsigned long flags;
526
527 netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
528 (unsigned)aup);
529
530 spin_lock_irqsave(&aup->lock, flags);
531
532 au1000_reset_mac_unlocked(dev);
533
534 spin_unlock_irqrestore(&aup->lock, flags);
535 }
536
537 /*
538 * Setup the receive and transmit "rings". These pointers are the addresses
539 * of the rx and tx MAC DMA registers so they are fixed by the hardware --
540 * these are not descriptors sitting in memory.
541 */
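
/*
 * Concretely, au1000_setup_hw_rings() points the tx "descriptors" at the
 * start of the MACDMA register block and the rx "descriptors" at offset
 * 0x100 of that block, so aup->tx_dma_ring[] and aup->rx_dma_ring[] end up
 * as arrays of pointers into that register space rather than into RAM.
 */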
542 static void
543 au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
544 {
545 int i;
546
547 for (i = 0; i < NUM_RX_DMA; i++) {
548 aup->rx_dma_ring[i] = (struct rx_dma *)
549 (tx_base + 0x100 + sizeof(struct rx_dma) * i);
550 }
551 for (i = 0; i < NUM_TX_DMA; i++) {
552 aup->tx_dma_ring[i] = (struct tx_dma *)
553 (tx_base + sizeof(struct tx_dma) * i);
554 }
555 }
556
557 /*
558 * ethtool operations
559 */
560
561 static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
562 {
563 struct au1000_private *aup = netdev_priv(dev);
564
565 if (aup->phy_dev)
566 return phy_ethtool_gset(aup->phy_dev, cmd);
567
568 return -EINVAL;
569 }
570
571 static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
572 {
573 struct au1000_private *aup = netdev_priv(dev);
574
575 if (!capable(CAP_NET_ADMIN))
576 return -EPERM;
577
578 if (aup->phy_dev)
579 return phy_ethtool_sset(aup->phy_dev, cmd);
580
581 return -EINVAL;
582 }
583
584 static void
585 au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
586 {
587 struct au1000_private *aup = netdev_priv(dev);
588
589 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
590 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
591 snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
592 aup->mac_id);
593 info->regdump_len = 0;
594 }
595
596 static void au1000_set_msglevel(struct net_device *dev, u32 value)
597 {
598 struct au1000_private *aup = netdev_priv(dev);
599 aup->msg_enable = value;
600 }
601
602 static u32 au1000_get_msglevel(struct net_device *dev)
603 {
604 struct au1000_private *aup = netdev_priv(dev);
605 return aup->msg_enable;
606 }
607
608 static const struct ethtool_ops au1000_ethtool_ops = {
609 .get_settings = au1000_get_settings,
610 .set_settings = au1000_set_settings,
611 .get_drvinfo = au1000_get_drvinfo,
612 .get_link = ethtool_op_get_link,
613 .get_msglevel = au1000_get_msglevel,
614 .set_msglevel = au1000_set_msglevel,
615 };
616
617
618 /*
619 * Initialize the interface.
620 *
621 * When the device powers up, the clocks are disabled and the
622 * mac is in reset state. When the interface is closed, we
623 * do the same -- reset the device and disable the clocks to
624 * conserve power. Thus, whenever au1000_init() is called,
625 * the device should already be in reset state.
626 */
627 static int au1000_init(struct net_device *dev)
628 {
629 struct au1000_private *aup = netdev_priv(dev);
630 unsigned long flags;
631 int i;
632 u32 control;
633
634 netif_dbg(aup, hw, dev, "au1000_init\n");
635
636 /* bring the device out of reset */
637 au1000_enable_mac(dev, 1);
638
639 spin_lock_irqsave(&aup->lock, flags);
640
641 writel(0, &aup->mac->control);
642 aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
643 aup->tx_tail = aup->tx_head;
644 aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
645
646 writel(dev->dev_addr[5]<<8 | dev->dev_addr[4],
647 &aup->mac->mac_addr_high);
648 writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
649 dev->dev_addr[1]<<8 | dev->dev_addr[0],
650 &aup->mac->mac_addr_low);
651
652
653 for (i = 0; i < NUM_RX_DMA; i++)
654 aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
655
656 au_sync();
657
658 control = MAC_RX_ENABLE | MAC_TX_ENABLE;
659 #ifndef CONFIG_CPU_LITTLE_ENDIAN
660 control |= MAC_BIG_ENDIAN;
661 #endif
662 if (aup->phy_dev) {
663 if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex))
664 control |= MAC_FULL_DUPLEX;
665 else
666 control |= MAC_DISABLE_RX_OWN;
667 } else { /* PHY-less op, assume full-duplex */
668 control |= MAC_FULL_DUPLEX;
669 }
670
671 writel(control, &aup->mac->control);
672 writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */
673 au_sync();
674
675 spin_unlock_irqrestore(&aup->lock, flags);
676 return 0;
677 }
678
679 static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
680 {
681 struct net_device_stats *ps = &dev->stats;
682
683 ps->rx_packets++;
684 if (status & RX_MCAST_FRAME)
685 ps->multicast++;
686
687 if (status & RX_ERROR) {
688 ps->rx_errors++;
689 if (status & RX_MISSED_FRAME)
690 ps->rx_missed_errors++;
691 if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
692 ps->rx_length_errors++;
693 if (status & RX_CRC_ERROR)
694 ps->rx_crc_errors++;
695 if (status & RX_COLL)
696 ps->collisions++;
697 } else
698 ps->rx_bytes += status & RX_FRAME_LEN_MASK;
699
700 }
701
702 /*
703 * Au1000 receive routine.
704 */
705 static int au1000_rx(struct net_device *dev)
706 {
707 struct au1000_private *aup = netdev_priv(dev);
708 struct sk_buff *skb;
709 struct rx_dma *prxd;
710 u32 buff_stat, status;
711 struct db_dest *pDB;
712 u32 frmlen;
713
714 netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
715
716 prxd = aup->rx_dma_ring[aup->rx_head];
717 buff_stat = prxd->buff_stat;
718 while (buff_stat & RX_T_DONE) {
719 status = prxd->status;
720 pDB = aup->rx_db_inuse[aup->rx_head];
721 au1000_update_rx_stats(dev, status);
722 if (!(status & RX_ERROR)) {
723
724 /* good frame */
725 frmlen = (status & RX_FRAME_LEN_MASK);
726 frmlen -= 4; /* Remove FCS */
727 skb = netdev_alloc_skb(dev, frmlen + 2);
728 if (skb == NULL) {
729 dev->stats.rx_dropped++;
730 continue;
731 }
732 skb_reserve(skb, 2); /* 16 byte IP header align */
733 skb_copy_to_linear_data(skb,
734 (unsigned char *)pDB->vaddr, frmlen);
735 skb_put(skb, frmlen);
736 skb->protocol = eth_type_trans(skb, dev);
737 netif_rx(skb); /* pass the packet to upper layers */
738 } else {
739 if (au1000_debug > 4) {
740 pr_err("rx_error(s):");
741 if (status & RX_MISSED_FRAME)
742 pr_cont(" miss");
743 if (status & RX_WDOG_TIMER)
744 pr_cont(" wdog");
745 if (status & RX_RUNT)
746 pr_cont(" runt");
747 if (status & RX_OVERLEN)
748 pr_cont(" overlen");
749 if (status & RX_COLL)
750 pr_cont(" coll");
751 if (status & RX_MII_ERROR)
752 pr_cont(" mii error");
753 if (status & RX_CRC_ERROR)
754 pr_cont(" crc error");
755 if (status & RX_LEN_ERROR)
756 pr_cont(" len error");
757 if (status & RX_U_CNTRL_FRAME)
758 pr_cont(" u control frame");
759 pr_cont("\n");
760 }
761 }
762 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
763 aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
764 au_sync();
765
766 /* next descriptor */
767 prxd = aup->rx_dma_ring[aup->rx_head];
768 buff_stat = prxd->buff_stat;
769 }
770 return 0;
771 }
772
773 static void au1000_update_tx_stats(struct net_device *dev, u32 status)
774 {
775 struct au1000_private *aup = netdev_priv(dev);
776 struct net_device_stats *ps = &dev->stats;
777
778 if (status & TX_FRAME_ABORTED) {
779 if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
780 if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
781 /* any other tx errors are only valid
782 * in half duplex mode
783 */
784 ps->tx_errors++;
785 ps->tx_aborted_errors++;
786 }
787 } else {
788 ps->tx_errors++;
789 ps->tx_aborted_errors++;
790 if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
791 ps->tx_carrier_errors++;
792 }
793 }
794 }
795
796 /*
797 * Called from the interrupt service routine to acknowledge
798  * the TX DONE bits. This is a must if the irq is set up as
799 * edge triggered.
800 */
801 static void au1000_tx_ack(struct net_device *dev)
802 {
803 struct au1000_private *aup = netdev_priv(dev);
804 struct tx_dma *ptxd;
805
806 ptxd = aup->tx_dma_ring[aup->tx_tail];
807
808 while (ptxd->buff_stat & TX_T_DONE) {
809 au1000_update_tx_stats(dev, ptxd->status);
810 ptxd->buff_stat &= ~TX_T_DONE;
811 ptxd->len = 0;
812 au_sync();
813
814 aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
815 ptxd = aup->tx_dma_ring[aup->tx_tail];
816
817 if (aup->tx_full) {
818 aup->tx_full = 0;
819 netif_wake_queue(dev);
820 }
821 }
822 }
823
824 /*
825 * Au1000 interrupt service routine.
826 */
827 static irqreturn_t au1000_interrupt(int irq, void *dev_id)
828 {
829 struct net_device *dev = dev_id;
830
831 /* Handle RX interrupts first to minimize chance of overrun */
832
833 au1000_rx(dev);
834 au1000_tx_ack(dev);
835 return IRQ_RETVAL(1);
836 }
837
838 static int au1000_open(struct net_device *dev)
839 {
840 int retval;
841 struct au1000_private *aup = netdev_priv(dev);
842
843 netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);
844
845 retval = request_irq(dev->irq, au1000_interrupt, 0,
846 dev->name, dev);
847 if (retval) {
848 netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
849 return retval;
850 }
851
852 retval = au1000_init(dev);
853 if (retval) {
854 netdev_err(dev, "error in au1000_init\n");
855 free_irq(dev->irq, dev);
856 return retval;
857 }
858
859 if (aup->phy_dev) {
860 /* cause the PHY state machine to schedule a link state check */
861 aup->phy_dev->state = PHY_CHANGELINK;
862 phy_start(aup->phy_dev);
863 }
864
865 netif_start_queue(dev);
866
867 netif_dbg(aup, drv, dev, "open: Initialization done.\n");
868
869 return 0;
870 }
871
872 static int au1000_close(struct net_device *dev)
873 {
874 unsigned long flags;
875 struct au1000_private *const aup = netdev_priv(dev);
876
877 netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
878
879 if (aup->phy_dev)
880 phy_stop(aup->phy_dev);
881
882 spin_lock_irqsave(&aup->lock, flags);
883
884 au1000_reset_mac_unlocked(dev);
885
886 /* stop the device */
887 netif_stop_queue(dev);
888
889 /* disable the interrupt */
890 free_irq(dev->irq, dev);
891 spin_unlock_irqrestore(&aup->lock, flags);
892
893 return 0;
894 }
895
896 /*
897 * Au1000 transmit routine.
898 */
899 static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
900 {
901 struct au1000_private *aup = netdev_priv(dev);
902 struct net_device_stats *ps = &dev->stats;
903 struct tx_dma *ptxd;
904 u32 buff_stat;
905 struct db_dest *pDB;
906 int i;
907
908 netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
909 (unsigned)aup, skb->len,
910 skb->data, aup->tx_head);
911
912 ptxd = aup->tx_dma_ring[aup->tx_head];
913 buff_stat = ptxd->buff_stat;
914 if (buff_stat & TX_DMA_ENABLE) {
915 /* We've wrapped around and the transmitter is still busy */
916 netif_stop_queue(dev);
917 aup->tx_full = 1;
918 return NETDEV_TX_BUSY;
919 } else if (buff_stat & TX_T_DONE) {
920 au1000_update_tx_stats(dev, ptxd->status);
921 ptxd->len = 0;
922 }
923
924 if (aup->tx_full) {
925 aup->tx_full = 0;
926 netif_wake_queue(dev);
927 }
928
929 pDB = aup->tx_db_inuse[aup->tx_head];
930 skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
931 if (skb->len < ETH_ZLEN) {
932 for (i = skb->len; i < ETH_ZLEN; i++)
933 ((char *)pDB->vaddr)[i] = 0;
934
935 ptxd->len = ETH_ZLEN;
936 } else
937 ptxd->len = skb->len;
938
939 ps->tx_packets++;
940 ps->tx_bytes += ptxd->len;
941
942 ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
943 au_sync();
944 dev_kfree_skb(skb);
945 aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
946 return NETDEV_TX_OK;
947 }
948
949 /*
950 * The Tx ring has been full longer than the watchdog timeout
951  * value. The transmitter is probably hung.
952 */
953 static void au1000_tx_timeout(struct net_device *dev)
954 {
955 netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
956 au1000_reset_mac(dev);
957 au1000_init(dev);
958 dev->trans_start = jiffies; /* prevent tx timeout */
959 netif_wake_queue(dev);
960 }
961
962 static void au1000_multicast_list(struct net_device *dev)
963 {
964 struct au1000_private *aup = netdev_priv(dev);
965 u32 reg;
966
967 netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags);
968 reg = readl(&aup->mac->control);
969 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
970 reg |= MAC_PROMISCUOUS;
971 } else if ((dev->flags & IFF_ALLMULTI) ||
972 netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
973 reg |= MAC_PASS_ALL_MULTI;
974 reg &= ~MAC_PROMISCUOUS;
975 netdev_info(dev, "Pass all multicast\n");
976 } else {
977 struct netdev_hw_addr *ha;
978 u32 mc_filter[2]; /* Multicast hash filter */
979
980 mc_filter[1] = mc_filter[0] = 0;
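		/* Build the 64-bit multicast hash filter: the top six bits
		 * of the CRC-32 of each multicast address select one bit in
		 * mc_filter[], which is then written out to the high/low
		 * hash registers.
		 */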
981 netdev_for_each_mc_addr(ha, dev)
982 set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
983 (long *)mc_filter);
984 writel(mc_filter[1], &aup->mac->multi_hash_high);
985 writel(mc_filter[0], &aup->mac->multi_hash_low);
986 reg &= ~MAC_PROMISCUOUS;
987 reg |= MAC_HASH_MODE;
988 }
989 writel(reg, &aup->mac->control);
990 }
991
992 static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
993 {
994 struct au1000_private *aup = netdev_priv(dev);
995
996 if (!netif_running(dev))
997 return -EINVAL;
998
999 if (!aup->phy_dev)
1000 return -EINVAL; /* PHY not controllable */
1001
1002 return phy_mii_ioctl(aup->phy_dev, rq, cmd);
1003 }
1004
1005 static const struct net_device_ops au1000_netdev_ops = {
1006 .ndo_open = au1000_open,
1007 .ndo_stop = au1000_close,
1008 .ndo_start_xmit = au1000_tx,
1009 .ndo_set_rx_mode = au1000_multicast_list,
1010 .ndo_do_ioctl = au1000_ioctl,
1011 .ndo_tx_timeout = au1000_tx_timeout,
1012 .ndo_set_mac_address = eth_mac_addr,
1013 .ndo_validate_addr = eth_validate_addr,
1014 .ndo_change_mtu = eth_change_mtu,
1015 };
1016
1017 static int au1000_probe(struct platform_device *pdev)
1018 {
1019 static unsigned version_printed;
1020 struct au1000_private *aup = NULL;
1021 struct au1000_eth_platform_data *pd;
1022 struct net_device *dev = NULL;
1023 struct db_dest *pDB, *pDBfree;
1024 int irq, i, err = 0;
1025 struct resource *base, *macen, *macdma;
1026
1027 base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1028 if (!base) {
1029 dev_err(&pdev->dev, "failed to retrieve base register\n");
1030 err = -ENODEV;
1031 goto out;
1032 }
1033
1034 macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1035 if (!macen) {
1036 dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
1037 err = -ENODEV;
1038 goto out;
1039 }
1040
1041 irq = platform_get_irq(pdev, 0);
1042 if (irq < 0) {
1043 dev_err(&pdev->dev, "failed to retrieve IRQ\n");
1044 err = -ENODEV;
1045 goto out;
1046 }
1047
1048 macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1049 if (!macdma) {
1050 dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n");
1051 err = -ENODEV;
1052 goto out;
1053 }
1054
1055 if (!request_mem_region(base->start, resource_size(base),
1056 pdev->name)) {
1057 dev_err(&pdev->dev, "failed to request memory region for base registers\n");
1058 err = -ENXIO;
1059 goto out;
1060 }
1061
1062 if (!request_mem_region(macen->start, resource_size(macen),
1063 pdev->name)) {
1064 dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
1065 err = -ENXIO;
1066 goto err_request;
1067 }
1068
1069 if (!request_mem_region(macdma->start, resource_size(macdma),
1070 pdev->name)) {
1071 dev_err(&pdev->dev, "failed to request MACDMA memory region\n");
1072 err = -ENXIO;
1073 goto err_macdma;
1074 }
1075
1076 dev = alloc_etherdev(sizeof(struct au1000_private));
1077 if (!dev) {
1078 err = -ENOMEM;
1079 goto err_alloc;
1080 }
1081
1082 SET_NETDEV_DEV(dev, &pdev->dev);
1083 platform_set_drvdata(pdev, dev);
1084 aup = netdev_priv(dev);
1085
1086 spin_lock_init(&aup->lock);
1087 aup->msg_enable = (au1000_debug < 4 ?
1088 AU1000_DEF_MSG_ENABLE : au1000_debug);
1089
1090 /* Allocate the data buffers
1091 * Snooping works fine with eth on all au1xxx
1092 */
1093 aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
1094 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1095 &aup->dma_addr, 0);
1096 if (!aup->vaddr) {
1097 dev_err(&pdev->dev, "failed to allocate data buffers\n");
1098 err = -ENOMEM;
1099 goto err_vaddr;
1100 }
1101
1102 /* aup->mac is the base address of the MAC's registers */
1103 aup->mac = (struct mac_reg *)
1104 ioremap_nocache(base->start, resource_size(base));
1105 if (!aup->mac) {
1106 dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
1107 err = -ENXIO;
1108 goto err_remap1;
1109 }
1110
1111 /* Setup some variables for quick register address access */
1112 aup->enable = (u32 *)ioremap_nocache(macen->start,
1113 resource_size(macen));
1114 if (!aup->enable) {
1115 dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
1116 err = -ENXIO;
1117 goto err_remap2;
1118 }
1119 aup->mac_id = pdev->id;
1120
1121 aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma));
1122 if (!aup->macdma) {
1123 dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
1124 err = -ENXIO;
1125 goto err_remap3;
1126 }
1127
1128 au1000_setup_hw_rings(aup, aup->macdma);
1129
1130 writel(0, aup->enable);
1131 aup->mac_enabled = 0;
1132
1133 pd = dev_get_platdata(&pdev->dev);
1134 if (!pd) {
1135 dev_info(&pdev->dev, "no platform_data passed,"
1136 " PHY search on MAC0\n");
1137 aup->phy1_search_mac0 = 1;
1138 } else {
1139 if (is_valid_ether_addr(pd->mac)) {
1140 memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
1141 } else {
1142                         /* Set a random MAC since no valid one was provided by platform_data. */
1143 eth_hw_addr_random(dev);
1144 }
1145
1146 aup->phy_static_config = pd->phy_static_config;
1147 aup->phy_search_highest_addr = pd->phy_search_highest_addr;
1148 aup->phy1_search_mac0 = pd->phy1_search_mac0;
1149 aup->phy_addr = pd->phy_addr;
1150 aup->phy_busid = pd->phy_busid;
1151 aup->phy_irq = pd->phy_irq;
1152 }
1153
1154 if (aup->phy_busid && aup->phy_busid > 0) {
1155                 dev_err(&pdev->dev, "MAC0-associated PHY attached to 2nd MAC's MII bus not supported yet\n");
1156 err = -ENODEV;
1157 goto err_mdiobus_alloc;
1158 }
1159
1160 aup->mii_bus = mdiobus_alloc();
1161 if (aup->mii_bus == NULL) {
1162 dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
1163 err = -ENOMEM;
1164 goto err_mdiobus_alloc;
1165 }
1166
1167 aup->mii_bus->priv = dev;
1168 aup->mii_bus->read = au1000_mdiobus_read;
1169 aup->mii_bus->write = au1000_mdiobus_write;
1170 aup->mii_bus->reset = au1000_mdiobus_reset;
1171 aup->mii_bus->name = "au1000_eth_mii";
1172 snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1173 pdev->name, aup->mac_id);
1174 aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1175 if (aup->mii_bus->irq == NULL) {
1176 err = -ENOMEM;
1177 goto err_out;
1178 }
1179
1180 for (i = 0; i < PHY_MAX_ADDR; ++i)
1181 aup->mii_bus->irq[i] = PHY_POLL;
1182 /* if known, set corresponding PHY IRQs */
1183 if (aup->phy_static_config)
1184 if (aup->phy_irq && aup->phy_busid == aup->mac_id)
1185 aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq;
1186
1187 err = mdiobus_register(aup->mii_bus);
1188 if (err) {
1189 dev_err(&pdev->dev, "failed to register MDIO bus\n");
1190 goto err_mdiobus_reg;
1191 }
1192
1193 err = au1000_mii_probe(dev);
1194 if (err != 0)
1195 goto err_out;
1196
1197 pDBfree = NULL;
1198 /* setup the data buffer descriptors and attach a buffer to each one */
1199 pDB = aup->db;
1200 for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
1201 pDB->pnext = pDBfree;
1202 pDBfree = pDB;
1203 pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
1204 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
1205 pDB++;
1206 }
1207 aup->pDBfree = pDBfree;
1208
1209 err = -ENODEV;
1210 for (i = 0; i < NUM_RX_DMA; i++) {
1211 pDB = au1000_GetFreeDB(aup);
1212 if (!pDB)
1213 goto err_out;
1214
1215 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1216 aup->rx_db_inuse[i] = pDB;
1217 }
1218
1219 err = -ENODEV;
1220 for (i = 0; i < NUM_TX_DMA; i++) {
1221 pDB = au1000_GetFreeDB(aup);
1222 if (!pDB)
1223 goto err_out;
1224
1225 aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1226 aup->tx_dma_ring[i]->len = 0;
1227 aup->tx_db_inuse[i] = pDB;
1228 }
1229
1230 dev->base_addr = base->start;
1231 dev->irq = irq;
1232 dev->netdev_ops = &au1000_netdev_ops;
1233 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1234 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1235
1236 /*
1237 * The boot code uses the ethernet controller, so reset it to start
1238 * fresh. au1000_init() expects that the device is in reset state.
1239 */
1240 au1000_reset_mac(dev);
1241
1242 err = register_netdev(dev);
1243 if (err) {
1244 netdev_err(dev, "Cannot register net device, aborting.\n");
1245 goto err_out;
1246 }
1247
1248 netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
1249 (unsigned long)base->start, irq);
1250 if (version_printed++ == 0)
1251 pr_info("%s version %s %s\n",
1252 DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1253
1254 return 0;
1255
1256 err_out:
1257 if (aup->mii_bus != NULL)
1258 mdiobus_unregister(aup->mii_bus);
1259
1260 /* here we should have a valid dev plus aup-> register addresses
1261 * so we can reset the mac properly.
1262 */
1263 au1000_reset_mac(dev);
1264
1265 for (i = 0; i < NUM_RX_DMA; i++) {
1266 if (aup->rx_db_inuse[i])
1267 au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
1268 }
1269 for (i = 0; i < NUM_TX_DMA; i++) {
1270 if (aup->tx_db_inuse[i])
1271 au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
1272 }
1273 err_mdiobus_reg:
1274 mdiobus_free(aup->mii_bus);
1275 err_mdiobus_alloc:
1276 iounmap(aup->macdma);
1277 err_remap3:
1278 iounmap(aup->enable);
1279 err_remap2:
1280 iounmap(aup->mac);
1281 err_remap1:
1282 dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
1283 (void *)aup->vaddr, aup->dma_addr);
1284 err_vaddr:
1285 free_netdev(dev);
1286 err_alloc:
1287 release_mem_region(macdma->start, resource_size(macdma));
1288 err_macdma:
1289 release_mem_region(macen->start, resource_size(macen));
1290 err_request:
1291 release_mem_region(base->start, resource_size(base));
1292 out:
1293 return err;
1294 }
1295
1296 static int au1000_remove(struct platform_device *pdev)
1297 {
1298 struct net_device *dev = platform_get_drvdata(pdev);
1299 struct au1000_private *aup = netdev_priv(dev);
1300 int i;
1301 struct resource *base, *macen;
1302
1303 unregister_netdev(dev);
1304 mdiobus_unregister(aup->mii_bus);
1305 mdiobus_free(aup->mii_bus);
1306
1307 for (i = 0; i < NUM_RX_DMA; i++)
1308 if (aup->rx_db_inuse[i])
1309 au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
1310
1311 for (i = 0; i < NUM_TX_DMA; i++)
1312 if (aup->tx_db_inuse[i])
1313 au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
1314
1315 dma_free_noncoherent(NULL, MAX_BUF_SIZE *
1316 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1317 (void *)aup->vaddr, aup->dma_addr);
1318
1319 iounmap(aup->macdma);
1320 iounmap(aup->mac);
1321 iounmap(aup->enable);
1322
1323 base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1324 release_mem_region(base->start, resource_size(base));
1325
1326 base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1327 release_mem_region(base->start, resource_size(base));
1328
1329 macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1330 release_mem_region(macen->start, resource_size(macen));
1331
1332 free_netdev(dev);
1333
1334 return 0;
1335 }
1336
1337 static struct platform_driver au1000_eth_driver = {
1338 .probe = au1000_probe,
1339 .remove = au1000_remove,
1340 .driver = {
1341 .name = "au1000-eth",
1342 .owner = THIS_MODULE,
1343 },
1344 };
1345
1346 module_platform_driver(au1000_eth_driver);
1347
1348 MODULE_ALIAS("platform:au1000-eth");