]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/ethernet/ibm/emac/core.c
net:drivers/net: Miscellaneous conversions to ETH_ALEN
[mirror_ubuntu-artful-kernel.git] / drivers / net / ethernet / ibm / emac / core.c
CommitLineData
1d3bb996 1/*
3396c782 2 * drivers/net/ethernet/ibm/emac/core.c
1d3bb996
DG
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 *
17cf803a
BH
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
1d3bb996
DG
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13 *
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
19 *
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
24 *
25 */
26
e72701ac 27#include <linux/module.h>
1d3bb996
DG
28#include <linux/sched.h>
29#include <linux/string.h>
30#include <linux/errno.h>
31#include <linux/delay.h>
32#include <linux/types.h>
33#include <linux/pci.h>
34#include <linux/etherdevice.h>
35#include <linux/skbuff.h>
36#include <linux/crc32.h>
37#include <linux/ethtool.h>
38#include <linux/mii.h>
39#include <linux/bitops.h>
40#include <linux/workqueue.h>
283029d1 41#include <linux/of.h>
4157ef1b 42#include <linux/of_net.h>
5a0e3ad6 43#include <linux/slab.h>
1d3bb996
DG
44
45#include <asm/processor.h>
46#include <asm/io.h>
47#include <asm/dma.h>
48#include <asm/uaccess.h>
0925ab5d
VB
49#include <asm/dcr.h>
50#include <asm/dcr-regs.h>
1d3bb996
DG
51
52#include "core.h"
53
54/*
55 * Lack of dma_unmap_???? calls is intentional.
56 *
57 * API-correct usage requires additional support state information to be
58 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
59 * EMAC design (e.g. TX buffer passed from network stack can be split into
60 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
61 * maintaining such information will add additional overhead.
62 * Current DMA API implementation for 4xx processors only ensures cache coherency
63 * and dma_unmap_???? routines are empty and are likely to stay this way.
64 * I decided to omit dma_unmap_??? calls because I don't want to add additional
65 * complexity just for the sake of following some abstract API, when it doesn't
66 * add any real benefit to the driver. I understand that this decision maybe
67 * controversial, but I really tried to make code API-correct and efficient
68 * at the same time and didn't come up with code I liked :(. --ebs
69 */
70
71#define DRV_NAME "emac"
72#define DRV_VERSION "3.54"
73#define DRV_DESC "PPC 4xx OCP EMAC driver"
74
75MODULE_DESCRIPTION(DRV_DESC);
76MODULE_AUTHOR
77 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
78MODULE_LICENSE("GPL");
79
80/*
81 * PPC64 doesn't (yet) have a cacheable_memcpy
82 */
83#ifdef CONFIG_PPC64
84#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
85#endif
86
87/* minimum number of free TX descriptors required to wake up TX process */
88#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
89
90/* If packet size is less than this number, we allocate small skb and copy packet
91 * contents into it instead of just sending original big skb up
92 */
3b3bceef 93#define EMAC_RX_COPY_THRESH CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
1d3bb996
DG
94
95/* Since multiple EMACs share MDIO lines in various ways, we need
96 * to avoid re-using the same PHY ID in cases where the arch didn't
97 * setup precise phy_map entries
98 *
99 * XXX This is something that needs to be reworked as we can have multiple
100 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
101 * probably require in that case to have explicit PHY IDs in the device-tree
102 */
103static u32 busy_phy_map;
104static DEFINE_MUTEX(emac_phy_map_lock);
105
106/* This is the wait queue used to wait on any event related to probe, that
107 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
108 */
109static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
110
111/* Having stable interface names is a doomed idea. However, it would be nice
112 * if we didn't have completely random interface names at boot too :-) It's
113 * just a matter of making everybody's life easier. Since we are doing
114 * threaded probing, it's a bit harder though. The base idea here is that
115 * we make up a list of all emacs in the device-tree before we register the
116 * driver. Every emac will then wait for the previous one in the list to
117 * initialize before itself. We should also keep that list ordered by
118 * cell_index.
119 * That list is only 4 entries long, meaning that additional EMACs don't
120 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
121 */
122
123#define EMAC_BOOT_LIST_SIZE 4
124static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
125
126/* How long should I wait for dependent devices ? */
127#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
128
129/* I don't want to litter system log with timeout errors
130 * when we have brain-damaged PHY.
131 */
132static inline void emac_report_timeout_error(struct emac_instance *dev,
133 const char *error)
134{
11121e30 135 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
9e3cb294 136 EMAC_FTR_460EX_PHY_CLK_FIX |
11121e30
VB
137 EMAC_FTR_440EP_PHY_CLK_FIX))
138 DBG(dev, "%s" NL, error);
139 else if (net_ratelimit())
61c7a080
GL
140 printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
141 error);
1d3bb996
DG
142}
143
11121e30
VB
144/* EMAC PHY clock workaround:
145 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
146 * which allows controlling each EMAC clock
147 */
/* 440EP PHY clock workaround: set the SDR0_MFR_ECS bit for this EMAC cell.
 * Only does anything when native DCR access is available and the instance
 * has the EMAC_FTR_440EP_PHY_CLK_FIX feature.
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}
156
/* Undo emac_rx_clk_tx(): clear the SDR0_MFR_ECS bit for this EMAC cell,
 * restoring the default clock selection.
 */
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
165
1d3bb996
DG
166/* PHY polling intervals */
167#define PHY_POLL_LINK_ON HZ
168#define PHY_POLL_LINK_OFF (HZ / 5)
169
170/* Graceful stop timeouts in us.
171 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
172 */
173#define STOP_TIMEOUT_10 1230
174#define STOP_TIMEOUT_100 124
175#define STOP_TIMEOUT_1000 13
176#define STOP_TIMEOUT_1000_JUMBO 73
177
4373c932
PB
/* IEEE 802.3x flow control (PAUSE) multicast address 01:80:C2:00:00:01 */
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
181
1d3bb996
DG
/* ethtool statistics names. Please, keep in sync with struct
 * ibm_emac_stats/ibm_emac_error_stats -- the position of each string here is
 * the index of the corresponding counter reported to userspace.
 */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
200
201static irqreturn_t emac_irq(int irq, void *dev_instance);
202static void emac_clean_tx_ring(struct emac_instance *dev);
203static void __emac_set_multicast_list(struct emac_instance *dev);
204
205static inline int emac_phy_supports_gige(int phy_mode)
206{
207 return phy_mode == PHY_MODE_GMII ||
208 phy_mode == PHY_MODE_RGMII ||
9e3cb294 209 phy_mode == PHY_MODE_SGMII ||
1d3bb996
DG
210 phy_mode == PHY_MODE_TBI ||
211 phy_mode == PHY_MODE_RTBI;
212}
213
214static inline int emac_phy_gpcs(int phy_mode)
215{
9e3cb294
VG
216 return phy_mode == PHY_MODE_SGMII ||
217 phy_mode == PHY_MODE_TBI ||
1d3bb996
DG
218 phy_mode == PHY_MODE_RTBI;
219}
220
/* Enable the EMAC transmitter by setting MR0[TXE], using a read-modify-write
 * so the other mode bits are preserved. The register is only written when the
 * bit is not already set.
 */
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}
232
/* Disable the EMAC transmitter: clear MR0[TXE] and poll (1us steps, up to
 * dev->stop_timeout iterations) for MR0[TXI] to signal that the channel has
 * become idle. A timeout is reported but otherwise not acted upon.
 */
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
252
/* Enable the EMAC receiver (MR0[RXE]), first waiting for any previous
 * asynchronous disable (see emac_rx_disable_async()) to finish, as indicated
 * by MR0[RXI]. Does nothing while the RX channel is administratively stopped
 * via MAL_COMMAC_RX_STOPPED (used e.g. by emac_resize_rx_ring()).
 */
static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			/* NOTE(review): operator precedence makes this
			 * r = (in_be32(&p->mr0) & EMAC_MR0_RXI), so after the
			 * loop r holds only the RXI bit and the write below
			 * programs mr0 = RXI | RXE, dropping the other mode
			 * bits. This matches the historical behavior of the
			 * driver -- confirm against hardware docs before
			 * changing.
			 */
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}
281
/* Disable the EMAC receiver: clear MR0[RXE] and poll (1us steps, up to
 * dev->stop_timeout iterations) for MR0[RXI] to signal that the channel has
 * become idle. A timeout is reported but otherwise not acted upon.
 */
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
301
/* Quiesce the netdev side of the driver: block multicast updates (no_mcast,
 * set under both the TX and addr locks), refresh trans_start so the stack
 * doesn't declare a TX timeout while we're stopped, disable NAPI polling and
 * stop the TX queue.
 */
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 1;
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
313
/* Undo emac_netif_stop(): re-allow multicast updates, apply any multicast
 * change that was deferred while we were stopped, wake the TX queue and
 * re-enable NAPI polling.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 * not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
333
/* Asynchronously disable the receiver: clear MR0[RXE] without waiting for
 * the channel to go idle. emac_rx_enable() later waits for MR0[RXI] before
 * re-enabling.
 */
static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}
345
/* Soft-reset the EMAC core.
 *
 * Stops the RX/TX channels first (unless a previous reset already failed),
 * applies the 460EX/GT PHY clock workaround around the reset when required,
 * then sets MR0[SRST] and polls (up to 20 iterations) for the bit to clear.
 *
 * Returns 0 on success, -ETIMEDOUT if the reset did not complete; the
 * outcome is also latched in dev->reset_failed.
 */
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

#ifdef CONFIG_PPC_DCR_NATIVE
	/*
	 * PPC460EX/GT Embedded Processor Advanced User's Manual
	 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
	 * Note: The PHY must provide a TX Clk in order to perform a soft reset
	 * of the EMAC. If none is present, select the internal clock
	 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
	 * After a soft reset, select the external clock.
	 */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (dev->phy_address == 0xffffffff &&
		    dev->phy_map == 0xffffffff) {
			/* No PHY: select internal loop clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    0, SDR0_ETH_CFG_ECS << dev->cell_index);
		} else {
			/* PHY present: select external clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (dev->phy_address == 0xffffffff &&
		    dev->phy_map == 0xffffffff) {
			/* No PHY: restore external clock source after reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
408
/* Program the multicast group address hash table (GAHT).
 *
 * Builds the table in a temporary array -- one bit per hash slot, slot
 * derived from the CRC of each multicast address on the device -- and then
 * writes all GAHT registers in one pass.
 */
static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	/* NOTE(review): variable-length array on the kernel stack; size is
	 * bounded by EMAC_XAHT_REGS(dev) -- presumably small, but a
	 * fixed-size upper bound would avoid the VLA. Verify against core.h.
	 */
	u32 gaht_temp[regs];
	struct netdev_hw_addr *ha;
	int i;

	DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));

	memset(gaht_temp, 0, sizeof (gaht_temp));

	netdev_for_each_mc_addr(ha, dev->ndev) {
		int slot, reg, mask;
		DBG2(dev, "mc %pM" NL, ha->addr);

		/* Hash the address into a slot, then split the slot into a
		 * register index and a bit mask within that register.
		 */
		slot = EMAC_XAHT_CRC_TO_SLOT(dev,
					     ether_crc(ETH_ALEN, ha->addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}
436
/* Translate netdev interface flags (IFF_*) into an EMAC receive mode
 * register (RMR) value: promiscuous -> PME, all-multi or more multicast
 * addresses than hash slots -> PMME, otherwise a non-empty multicast list
 * enables hash matching (MAE). Also folds in the per-core base bits and,
 * on APM821xx, the jumbo frame size field derived from the current MTU.
 */
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_RMR_BASE;
	else
		r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI ||
			 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
		r |= EMAC_RMR_PMME;
	else if (!netdev_mc_empty(ndev))
		r |= EMAC_RMR_MAE;

	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
		r &= ~EMAC4_RMR_MJS_MASK;
		r |= EMAC4_RMR_MJS(ndev->mtu);
	}

	return r;
}
464
/* Compute the base MR1 value for "classic" (non-EMAC4) cores: fixed
 * VLE/IST/TR0_MULT bits plus the encoding of the TX and RX FIFO sizes.
 * Unsupported sizes are logged and simply leave the field unset.
 */
static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
494
/* Compute the base MR1 value for EMAC4 cores: fixed VLE/IST/TR bits, the
 * OPB bus clock indication (in MHz) and the encoding of the TX and RX FIFO
 * sizes. Unsupported sizes are logged and simply leave the field unset.
 */
static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 16384:
		ret |= EMAC4_MR1_TFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
534
535static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
536{
537 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
538 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
539 __emac_calc_base_mr1(dev, tx_size, rx_size);
540}
541
542static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
543{
544 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
545 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
546 else
547 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
548}
549
550static inline u32 emac_calc_rwmr(struct emac_instance *dev,
551 unsigned int low, unsigned int high)
552{
553 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
554 return (low << 22) | ( (high & 0x3ff) << 6);
555 else
556 return (low << 23) | ( (high & 0x1ff) << 7);
557}
558
/* (Re)program the whole EMAC for the current PHY state.
 *
 * With link up the chip is soft-reset first; with link down the MAC is put
 * in full-duplex internal loopback instead. Then MR1, the MAC address,
 * VLAN TPID, receive mode, FIFO thresholds/watermarks, PAUSE timer and the
 * interrupt enable mask are all programmed from scratch.
 *
 * Returns 0 on success or -ETIMEDOUT if the reset failed.
 */
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
				(dev->phy.gpcs_address != 0xffffffff) ?
				 dev->phy.gpcs_address : dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register; program the hash table before enabling MAE */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow the our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						 EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode)) {
		if (dev->phy.gpcs_address != 0xffffffff)
			emac_mii_reset_gpcs(&dev->phy);
		else
			emac_mii_reset_phy(&dev->phy);
	}

	return 0;
}
721
/* Stop the netdev side, reprogram the whole EMAC and, if that succeeded,
 * re-enable the TX/RX channels before restarting the netdev side.
 */
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}
733
/* Full TX path reset: disable TX at the MAC and MAL levels, free all
 * pending TX skbs and reset the ring indices, reprogram the EMAC, then
 * re-enable everything (RX included, since emac_configure() resets the
 * whole chip).
 */
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
749
/* Deferred-reset work item (scheduled from emac_tx_timeout()). Performs a
 * full TX reset, but only while the device is open; dev->link_lock
 * serializes this against link-state changes.
 */
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
764
/* ndo_tx_timeout hook: runs in atomic context, so just schedule the
 * reset work item rather than resetting inline.
 */
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
773
774
775static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
776{
777 int done = !!(stacr & EMAC_STACR_OC);
778
779 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
780 done = !done;
781
782 return done;
783};
784
/* Read one PHY register over the shared MDIO interface.
 *
 * Serialized by dev->mdio_lock. Claims the MDIO lines from the ZMII/RGMII
 * bridge, waits (up to 20us) for the interface to be idle, issues the read
 * via STACR, then waits (up to 200us) for completion.
 *
 * Returns the 16-bit register value on success, -ETIMEDOUT on interface
 * timeout or -EREMOTEIO when the PHY flags an error (STACR[PHYE]).
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	/* Release the MDIO lines in the reverse order they were claimed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
855
/* Write one PHY register over the shared MDIO interface.
 *
 * Same structure as __emac_mdio_read(): serialized by dev->mdio_lock,
 * claims the MDIO lines, waits (up to 20us) for idle, issues the write via
 * STACR and waits (up to 200us) for completion. Failures are only visible
 * via debug output -- the function returns void.
 */
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	/* Release the MDIO lines in the reverse order they were claimed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
916
917static int emac_mdio_read(struct net_device *ndev, int id, int reg)
918{
919 struct emac_instance *dev = netdev_priv(ndev);
920 int res;
921
9e3cb294
VG
922 res = __emac_mdio_read((dev->mdio_instance &&
923 dev->phy.gpcs_address != id) ?
924 dev->mdio_instance : dev,
1d3bb996
DG
925 (u8) id, (u8) reg);
926 return res;
927}
928
929static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
930{
931 struct emac_instance *dev = netdev_priv(ndev);
932
9e3cb294
VG
933 __emac_mdio_write((dev->mdio_instance &&
934 dev->phy.gpcs_address != id) ?
935 dev->mdio_instance : dev,
1d3bb996
DG
936 (u8) id, (u8) reg, (u16) val);
937}
938
/* Tx lock BH */
/* Apply the current multicast/promiscuous configuration: briefly stop the
 * RX channel, update the hash table if hash matching is enabled, program
 * RMR and restart RX. Must be called with the TX lock held (BH disabled).
 */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs. --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
971
972/* Tx lock BH */
973static void emac_set_multicast_list(struct net_device *ndev)
974{
975 struct emac_instance *dev = netdev_priv(ndev);
976
977 DBG(dev, "multicast" NL);
978
979 BUG_ON(!netif_running(dev->ndev));
980
981 if (dev->no_mcast) {
982 dev->mcast_pending = 1;
983 return;
984 }
985 __emac_set_multicast_list(dev);
986}
987
/* Resize the RX ring buffers for a new MTU.
 *
 * Stops RX (netif + MAC + MAL), drops any in-flight scatter-gather skb,
 * resets all RX descriptors, reallocates skbs only when larger buffers are
 * needed, toggles the MR1 jumbo bit via a full TX reset when the MTU
 * crosses ETH_DATA_LEN, and finally restarts RX.
 *
 * Returns 0 on success or -ENOMEM if an skb allocation failed; on failure
 * RX is still restarted with whatever buffers were (re)allocated so far.
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;
	int mr1_jumbo_bit_change = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* +2 keeps the IP header word-aligned after the 14-byte
		 * Ethernet header; the DMA address is offset to match.
		 */
		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
				(dev->ndev->mtu > ETH_DATA_LEN);
	} else {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
				(dev->ndev->mtu > ETH_DATA_LEN);
	}

	if (mr1_jumbo_bit_change) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
1070
1071/* Process ctx, rtnl_lock semaphore */
1072static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1073{
1074 struct emac_instance *dev = netdev_priv(ndev);
1075 int ret = 0;
1076
1077 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1078 return -EINVAL;
1079
1080 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1081
1082 if (netif_running(ndev)) {
421f91d2 1083 /* Check if we really need to reinitialize RX ring */
1d3bb996
DG
1084 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1085 ret = emac_resize_rx_ring(dev, new_mtu);
1086 }
1087
1088 if (!ret) {
1089 ndev->mtu = new_mtu;
1090 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1091 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1092 }
1093
1094 return ret;
1095}
1096
1097static void emac_clean_tx_ring(struct emac_instance *dev)
1098{
1099 int i;
1100
1101 for (i = 0; i < NUM_TX_BUFF; ++i) {
1102 if (dev->tx_skb[i]) {
1103 dev_kfree_skb(dev->tx_skb[i]);
1104 dev->tx_skb[i] = NULL;
1105 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1106 ++dev->estats.tx_dropped;
1107 }
1108 dev->tx_desc[i].ctrl = 0;
1109 dev->tx_desc[i].data_ptr = 0;
1110 }
1111}
1112
1113static void emac_clean_rx_ring(struct emac_instance *dev)
1114{
1115 int i;
1116
1117 for (i = 0; i < NUM_RX_BUFF; ++i)
1118 if (dev->rx_skb[i]) {
1119 dev->rx_desc[i].ctrl = 0;
1120 dev_kfree_skb(dev->rx_skb[i]);
1121 dev->rx_skb[i] = NULL;
1122 dev->rx_desc[i].data_ptr = 0;
1123 }
1124
1125 if (dev->rx_sg_skb) {
1126 dev_kfree_skb(dev->rx_sg_skb);
1127 dev->rx_sg_skb = NULL;
1128 }
1129}
1130
/* Allocate and DMA-map a fresh skb for RX ring slot @slot, then hand the
 * descriptor back to the hardware.  Returns 0 on success, -ENOMEM on
 * allocation failure (slot left untouched in that case is NOT guaranteed:
 * rx_skb/desc are only written after a successful alloc).
 */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	/* Reserve headroom +2 so the IP header lands word-aligned; the DMA
	 * mapping starts 2 bytes early and the descriptor pointer is bumped
	 * back by 2 to compensate.
	 */
	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	/* Publish data_ptr/data_len before flipping ownership to hardware */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
1151
1152static void emac_print_link_status(struct emac_instance *dev)
1153{
1154 if (netif_carrier_ok(dev->ndev))
1155 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1156 dev->ndev->name, dev->phy.speed,
1157 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1158 dev->phy.pause ? ", pause enabled" :
1159 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1160 else
1161 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1162}
1163
/* Process ctx, rtnl_lock semaphore
 *
 * ndo_open hook: request the error IRQ, populate the RX ring, start PHY
 * link polling (if a PHY is attached), and bring up the MAL channels and
 * the MAC.  Returns 0 or a negative errno.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	/* Reset all ring bookkeeping */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		/* Ensure link_polling is visible before the work can run */
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add_global(ndev, default_mcast_addr);

	/* Program the MAC and bring up both MAL channels */
	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
1237
/* BHs disabled */
#if 0
/* Dead code kept for reference (compiled out): derive the link parameters
 * implied by the current MR1 register and report whether they differ from
 * the cached PHY state.
 */
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	/* EIFC+APP => symmetric pause, APP alone => asymmetric pause */
	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1270
/* Delayed-work handler that polls the PHY for link changes.
 *
 * On link-up it re-reads the link parameters and performs a full TX reset
 * to reprogram the MAC; on link-down it reinitializes the chip.  It then
 * reschedules itself at a rate depending on the carrier state, unless the
 * device has been closed (dev->opened == 0).
 */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of(to_delayed_work(work),
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	/* Device was closed while this work was queued; don't reschedule */
	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			/* Full reset applies the new speed/duplex to the MAC */
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
1311
/* Force the link state machine to re-evaluate soon.
 *
 * Drops carrier, then cancels and (if polling is still enabled — the
 * second link_polling check guards against a concurrent emac_close())
 * immediately reschedules the link work.
 */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	/* Pair with the wmb() after setting link_polling in emac_open() */
	smp_rmb();
	if (dev->link_polling) {
		cancel_delayed_work_sync(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
	}
}
1322
/* Process ctx, rtnl_lock semaphore
 *
 * ndo_stop hook: stop link polling, quiesce the MAC and MAL channels,
 * free both rings and the error IRQ.  Always returns 0.
 */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	/* Stop the PHY poll work before tearing anything down */
	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_delayed_work_sync(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	/* A queued link_work will now bail out early */
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	netif_carrier_off(ndev);

	return 0;
}
1354
1355static inline u16 emac_tx_csum(struct emac_instance *dev,
1356 struct sk_buff *skb)
1357{
e66f4168
VB
1358 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1359 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1d3bb996
DG
1360 ++dev->stats.tx_packets_csum;
1361 return EMAC_TX_CTRL_TAH_CSUM;
1362 }
1363 return 0;
1364}
1365
/* Common tail of the xmit paths: kick the hardware transmitter, update
 * ring accounting and stats, stop the queue when the ring fills.
 * Always returns NETDEV_TX_OK (the descriptor is already owned by hw).
 */
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	/* Ring just became full: stop the stack until emac_poll_tx() reaps */
	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return NETDEV_TX_OK;
}
1391
1392/* Tx lock BH */
1393static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1394{
1395 struct emac_instance *dev = netdev_priv(ndev);
1396 unsigned int len = skb->len;
1397 int slot;
1398
1399 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1400 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1401
1402 slot = dev->tx_slot++;
1403 if (dev->tx_slot == NUM_TX_BUFF) {
1404 dev->tx_slot = 0;
1405 ctrl |= MAL_TX_CTRL_WRAP;
1406 }
1407
1408 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1409
1410 dev->tx_skb[slot] = skb;
1411 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1412 skb->data, len,
1413 DMA_TO_DEVICE);
1414 dev->tx_desc[slot].data_len = (u16) len;
1415 wmb();
1416 dev->tx_desc[slot].ctrl = ctrl;
1417
1418 return emac_xmit_finish(dev, len);
1419}
1420
1d3bb996
DG
/* Split a DMA region of @len bytes starting at bus address @pd into
 * MAL_MAX_TX_SIZE-sized descriptors, starting after @slot.  @last marks
 * whether the final chunk should carry MAL_TX_CTRL_LAST.  Returns the
 * slot of the last descriptor written.  Caller accounts ring space.
 */
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		/* Intermediate chunks don't own the skb; only the caller's
		 * final slot does
		 */
		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
1449
/* Tx lock BH disabled (SG version for TAH equipped EMACs)
 *
 * ndo_start_xmit for scatter-gather capable EMACs.  The linear part and
 * each fragment are mapped and split into MAL-sized chunks; the first
 * descriptor's READY bit is set last so the hardware only sees a complete
 * frame.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY when the ring is full.
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		/* Re-check ring space per fragment; undo on overflow */
		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
				      DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	/* All later descriptors are in place; now arm the first one */
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return NETDEV_TX_BUSY;
}
1d3bb996
DG
1533
/* Tx lock BHs
 *
 * Decode a bad TX descriptor status word into the per-error counters.
 */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
1561
/* MAL poll_tx callback: reap completed TX descriptors.
 *
 * Walks forward from ack_slot freeing skbs for descriptors the hardware
 * has released (READY cleared), records descriptor errors, and wakes the
 * TX queue once enough slots have drained.
 */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* Split frames leave NULL in intermediate slots */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
1607
/* Hand an RX descriptor back to the hardware, reusing its existing skb.
 * When @len is non-zero the buffer is re-mapped to resync it for the next
 * DMA (lack of a matching unmap is intentional — see file header).
 */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	/* Publish data_len before flipping ownership back to hardware */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1624
/* Decode a bad RX descriptor status word into the per-error counters. */
static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
1651
/* Mark @skb as checksum-verified when the TAH reported no csum error
 * (@ctrl == 0 after masking) and a TAH device is present.  No-op when the
 * driver is built without TAH support.
 */
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
1662
/* Append the data in RX slot @slot to the in-progress scatter-gather
 * packet (dev->rx_sg_skb).  Returns 0 on success; -1 if there is no
 * packet in progress or it would exceed the skb size (the partial packet
 * is then dropped).  The slot is recycled in all cases.
 */
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		/* +2 accounts for the alignment slack reserved up front */
		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
1684
/* NAPI poll context
 *
 * Main RX processing loop: consume up to @budget descriptors starting at
 * dev->rx_slot.  Small packets are copied into a fresh skb and the ring
 * buffer recycled; larger ones are passed up directly and replaced.
 * Multi-descriptor (scatter-gather) frames are assembled via rx_sg_skb.
 * If RX was stopped (MAL_COMMAC_RX_STOPPED) and budget remains, the
 * channel is drained and restarted.  Returns the number of descriptors
 * processed.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		/* Descriptor still owned by hardware: nothing more to do */
		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* Read data_len only after seeing the ownership flip */
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		/* Runt frame: too short to even hold an Ethernet header */
		if (len < ETH_HLEN) {
			++dev->estats.rx_dropped_stack;
			emac_recycle_rx_skb(dev, slot, len);
			goto next;
		}

		/* Small packet: copy out and keep the ring buffer */
		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		/* Scatter-gather frame: FIRST starts assembly, LAST ends it */
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	/* RX channel was stopped (e.g. by emac_rxde); drain and restart it */
	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1815
1816/* NAPI poll context */
1817static int emac_peek_rx(void *param)
1818{
1819 struct emac_instance *dev = param;
1820
1821 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1822}
1823
1824/* NAPI poll context */
1825static int emac_peek_rx_sg(void *param)
1826{
1827 struct emac_instance *dev = param;
1828
1829 int slot = dev->rx_slot;
1830 while (1) {
1831 u16 ctrl = dev->rx_desc[slot].ctrl;
1832 if (ctrl & MAL_RX_CTRL_EMPTY)
1833 return 0;
1834 else if (ctrl & MAL_RX_CTRL_LAST)
1835 return 1;
1836
1837 slot = (slot + 1) % NUM_RX_BUFF;
1838
1839 /* I'm just being paranoid here :) */
1840 if (unlikely(slot == dev->rx_slot))
1841 return 0;
1842 }
1843}
1844
/* Hard IRQ
 *
 * MAL RX-descriptor-error callback: count the stall and schedule an
 * asynchronous RX disable (the poll loop restarts the channel later).
 */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
1853
/* Hard IRQ
 *
 * EMAC error interrupt handler: acknowledge the interrupt status register
 * and translate each status bit into its error counter.  No recovery is
 * attempted here.
 */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	/* Reading then writing ISR back acks all reported conditions */
	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
1904
/* ndo_get_stats hook: fold the driver's detailed counters into the
 * standard struct net_device_stats under the instance lock.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
1957
/* MAL callbacks for EMACs using the single-descriptor RX peek */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};
1964
/* MAL callbacks for EMACs using the scatter-gather-aware RX peek */
static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
1971
/* Ethtool support */

/* ethtool get_settings: report the cached PHY capabilities and the
 * current advertising/autoneg/speed/duplex under the link lock.
 */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	/* Negative PHY address means the internal (PHY-less) setup */
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}
1993
/* ethtool set_settings: validate the requested mode against the PHY's
 * capabilities, then either force speed/duplex or restart autonegotiation
 * with the given advertising mask, and kick the link state machine.
 */
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		/* Forced mode: the requested speed/duplex combination must
		 * be supported by the PHY
		 */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		/* Advertise only supported modes, preserving the current
		 * pause settings
		 */
		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
2064
2065static void emac_ethtool_get_ringparam(struct net_device *ndev,
2066 struct ethtool_ringparam *rp)
2067{
2068 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2069 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2070}
2071
/* ethtool get_pauseparam: derive the pause configuration from the cached
 * PHY state under the link lock.
 */
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	/* Pause is only meaningful in full duplex */
	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}
2090
1d3bb996
DG
2091static int emac_get_regs_len(struct emac_instance *dev)
2092{
2093 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2094 return sizeof(struct emac_ethtool_regs_subhdr) +
05781ccd 2095 EMAC4_ETHTOOL_REGS_SIZE(dev);
1d3bb996
DG
2096 else
2097 return sizeof(struct emac_ethtool_regs_subhdr) +
05781ccd 2098 EMAC_ETHTOOL_REGS_SIZE(dev);
1d3bb996
DG
2099}
2100
/* ethtool get_regs_len: total dump size — header, EMAC and MAL register
 * blocks, plus each optional companion block (ZMII/RGMII/TAH) present on
 * this instance.
 */
static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int size;

	size = sizeof(struct emac_ethtool_regs_hdr) +
		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		size += zmii_get_regs_len(dev->zmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		size += rgmii_get_regs_len(dev->rgmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		size += tah_get_regs_len(dev->tah_dev);

	return size;
}
2117
/* Copy this EMAC's registers into the dump buffer at @buf, preceded by a
 * subheader identifying the cell and layout version.  Returns the first
 * byte past what was written.
 */
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
		return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
		return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
	}
}
2133
/* ethtool get_regs: fill @buf with the global header followed by the
 * MAL, EMAC and (when present) ZMII/RGMII/TAH register blocks.  Each
 * present component is flagged in hdr->components so userspace can
 * parse the concatenated sub-blocks.  The dump order here must match
 * the size computation in emac_ethtool_get_regs_len(). */
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	/* each dump helper returns the position just past its block */
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
2158
/* ethtool nway_reset: restart autonegotiation on the attached PHY.
 *
 * Returns -EOPNOTSUPP for PHY-less configurations (phy.address < 0)
 * and -EINVAL when autonegotiation is not currently enabled. */
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	/* re-kick aneg with the currently advertised capabilities */
	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	/* force_link_update is called outside the lock; it triggers the
	 * link timer to pick up the new PHY state */
	emac_force_link_update(dev);
	return res;
}
2181
15f0a394 2182static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
1d3bb996 2183{
15f0a394
BH
2184 if (stringset == ETH_SS_STATS)
2185 return EMAC_ETHTOOL_STATS_COUNT;
2186 else
2187 return -EINVAL;
1d3bb996
DG
2188}
2189
2190static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2191 u8 * buf)
2192{
2193 if (stringset == ETH_SS_STATS)
2194 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2195}
2196
2197static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2198 struct ethtool_stats *estats,
2199 u64 * tmp_stats)
2200{
2201 struct emac_instance *dev = netdev_priv(ndev);
2202
2203 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2204 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2205 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2206}
2207
/* ethtool get_drvinfo: report driver name/version plus a bus string
 * built from the EMAC cell index and its device-tree node path. */
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct emac_instance *dev = netdev_priv(ndev);

	strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
		 dev->cell_index, dev->ofdev->dev.of_node->full_name);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
2219
/* ethtool entry points implemented by this driver.  Stats, regs dump,
 * pause/ring reporting and nway reset are provided above; link state
 * uses the generic helper. */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_strings = emac_ethtool_get_strings,
	.get_sset_count = emac_ethtool_get_sset_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
};
2239
/* Legacy MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) forwarded to
 * the driver's MDIO accessors.  Unavailable in PHY-less configurations
 * (phy.address < 0). */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct mii_ioctl_data *data = if_mii(rq);

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = dev->phy.address;
		/* Fall through: GMIIPHY also returns the register value */
	case SIOCGMIIREG:
		data->val_out = emac_mdio_read(ndev, dev->phy.address,
					       data->reg_num);
		return 0;

	case SIOCSMIIREG:
		emac_mdio_write(ndev, dev->phy.address, data->reg_num,
				data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2267
/* Resolution state of one probe-time dependency (MAL, ZMII, ...):
 * device-tree phandle -> node -> platform device -> driver data.
 * Fields are filled in incrementally by emac_check_deps(). */
struct emac_depentry {
	u32 phandle;			/* 0 means "no such dependency" */
	struct device_node *node;
	struct platform_device *ofdev;
	void *drvdata;			/* non-NULL once the driver bound */
};

/* Indices into the dependency array built by emac_wait_deps() */
#define EMAC_DEP_MAL_IDX	0
#define EMAC_DEP_ZMII_IDX	1
#define EMAC_DEP_RGMII_IDX	2
#define EMAC_DEP_TAH_IDX	3
#define EMAC_DEP_MDIO_IDX	4
#define EMAC_DEP_PREV_IDX	5	/* previous EMAC in the boot list */
#define EMAC_DEP_COUNT		6
2282
fe17dc1e 2283static int emac_check_deps(struct emac_instance *dev,
1dd06ae8 2284 struct emac_depentry *deps)
1d3bb996
DG
2285{
2286 int i, there = 0;
2287 struct device_node *np;
2288
2289 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2290 /* no dependency on that item, allright */
2291 if (deps[i].phandle == 0) {
2292 there++;
2293 continue;
2294 }
2295 /* special case for blist as the dependency might go away */
2296 if (i == EMAC_DEP_PREV_IDX) {
2297 np = *(dev->blist - 1);
2298 if (np == NULL) {
2299 deps[i].phandle = 0;
2300 there++;
2301 continue;
2302 }
2303 if (deps[i].node == NULL)
2304 deps[i].node = of_node_get(np);
2305 }
2306 if (deps[i].node == NULL)
2307 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2308 if (deps[i].node == NULL)
2309 continue;
2310 if (deps[i].ofdev == NULL)
2311 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2312 if (deps[i].ofdev == NULL)
2313 continue;
2314 if (deps[i].drvdata == NULL)
bc353832 2315 deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
1d3bb996
DG
2316 if (deps[i].drvdata != NULL)
2317 there++;
2318 }
807540ba 2319 return there == EMAC_DEP_COUNT;
1d3bb996
DG
2320}
2321
/* Drop the platform-device references taken over from emac_wait_deps()
 * when the instance is torn down (or probe fails late). */
static void emac_put_deps(struct emac_instance *dev)
{
	if (dev->mal_dev)
		of_dev_put(dev->mal_dev);
	if (dev->zmii_dev)
		of_dev_put(dev->zmii_dev);
	if (dev->rgmii_dev)
		of_dev_put(dev->rgmii_dev);
	if (dev->mdio_dev)
		of_dev_put(dev->mdio_dev);
	if (dev->tah_dev)
		of_dev_put(dev->tah_dev);
}
2335
1dd06ae8
GKH
/* Bus notifier callback: wake any EMAC probe waiting on dependencies
 * whenever a driver binds to a device, so emac_wait_deps() re-checks. */
static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	/* We are only interested in drivers getting bound to devices */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}
2344
fe17dc1e 2345static struct notifier_block emac_of_bus_notifier = {
1d3bb996
DG
2346 .notifier_call = emac_of_bus_notify
2347};
2348
fe17dc1e 2349static int emac_wait_deps(struct emac_instance *dev)
1d3bb996
DG
2350{
2351 struct emac_depentry deps[EMAC_DEP_COUNT];
2352 int i, err;
2353
2354 memset(&deps, 0, sizeof(deps));
2355
2356 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2357 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2358 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2359 if (dev->tah_ph)
2360 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2361 if (dev->mdio_ph)
2362 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2363 if (dev->blist && dev->blist > emac_boot_list)
2364 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
1ab1d63a 2365 bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
1d3bb996
DG
2366 wait_event_timeout(emac_probe_wait,
2367 emac_check_deps(dev, deps),
2368 EMAC_PROBE_DEP_TIMEOUT);
1ab1d63a 2369 bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
1d3bb996
DG
2370 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2371 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2372 if (deps[i].node)
2373 of_node_put(deps[i].node);
2374 if (err && deps[i].ofdev)
2375 of_dev_put(deps[i].ofdev);
2376 }
2377 if (err == 0) {
2378 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2379 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2380 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2381 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2382 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2383 }
2384 if (deps[EMAC_DEP_PREV_IDX].ofdev)
2385 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2386 return err;
2387}
2388
fe17dc1e 2389static int emac_read_uint_prop(struct device_node *np, const char *name,
1dd06ae8 2390 u32 *val, int fatal)
1d3bb996
DG
2391{
2392 int len;
2393 const u32 *prop = of_get_property(np, name, &len);
2394 if (prop == NULL || len < sizeof(u32)) {
2395 if (fatal)
2396 printk(KERN_ERR "%s: missing %s property\n",
2397 np->full_name, name);
2398 return -ENODEV;
2399 }
2400 *val = *prop;
2401 return 0;
2402}
2403
fe17dc1e 2404static int emac_init_phy(struct emac_instance *dev)
1d3bb996 2405{
61c7a080 2406 struct device_node *np = dev->ofdev->dev.of_node;
1d3bb996
DG
2407 struct net_device *ndev = dev->ndev;
2408 u32 phy_map, adv;
2409 int i;
2410
2411 dev->phy.dev = ndev;
2412 dev->phy.mode = dev->phy_mode;
2413
2414 /* PHY-less configuration.
2415 * XXX I probably should move these settings to the dev tree
2416 */
2417 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2418 emac_reset(dev);
2419
2420 /* PHY-less configuration.
2421 * XXX I probably should move these settings to the dev tree
2422 */
2423 dev->phy.address = -1;
9e3cb294
VG
2424 dev->phy.features = SUPPORTED_MII;
2425 if (emac_phy_supports_gige(dev->phy_mode))
2426 dev->phy.features |= SUPPORTED_1000baseT_Full;
2427 else
2428 dev->phy.features |= SUPPORTED_100baseT_Full;
1d3bb996
DG
2429 dev->phy.pause = 1;
2430
2431 return 0;
2432 }
2433
2434 mutex_lock(&emac_phy_map_lock);
2435 phy_map = dev->phy_map | busy_phy_map;
2436
2437 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2438
2439 dev->phy.mdio_read = emac_mdio_read;
2440 dev->phy.mdio_write = emac_mdio_write;
2441
0925ab5d
VB
2442 /* Enable internal clock source */
2443#ifdef CONFIG_PPC_DCR_NATIVE
2444 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2445 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
11121e30
VB
2446#endif
2447 /* PHY clock workaround */
2448 emac_rx_clk_tx(dev);
2449
2450 /* Enable internal clock source on 440GX*/
2451#ifdef CONFIG_PPC_DCR_NATIVE
2452 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2453 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
0925ab5d 2454#endif
1d3bb996
DG
2455 /* Configure EMAC with defaults so we can at least use MDIO
2456 * This is needed mostly for 440GX
2457 */
2458 if (emac_phy_gpcs(dev->phy.mode)) {
2459 /* XXX
2460 * Make GPCS PHY address equal to EMAC index.
2461 * We probably should take into account busy_phy_map
2462 * and/or phy_map here.
2463 *
2464 * Note that the busy_phy_map is currently global
2465 * while it should probably be per-ASIC...
2466 */
9e3cb294
VG
2467 dev->phy.gpcs_address = dev->gpcs_address;
2468 if (dev->phy.gpcs_address == 0xffffffff)
2469 dev->phy.address = dev->cell_index;
1d3bb996
DG
2470 }
2471
2472 emac_configure(dev);
2473
2474 if (dev->phy_address != 0xffffffff)
2475 phy_map = ~(1 << dev->phy_address);
2476
2477 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2478 if (!(phy_map & 1)) {
2479 int r;
2480 busy_phy_map |= 1 << i;
2481
2482 /* Quick check if there is a PHY at the address */
2483 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2484 if (r == 0xffff || r < 0)
2485 continue;
2486 if (!emac_mii_phy_probe(&dev->phy, i))
2487 break;
2488 }
0925ab5d
VB
2489
2490 /* Enable external clock source */
2491#ifdef CONFIG_PPC_DCR_NATIVE
2492 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2493 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2494#endif
1d3bb996
DG
2495 mutex_unlock(&emac_phy_map_lock);
2496 if (i == 0x20) {
2497 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2498 return -ENXIO;
2499 }
2500
2501 /* Init PHY */
2502 if (dev->phy.def->ops->init)
2503 dev->phy.def->ops->init(&dev->phy);
2504
2505 /* Disable any PHY features not supported by the platform */
2506 dev->phy.def->features &= ~dev->phy_feat_exc;
ae5d3372 2507 dev->phy.features &= ~dev->phy_feat_exc;
1d3bb996
DG
2508
2509 /* Setup initial link parameters */
2510 if (dev->phy.features & SUPPORTED_Autoneg) {
2511 adv = dev->phy.features;
2512 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2513 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2514 /* Restart autonegotiation */
2515 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2516 } else {
2517 u32 f = dev->phy.def->features;
2518 int speed = SPEED_10, fd = DUPLEX_HALF;
2519
2520 /* Select highest supported speed/duplex */
2521 if (f & SUPPORTED_1000baseT_Full) {
2522 speed = SPEED_1000;
2523 fd = DUPLEX_FULL;
2524 } else if (f & SUPPORTED_1000baseT_Half)
2525 speed = SPEED_1000;
2526 else if (f & SUPPORTED_100baseT_Full) {
2527 speed = SPEED_100;
2528 fd = DUPLEX_FULL;
2529 } else if (f & SUPPORTED_100baseT_Half)
2530 speed = SPEED_100;
2531 else if (f & SUPPORTED_10baseT_Full)
2532 fd = DUPLEX_FULL;
2533
2534 /* Force link parameters */
2535 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2536 }
2537 return 0;
2538}
2539
fe17dc1e 2540static int emac_init_config(struct emac_instance *dev)
1d3bb996 2541{
61c7a080 2542 struct device_node *np = dev->ofdev->dev.of_node;
1d3bb996 2543 const void *p;
1d3bb996
DG
2544
2545 /* Read config from device-tree */
2546 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2547 return -ENXIO;
2548 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2549 return -ENXIO;
2550 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2551 return -ENXIO;
2552 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2553 return -ENXIO;
2554 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2555 dev->max_mtu = 1500;
2556 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2557 dev->rx_fifo_size = 2048;
2558 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2559 dev->tx_fifo_size = 2048;
2560 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2561 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2562 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2563 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2564 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2565 dev->phy_address = 0xffffffff;
2566 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2567 dev->phy_map = 0xffffffff;
9e3cb294
VG
2568 if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
2569 dev->gpcs_address = 0xffffffff;
1d3bb996
DG
2570 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2571 return -ENXIO;
2572 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2573 dev->tah_ph = 0;
2574 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
63b6cad7 2575 dev->tah_port = 0;
1d3bb996
DG
2576 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2577 dev->mdio_ph = 0;
2578 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
a419aef8 2579 dev->zmii_ph = 0;
1d3bb996 2580 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
a419aef8 2581 dev->zmii_port = 0xffffffff;
1d3bb996 2582 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
a419aef8 2583 dev->rgmii_ph = 0;
1d3bb996 2584 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
a419aef8 2585 dev->rgmii_port = 0xffffffff;
1d3bb996
DG
2586 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2587 dev->fifo_entry_size = 16;
2588 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2589 dev->mal_burst_size = 256;
2590
2591 /* PHY mode needs some decoding */
4157ef1b
SG
2592 dev->phy_mode = of_get_phy_mode(np);
2593 if (dev->phy_mode < 0)
2594 dev->phy_mode = PHY_MODE_NA;
1d3bb996
DG
2595
2596 /* Check EMAC version */
05781ccd
GE
2597 if (of_device_is_compatible(np, "ibm,emac4sync")) {
2598 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
9e3cb294
VG
2599 if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2600 of_device_is_compatible(np, "ibm,emac-460gt"))
2601 dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
9616a755
BH
2602 if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2603 of_device_is_compatible(np, "ibm,emac-405exr"))
2604 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
ae5d3372
DD
2605 if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
2606 dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
2607 EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
2608 EMAC_FTR_460EX_PHY_CLK_FIX);
2609 }
05781ccd 2610 } else if (of_device_is_compatible(np, "ibm,emac4")) {
1d3bb996 2611 dev->features |= EMAC_FTR_EMAC4;
0925ab5d
VB
2612 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2613 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
11121e30
VB
2614 } else {
2615 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2616 of_device_is_compatible(np, "ibm,emac-440gr"))
2617 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
1ff0fcfc 2618 if (of_device_is_compatible(np, "ibm,emac-405ez")) {
3b3bceef 2619#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
b68d185a 2620 dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
1ff0fcfc
JB
2621#else
2622 printk(KERN_ERR "%s: Flow control not disabled!\n",
2623 np->full_name);
2624 return -ENXIO;
2625#endif
2626 }
2627
0925ab5d 2628 }
bff713b5
BH
2629
2630 /* Fixup some feature bits based on the device tree */
2631 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
1d3bb996 2632 dev->features |= EMAC_FTR_STACR_OC_INVERT;
bff713b5
BH
2633 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2634 dev->features |= EMAC_FTR_HAS_NEW_STACR;
1d3bb996 2635
bff713b5
BH
2636 /* CAB lacks the appropriate properties */
2637 if (of_device_is_compatible(np, "ibm,emac-axon"))
2638 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2639 EMAC_FTR_STACR_OC_INVERT;
2640
2641 /* Enable TAH/ZMII/RGMII features as found */
1d3bb996 2642 if (dev->tah_ph != 0) {
3b3bceef 2643#ifdef CONFIG_IBM_EMAC_TAH
1d3bb996
DG
2644 dev->features |= EMAC_FTR_HAS_TAH;
2645#else
2646 printk(KERN_ERR "%s: TAH support not enabled !\n",
2647 np->full_name);
2648 return -ENXIO;
2649#endif
2650 }
2651
2652 if (dev->zmii_ph != 0) {
3b3bceef 2653#ifdef CONFIG_IBM_EMAC_ZMII
1d3bb996
DG
2654 dev->features |= EMAC_FTR_HAS_ZMII;
2655#else
2656 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2657 np->full_name);
2658 return -ENXIO;
2659#endif
2660 }
2661
2662 if (dev->rgmii_ph != 0) {
3b3bceef 2663#ifdef CONFIG_IBM_EMAC_RGMII
1d3bb996
DG
2664 dev->features |= EMAC_FTR_HAS_RGMII;
2665#else
2666 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2667 np->full_name);
2668 return -ENXIO;
2669#endif
2670 }
2671
2672 /* Read MAC-address */
2673 p = of_get_property(np, "local-mac-address", NULL);
2674 if (p == NULL) {
2675 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2676 np->full_name);
2677 return -ENXIO;
2678 }
d458cdf7 2679 memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
1d3bb996 2680
05781ccd
GE
2681 /* IAHT and GAHT filter parameterization */
2682 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2683 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2684 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2685 } else {
2686 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2687 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2688 }
2689
1d3bb996
DG
2690 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2691 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2692 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2693 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2694 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
2695
2696 return 0;
2697}
2698
15efc02b
AB
/* netdev ops for non-gigabit configurations: plain (non-SG) transmit
 * and the generic MTU changer. */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_get_stats		= emac_stats,
	.ndo_set_rx_mode	= emac_set_multicast_list,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_start_xmit		= emac_start_xmit,
	.ndo_change_mtu		= eth_change_mtu,
};
2711
/* netdev ops for gigabit-capable configurations: differ from
 * emac_netdev_ops by the scatter/gather transmit path and a
 * driver-specific MTU changer. */
static const struct net_device_ops emac_gige_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_get_stats		= emac_stats,
	.ndo_set_rx_mode	= emac_set_multicast_list,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_start_xmit		= emac_start_xmit_sg,
	.ndo_change_mtu		= emac_change_mtu,
};
2724
fe17dc1e 2725static int emac_probe(struct platform_device *ofdev)
1d3bb996
DG
2726{
2727 struct net_device *ndev;
2728 struct emac_instance *dev;
61c7a080 2729 struct device_node *np = ofdev->dev.of_node;
1d3bb996
DG
2730 struct device_node **blist = NULL;
2731 int err, i;
2732
be63c09a
JB
2733 /* Skip unused/unwired EMACS. We leave the check for an unused
2734 * property here for now, but new flat device trees should set a
2735 * status property to "disabled" instead.
2736 */
2737 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
3d722562
HB
2738 return -ENODEV;
2739
1d3bb996
DG
2740 /* Find ourselves in the bootlist if we are there */
2741 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2742 if (emac_boot_list[i] == np)
2743 blist = &emac_boot_list[i];
2744
2745 /* Allocate our net_device structure */
2746 err = -ENOMEM;
2747 ndev = alloc_etherdev(sizeof(struct emac_instance));
41de8d4c 2748 if (!ndev)
1d3bb996 2749 goto err_gone;
41de8d4c 2750
1d3bb996
DG
2751 dev = netdev_priv(ndev);
2752 dev->ndev = ndev;
2753 dev->ofdev = ofdev;
2754 dev->blist = blist;
1d3bb996
DG
2755 SET_NETDEV_DEV(ndev, &ofdev->dev);
2756
2757 /* Initialize some embedded data structures */
2758 mutex_init(&dev->mdio_lock);
2759 mutex_init(&dev->link_lock);
2760 spin_lock_init(&dev->lock);
2761 INIT_WORK(&dev->reset_work, emac_reset_work);
2762
2763 /* Init various config data based on device-tree */
2764 err = emac_init_config(dev);
2765 if (err != 0)
2766 goto err_free;
2767
2768 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2769 dev->emac_irq = irq_of_parse_and_map(np, 0);
2770 dev->wol_irq = irq_of_parse_and_map(np, 1);
2771 if (dev->emac_irq == NO_IRQ) {
2772 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2773 goto err_free;
2774 }
2775 ndev->irq = dev->emac_irq;
2776
2777 /* Map EMAC regs */
2778 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2779 printk(KERN_ERR "%s: Can't get registers address\n",
2780 np->full_name);
2781 goto err_irq_unmap;
2782 }
2783 // TODO : request_mem_region
05781ccd 2784 dev->emacp = ioremap(dev->rsrc_regs.start,
28f65c11 2785 resource_size(&dev->rsrc_regs));
1d3bb996
DG
2786 if (dev->emacp == NULL) {
2787 printk(KERN_ERR "%s: Can't map device registers!\n",
2788 np->full_name);
2789 err = -ENOMEM;
2790 goto err_irq_unmap;
2791 }
2792
2793 /* Wait for dependent devices */
2794 err = emac_wait_deps(dev);
2795 if (err) {
2796 printk(KERN_ERR
2797 "%s: Timeout waiting for dependent devices\n",
2798 np->full_name);
2799 /* display more info about what's missing ? */
2800 goto err_reg_unmap;
2801 }
bc353832 2802 dev->mal = platform_get_drvdata(dev->mal_dev);
1d3bb996 2803 if (dev->mdio_dev != NULL)
bc353832 2804 dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
1d3bb996
DG
2805
2806 /* Register with MAL */
2807 dev->commac.ops = &emac_commac_ops;
2808 dev->commac.dev = dev;
2809 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2810 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2811 err = mal_register_commac(dev->mal, &dev->commac);
2812 if (err) {
2813 printk(KERN_ERR "%s: failed to register with mal %s!\n",
61c7a080 2814 np->full_name, dev->mal_dev->dev.of_node->full_name);
1d3bb996
DG
2815 goto err_rel_deps;
2816 }
2817 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2818 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2819
2820 /* Get pointers to BD rings */
2821 dev->tx_desc =
2822 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2823 dev->rx_desc =
2824 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2825
2826 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2827 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2828
2829 /* Clean rings */
2830 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2831 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
ab9b30cc
SN
2832 memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2833 memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
1d3bb996
DG
2834
2835 /* Attach to ZMII, if needed */
2836 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2837 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2838 goto err_unreg_commac;
2839
2840 /* Attach to RGMII, if needed */
2841 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2842 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2843 goto err_detach_zmii;
2844
2845 /* Attach to TAH, if needed */
2846 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2847 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2848 goto err_detach_rgmii;
2849
2850 /* Set some link defaults before we can find out real parameters */
2851 dev->phy.speed = SPEED_100;
2852 dev->phy.duplex = DUPLEX_FULL;
2853 dev->phy.autoneg = AUTONEG_DISABLE;
2854 dev->phy.pause = dev->phy.asym_pause = 0;
2855 dev->stop_timeout = STOP_TIMEOUT_100;
2856 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2857
ae5d3372
DD
2858 /* Some SoCs like APM821xx does not support Half Duplex mode. */
2859 if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
2860 dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
2861 SUPPORTED_100baseT_Half |
2862 SUPPORTED_10baseT_Half);
2863 }
2864
1d3bb996
DG
2865 /* Find PHY if any */
2866 err = emac_init_phy(dev);
2867 if (err != 0)
2868 goto err_detach_tah;
2869
5e4011e2
MM
2870 if (dev->tah_dev) {
2871 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
2872 ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
2873 }
1d3bb996 2874 ndev->watchdog_timeo = 5 * HZ;
1d3bb996 2875 if (emac_phy_supports_gige(dev->phy_mode)) {
15efc02b 2876 ndev->netdev_ops = &emac_gige_netdev_ops;
1d3bb996 2877 dev->commac.ops = &emac_commac_sg_ops;
15efc02b
AB
2878 } else
2879 ndev->netdev_ops = &emac_netdev_ops;
1d3bb996
DG
2880 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2881
2882 netif_carrier_off(ndev);
1d3bb996
DG
2883
2884 err = register_netdev(ndev);
2885 if (err) {
2886 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2887 np->full_name, err);
2888 goto err_detach_tah;
2889 }
2890
2891 /* Set our drvdata last as we don't want them visible until we are
2892 * fully initialized
2893 */
2894 wmb();
bc353832 2895 platform_set_drvdata(ofdev, dev);
1d3bb996
DG
2896
2897 /* There's a new kid in town ! Let's tell everybody */
2898 wake_up_all(&emac_probe_wait);
2899
2900
7c510e4b
JB
2901 printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
2902 ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
1d3bb996 2903
9e3cb294
VG
2904 if (dev->phy_mode == PHY_MODE_SGMII)
2905 printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
2906
1d3bb996
DG
2907 if (dev->phy.address >= 0)
2908 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2909 dev->phy.def->name, dev->phy.address);
2910
2911 emac_dbg_register(dev);
2912
2913 /* Life is good */
2914 return 0;
2915
2916 /* I have a bad feeling about this ... */
2917
2918 err_detach_tah:
2919 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2920 tah_detach(dev->tah_dev, dev->tah_port);
2921 err_detach_rgmii:
2922 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2923 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2924 err_detach_zmii:
2925 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2926 zmii_detach(dev->zmii_dev, dev->zmii_port);
2927 err_unreg_commac:
2928 mal_unregister_commac(dev->mal, &dev->commac);
2929 err_rel_deps:
2930 emac_put_deps(dev);
2931 err_reg_unmap:
2932 iounmap(dev->emacp);
2933 err_irq_unmap:
2934 if (dev->wol_irq != NO_IRQ)
2935 irq_dispose_mapping(dev->wol_irq);
2936 if (dev->emac_irq != NO_IRQ)
2937 irq_dispose_mapping(dev->emac_irq);
2938 err_free:
52933f05 2939 free_netdev(ndev);
1d3bb996
DG
2940 err_gone:
2941 /* if we were on the bootlist, remove us as we won't show up and
2942 * wake up all waiters to notify them in case they were waiting
2943 * on us
2944 */
2945 if (blist) {
2946 *blist = NULL;
2947 wake_up_all(&emac_probe_wait);
2948 }
2949 return err;
2950}
2951
fe17dc1e 2952static int emac_remove(struct platform_device *ofdev)
1d3bb996 2953{
bc353832 2954 struct emac_instance *dev = platform_get_drvdata(ofdev);
1d3bb996
DG
2955
2956 DBG(dev, "remove" NL);
2957
1d3bb996
DG
2958 unregister_netdev(dev->ndev);
2959
23f333a2 2960 cancel_work_sync(&dev->reset_work);
61dbcece 2961
1d3bb996
DG
2962 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2963 tah_detach(dev->tah_dev, dev->tah_port);
2964 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2965 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2966 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2967 zmii_detach(dev->zmii_dev, dev->zmii_port);
2968
d6f14483
WG
2969 busy_phy_map &= ~(1 << dev->phy.address);
2970 DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
2971
1d3bb996
DG
2972 mal_unregister_commac(dev->mal, &dev->commac);
2973 emac_put_deps(dev);
2974
2975 emac_dbg_unregister(dev);
2976 iounmap(dev->emacp);
2977
2978 if (dev->wol_irq != NO_IRQ)
2979 irq_dispose_mapping(dev->wol_irq);
2980 if (dev->emac_irq != NO_IRQ)
2981 irq_dispose_mapping(dev->emac_irq);
2982
52933f05 2983 free_netdev(dev->ndev);
1d3bb996
DG
2984
2985 return 0;
2986}
2987
/* XXX Features in here should be replaced by properties... */
/* Device-tree match table: all three EMAC core generations bind to
 * this driver; the compatible string then selects feature flags in
 * emac_init_config(). */
static struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4sync",
	},
	{},
};
MODULE_DEVICE_TABLE(of, emac_match);
1d3bb996 3006
/* Platform driver glue; registered from emac_init(). */
static struct platform_driver emac_driver = {
	.driver = {
		.name = "emac",
		.owner = THIS_MODULE,
		.of_match_table = emac_match,
	},
	.probe = emac_probe,
	.remove = emac_remove,
};
3016
/* Collect all matching EMAC nodes into emac_boot_list, sorted by their
 * cell-index property.  The resulting order defines the inter-EMAC
 * probe dependency used by emac_wait_deps() (each EMAC waits for the
 * previous one in this list). */
static void __init emac_make_bootlist(void)
{
	struct device_node *np = NULL;
	int j, max, i = 0, k;
	int cell_indices[EMAC_BOOT_LIST_SIZE];

	/* Collect EMACs */
	while((np = of_find_all_nodes(np)) != NULL) {
		const u32 *idx;

		if (of_match_node(emac_match, np) == NULL)
			continue;
		if (of_get_property(np, "unused", NULL))
			continue;
		idx = of_get_property(np, "cell-index", NULL);
		if (idx == NULL)
			continue;
		cell_indices[i] = *idx;
		/* hold a reference for the lifetime of the list */
		emac_boot_list[i++] = of_node_get(np);
		if (i >= EMAC_BOOT_LIST_SIZE) {
			of_node_put(np);
			break;
		}
	}
	max = i;

	/* Bubble sort them (doh, what a creative algorithm :-) */
	for (i = 0; max > 1 && (i < (max - 1)); i++)
		for (j = i; j < max; j++) {
			if (cell_indices[i] > cell_indices[j]) {
				np = emac_boot_list[i];
				emac_boot_list[i] = emac_boot_list[j];
				emac_boot_list[j] = np;
				k = cell_indices[i];
				cell_indices[i] = cell_indices[j];
				cell_indices[j] = k;
			}
		}
}
3056
/* Module init: set up debug support and the boot list, bring up the
 * MAL/ZMII/RGMII/TAH submodules in order, then register the platform
 * driver.  Failures unwind the submodules in reverse order. */
static int __init emac_init(void)
{
	int rc;

	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	/* Init debug stuff */
	emac_init_debug();

	/* Build EMAC boot list */
	emac_make_bootlist();

	/* Init submodules */
	rc = mal_init();
	if (rc)
		goto err;
	rc = zmii_init();
	if (rc)
		goto err_mal;
	rc = rgmii_init();
	if (rc)
		goto err_zmii;
	rc = tah_init();
	if (rc)
		goto err_rgmii;
	rc = platform_driver_register(&emac_driver);
	if (rc)
		goto err_tah;

	return 0;

	/* unwind submodule init in reverse order */
 err_tah:
	tah_exit();
 err_rgmii:
	rgmii_exit();
 err_zmii:
	zmii_exit();
 err_mal:
	mal_exit();
 err:
	return rc;
}
3099
/* Module exit: unregister the driver, shut down submodules in reverse
 * init order and release the boot-list node references taken in
 * emac_make_bootlist(). */
static void __exit emac_exit(void)
{
	int i;

	platform_driver_unregister(&emac_driver);

	tah_exit();
	rgmii_exit();
	zmii_exit();
	mal_exit();
	emac_fini_debug();

	/* Destroy EMAC boot list */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i])
			of_node_put(emac_boot_list[i]);
}

module_init(emac_init);
module_exit(emac_exit);