1/*
2 * drivers/net/ethernet/ibm/emac/core.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 *
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13 *
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
19 *
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
24 *
25 */
26
27#include <linux/module.h>
28#include <linux/sched.h>
29#include <linux/string.h>
30#include <linux/errno.h>
31#include <linux/delay.h>
32#include <linux/types.h>
33#include <linux/pci.h>
34#include <linux/etherdevice.h>
35#include <linux/skbuff.h>
36#include <linux/crc32.h>
37#include <linux/ethtool.h>
38#include <linux/mii.h>
39#include <linux/bitops.h>
40#include <linux/workqueue.h>
41#include <linux/of.h>
42#include <linux/of_address.h>
43#include <linux/of_irq.h>
44#include <linux/of_net.h>
45#include <linux/of_mdio.h>
46#include <linux/slab.h>
47
48#include <asm/processor.h>
49#include <asm/io.h>
50#include <asm/dma.h>
51#include <linux/uaccess.h>
52#include <asm/dcr.h>
53#include <asm/dcr-regs.h>
54
55#include "core.h"
56
57/*
58 * Lack of dma_unmap_???? calls is intentional.
59 *
60 * API-correct usage requires additional support state information to be
61 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
62 * EMAC design (e.g. TX buffer passed from network stack can be split into
63 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
64 * maintaining such information will add additional overhead.
65 * Current DMA API implementation for 4xx processors only ensures cache coherency
66 * and dma_unmap_???? routines are empty and are likely to stay this way.
67 * I decided to omit dma_unmap_??? calls because I don't want to add additional
68 * complexity just for the sake of following some abstract API, when it doesn't
 69 * add any real benefit to the driver. I understand that this decision may be
70 * controversial, but I really tried to make code API-correct and efficient
71 * at the same time and didn't come up with code I liked :(. --ebs
72 */
73
74#define DRV_NAME "emac"
75#define DRV_VERSION "3.54"
76#define DRV_DESC "PPC 4xx OCP EMAC driver"
77
78MODULE_DESCRIPTION(DRV_DESC);
79MODULE_AUTHOR
80 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
81MODULE_LICENSE("GPL");
82
83/* minimum number of free TX descriptors required to wake up TX process */
84#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
85
 86 /* If the packet size is less than this number, we allocate a small skb and copy the
 87 * packet contents into it instead of just sending the original big skb up
88 */
 89#define EMAC_RX_COPY_THRESH CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
90
91/* Since multiple EMACs share MDIO lines in various ways, we need
92 * to avoid re-using the same PHY ID in cases where the arch didn't
93 * setup precise phy_map entries
94 *
95 * XXX This is something that needs to be reworked as we can have multiple
96 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
97 * probably require in that case to have explicit PHY IDs in the device-tree
98 */
99static u32 busy_phy_map;
100static DEFINE_MUTEX(emac_phy_map_lock);
101
102/* This is the wait queue used to wait on any event related to probe, that
103 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
104 */
105static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
106
107/* Having stable interface names is a doomed idea. However, it would be nice
108 * if we didn't have completely random interface names at boot too :-) It's
109 * just a matter of making everybody's life easier. Since we are doing
110 * threaded probing, it's a bit harder though. The base idea here is that
111 * we make up a list of all emacs in the device-tree before we register the
112 * driver. Every emac will then wait for the previous one in the list to
113 * initialize before itself. We should also keep that list ordered by
114 * cell_index.
115 * That list is only 4 entries long, meaning that additional EMACs don't
116 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
117 */
118
119#define EMAC_BOOT_LIST_SIZE 4
120static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
121
122/* How long should I wait for dependent devices ? */
123#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
124
125/* I don't want to litter system log with timeout errors
126 * when we have brain-damaged PHY.
127 */
128static inline void emac_report_timeout_error(struct emac_instance *dev,
129 const char *error)
130{
 131 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
 132 EMAC_FTR_460EX_PHY_CLK_FIX |
133 EMAC_FTR_440EP_PHY_CLK_FIX))
134 DBG(dev, "%s" NL, error);
135 else if (net_ratelimit())
136 printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
137 error);
138}
139
140/* EMAC PHY clock workaround:
141 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
142 * which allows controlling each EMAC clock
143 */
144static inline void emac_rx_clk_tx(struct emac_instance *dev)
145{
146#ifdef CONFIG_PPC_DCR_NATIVE
147 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
148 dcri_clrset(SDR0, SDR0_MFR,
149 0, SDR0_MFR_ECS >> dev->cell_index);
150#endif
151}
152
153static inline void emac_rx_clk_default(struct emac_instance *dev)
154{
155#ifdef CONFIG_PPC_DCR_NATIVE
156 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
157 dcri_clrset(SDR0, SDR0_MFR,
158 SDR0_MFR_ECS >> dev->cell_index, 0);
159#endif
160}
161
162/* PHY polling intervals */
163#define PHY_POLL_LINK_ON HZ
164#define PHY_POLL_LINK_OFF (HZ / 5)
165
166/* Graceful stop timeouts in us.
167 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
168 */
169#define STOP_TIMEOUT_10 1230
170#define STOP_TIMEOUT_100 124
171#define STOP_TIMEOUT_1000 13
172#define STOP_TIMEOUT_1000_JUMBO 73
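/* These values appear to correspond to one maximum-length frame plus
 * preamble and inter-frame gap, e.g. 1538 bytes * 8 / 10 Mb/s ~= 1230 us,
 * and roughly a 9000-byte jumbo frame at 1 Gb/s for the last entry.
 */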
173
174static unsigned char default_mcast_addr[] = {
175 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
176};
177
178/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
179static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
180 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
181 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
182 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
183 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
184 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
185 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
186 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
187 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
188 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
189 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
190 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
191 "tx_bd_excessive_collisions", "tx_bd_late_collision",
192 "tx_bd_multple_collisions", "tx_bd_single_collision",
193 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
194 "tx_errors"
195};
196
197static irqreturn_t emac_irq(int irq, void *dev_instance);
198static void emac_clean_tx_ring(struct emac_instance *dev);
199static void __emac_set_multicast_list(struct emac_instance *dev);
200
201static inline int emac_phy_supports_gige(int phy_mode)
202{
203 return phy_mode == PHY_MODE_GMII ||
204 phy_mode == PHY_MODE_RGMII ||
 205 phy_mode == PHY_MODE_SGMII ||
206 phy_mode == PHY_MODE_TBI ||
207 phy_mode == PHY_MODE_RTBI;
208}
209
210static inline int emac_phy_gpcs(int phy_mode)
211{
212 return phy_mode == PHY_MODE_SGMII ||
213 phy_mode == PHY_MODE_TBI ||
214 phy_mode == PHY_MODE_RTBI;
215}
216
217static inline void emac_tx_enable(struct emac_instance *dev)
218{
219 struct emac_regs __iomem *p = dev->emacp;
220 u32 r;
221
222 DBG(dev, "tx_enable" NL);
223
224 r = in_be32(&p->mr0);
225 if (!(r & EMAC_MR0_TXE))
226 out_be32(&p->mr0, r | EMAC_MR0_TXE);
227}
228
229static void emac_tx_disable(struct emac_instance *dev)
230{
231 struct emac_regs __iomem *p = dev->emacp;
232 u32 r;
233
234 DBG(dev, "tx_disable" NL);
235
236 r = in_be32(&p->mr0);
237 if (r & EMAC_MR0_TXE) {
238 int n = dev->stop_timeout;
239 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
240 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
241 udelay(1);
242 --n;
243 }
244 if (unlikely(!n))
245 emac_report_timeout_error(dev, "TX disable timeout");
246 }
247}
248
249static void emac_rx_enable(struct emac_instance *dev)
250{
251 struct emac_regs __iomem *p = dev->emacp;
252 u32 r;
253
254 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
255 goto out;
256
257 DBG(dev, "rx_enable" NL);
258
259 r = in_be32(&p->mr0);
260 if (!(r & EMAC_MR0_RXE)) {
261 if (unlikely(!(r & EMAC_MR0_RXI))) {
262 /* Wait if previous async disable is still in progress */
263 int n = dev->stop_timeout;
264 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
265 udelay(1);
266 --n;
267 }
268 if (unlikely(!n))
269 emac_report_timeout_error(dev,
270 "RX disable timeout");
271 }
272 out_be32(&p->mr0, r | EMAC_MR0_RXE);
273 }
274 out:
275 ;
276}
277
278static void emac_rx_disable(struct emac_instance *dev)
279{
280 struct emac_regs __iomem *p = dev->emacp;
281 u32 r;
282
283 DBG(dev, "rx_disable" NL);
284
285 r = in_be32(&p->mr0);
286 if (r & EMAC_MR0_RXE) {
287 int n = dev->stop_timeout;
288 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
289 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
290 udelay(1);
291 --n;
292 }
293 if (unlikely(!n))
294 emac_report_timeout_error(dev, "RX disable timeout");
295 }
296}
297
298static inline void emac_netif_stop(struct emac_instance *dev)
299{
300 netif_tx_lock_bh(dev->ndev);
 301 netif_addr_lock(dev->ndev);
 302 dev->no_mcast = 1;
 303 netif_addr_unlock(dev->ndev);
 304 netif_tx_unlock_bh(dev->ndev);
 305 netif_trans_update(dev->ndev); /* prevent tx timeout */
306 mal_poll_disable(dev->mal, &dev->commac);
307 netif_tx_disable(dev->ndev);
308}
309
310static inline void emac_netif_start(struct emac_instance *dev)
311{
312 netif_tx_lock_bh(dev->ndev);
 313 netif_addr_lock(dev->ndev);
314 dev->no_mcast = 0;
315 if (dev->mcast_pending && netif_running(dev->ndev))
316 __emac_set_multicast_list(dev);
 317 netif_addr_unlock(dev->ndev);
318 netif_tx_unlock_bh(dev->ndev);
319
320 netif_wake_queue(dev->ndev);
321
322 /* NOTE: unconditional netif_wake_queue is only appropriate
323 * so long as all callers are assured to have free tx slots
324 * (taken from tg3... though the case where that is wrong is
325 * not terribly harmful)
326 */
327 mal_poll_enable(dev->mal, &dev->commac);
328}
329
330static inline void emac_rx_disable_async(struct emac_instance *dev)
331{
332 struct emac_regs __iomem *p = dev->emacp;
333 u32 r;
334
335 DBG(dev, "rx_disable_async" NL);
336
337 r = in_be32(&p->mr0);
338 if (r & EMAC_MR0_RXE)
339 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
340}
341
342static int emac_reset(struct emac_instance *dev)
343{
344 struct emac_regs __iomem *p = dev->emacp;
345 int n = 20;
 346 bool __maybe_unused try_internal_clock = false;
347
348 DBG(dev, "reset" NL);
349
350 if (!dev->reset_failed) {
351 /* 40x erratum suggests stopping RX channel before reset,
352 * we stop TX as well
353 */
354 emac_rx_disable(dev);
355 emac_tx_disable(dev);
356 }
357
 358#ifdef CONFIG_PPC_DCR_NATIVE
 359do_retry:
360 /*
361 * PPC460EX/GT Embedded Processor Advanced User's Manual
362 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
363 * Note: The PHY must provide a TX Clk in order to perform a soft reset
364 * of the EMAC. If none is present, select the internal clock
365 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
366 * After a soft reset, select the external clock.
367 *
368 * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the
369 * ethernet cable is not attached. This causes the reset to timeout
370 * and the PHY detection code in emac_init_phy() is unable to
371 * communicate and detect the AR8035-A PHY. As a result, the emac
372 * driver bails out early and the user has no ethernet.
373 * In order to stay compatible with existing configurations, the
374 * driver will temporarily switch to the internal clock, after
375 * the first reset fails.
376 */
377 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
378 if (try_internal_clock || (dev->phy_address == 0xffffffff &&
379 dev->phy_map == 0xffffffff)) {
380 /* No PHY: select internal loop clock before reset */
381 dcri_clrset(SDR0, SDR0_ETH_CFG,
382 0, SDR0_ETH_CFG_ECS << dev->cell_index);
383 } else {
384 /* PHY present: select external clock before reset */
385 dcri_clrset(SDR0, SDR0_ETH_CFG,
386 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
387 }
388 }
389#endif
390
391 out_be32(&p->mr0, EMAC_MR0_SRST);
392 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
393 --n;
394
 395#ifdef CONFIG_PPC_DCR_NATIVE
 396 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
397 if (!n && !try_internal_clock) {
398 /* first attempt has timed out. */
399 n = 20;
400 try_internal_clock = true;
401 goto do_retry;
402 }
403
404 if (try_internal_clock || (dev->phy_address == 0xffffffff &&
405 dev->phy_map == 0xffffffff)) {
406 /* No PHY: restore external clock source after reset */
407 dcri_clrset(SDR0, SDR0_ETH_CFG,
408 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
409 }
410 }
411#endif
412
413 if (n) {
414 dev->reset_failed = 0;
415 return 0;
416 } else {
417 emac_report_timeout_error(dev, "reset timeout");
418 dev->reset_failed = 1;
419 return -ETIMEDOUT;
420 }
421}
422
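/* Program the EMAC group address hash table (GAHT) from the interface's
 * multicast list: each address is CRC-hashed to a slot and the matching
 * bit is set in the corresponding GAHT register.
 */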
423static void emac_hash_mc(struct emac_instance *dev)
424{
425 const int regs = EMAC_XAHT_REGS(dev);
426 u32 *gaht_base = emac_gaht_base(dev);
427 u32 gaht_temp[regs];
 428 struct netdev_hw_addr *ha;
 429 int i;
 430
 431 DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
 432
433 memset(gaht_temp, 0, sizeof (gaht_temp));
434
 435 netdev_for_each_mc_addr(ha, dev->ndev) {
 436 int slot, reg, mask;
 437 DBG2(dev, "mc %pM" NL, ha->addr);
 438
439 slot = EMAC_XAHT_CRC_TO_SLOT(dev,
440 ether_crc(ETH_ALEN, ha->addr));
441 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
442 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
443
444 gaht_temp[reg] |= mask;
1d3bb996 445 }
446
447 for (i = 0; i < regs; i++)
448 out_be32(gaht_base + i, gaht_temp[i]);
449}
450
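/* Translate net_device flags into a Receive Mode Register value:
 * promiscuous, all-multicast (also used when there are more multicast
 * addresses than hash table slots), or hash-filtered multicast.
 */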
451static inline u32 emac_iff2rmr(struct net_device *ndev)
452{
453 struct emac_instance *dev = netdev_priv(ndev);
454 u32 r;
455
456 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
457
458 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
459 r |= EMAC4_RMR_BASE;
460 else
461 r |= EMAC_RMR_BASE;
462
463 if (ndev->flags & IFF_PROMISC)
464 r |= EMAC_RMR_PME;
 465 else if (ndev->flags & IFF_ALLMULTI ||
 466 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
 467 r |= EMAC_RMR_PMME;
 468 else if (!netdev_mc_empty(ndev))
469 r |= EMAC_RMR_MAE;
470
471 if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
472 r &= ~EMAC4_RMR_MJS_MASK;
473 r |= EMAC4_RMR_MJS(ndev->mtu);
474 }
475
476 return r;
477}
478
479static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
480{
481 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
482
483 DBG2(dev, "__emac_calc_base_mr1" NL);
484
485 switch(tx_size) {
486 case 2048:
487 ret |= EMAC_MR1_TFS_2K;
488 break;
489 default:
 490 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
491 dev->ndev->name, tx_size);
492 }
493
494 switch(rx_size) {
495 case 16384:
496 ret |= EMAC_MR1_RFS_16K;
497 break;
498 case 4096:
499 ret |= EMAC_MR1_RFS_4K;
500 break;
501 default:
502 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
503 dev->ndev->name, rx_size);
504 }
505
506 return ret;
507}
508
509static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
510{
511 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
 512 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
513
514 DBG2(dev, "__emac4_calc_base_mr1" NL);
515
516 switch(tx_size) {
517 case 16384:
518 ret |= EMAC4_MR1_TFS_16K;
519 break;
520 case 4096:
521 ret |= EMAC4_MR1_TFS_4K;
522 break;
523 case 2048:
524 ret |= EMAC4_MR1_TFS_2K;
525 break;
526 default:
 527 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
528 dev->ndev->name, tx_size);
529 }
530
531 switch(rx_size) {
532 case 16384:
533 ret |= EMAC4_MR1_RFS_16K;
534 break;
535 case 4096:
536 ret |= EMAC4_MR1_RFS_4K;
537 break;
538 case 2048:
539 ret |= EMAC4_MR1_RFS_2K;
540 break;
541 default:
542 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
543 dev->ndev->name, rx_size);
544 }
545
546 return ret;
547}
548
549static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
550{
551 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
552 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
553 __emac_calc_base_mr1(dev, tx_size, rx_size);
554}
555
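/* TX request threshold: the register field holds (threshold / 64) - 1,
 * shifted into place for the EMAC4 or classic EMAC layout.
 */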
556static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
557{
558 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
559 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
560 else
561 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
562}
563
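/* RX FIFO watermarks: pack the low and high marks (in FIFO-entry units)
 * into the RWMR field layout of the EMAC4 or classic EMAC.
 */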
564static inline u32 emac_calc_rwmr(struct emac_instance *dev,
565 unsigned int low, unsigned int high)
566{
567 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
568 return (low << 22) | ( (high & 0x3ff) << 6);
569 else
570 return (low << 23) | ( (high & 0x1ff) << 7);
571}
572
573static int emac_configure(struct emac_instance *dev)
574{
575 struct emac_regs __iomem *p = dev->emacp;
576 struct net_device *ndev = dev->ndev;
 577 int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
578 u32 r, mr1 = 0;
579
580 DBG(dev, "configure" NL);
581
582 if (!link) {
583 out_be32(&p->mr1, in_be32(&p->mr1)
584 | EMAC_MR1_FDE | EMAC_MR1_ILE);
585 udelay(100);
586 } else if (emac_reset(dev) < 0)
587 return -ETIMEDOUT;
588
589 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
590 tah_reset(dev->tah_dev);
591
592 DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
593 link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
594
595 /* Default fifo sizes */
596 tx_size = dev->tx_fifo_size;
597 rx_size = dev->rx_fifo_size;
598
599 /* No link, force loopback */
600 if (!link)
601 mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
602
1d3bb996 603 /* Check for full duplex */
 604 else if (dev->phy.duplex == DUPLEX_FULL)
605 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
606
607 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
608 dev->stop_timeout = STOP_TIMEOUT_10;
609 switch (dev->phy.speed) {
610 case SPEED_1000:
611 if (emac_phy_gpcs(dev->phy.mode)) {
612 mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
613 (dev->phy.gpcs_address != 0xffffffff) ?
614 dev->phy.gpcs_address : dev->phy.address);
615
616 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
617 * identify this GPCS PHY later.
618 */
 619 out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
620 } else
621 mr1 |= EMAC_MR1_MF_1000;
622
623 /* Extended fifo sizes */
624 tx_size = dev->tx_fifo_size_gige;
625 rx_size = dev->rx_fifo_size_gige;
626
627 if (dev->ndev->mtu > ETH_DATA_LEN) {
628 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
629 mr1 |= EMAC4_MR1_JPSM;
630 else
631 mr1 |= EMAC_MR1_JPSM;
632 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
633 } else
634 dev->stop_timeout = STOP_TIMEOUT_1000;
635 break;
636 case SPEED_100:
637 mr1 |= EMAC_MR1_MF_100;
638 dev->stop_timeout = STOP_TIMEOUT_100;
639 break;
640 default: /* make gcc happy */
641 break;
642 }
643
644 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
645 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
646 dev->phy.speed);
647 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
648 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
649
 650 /* An erratum on 40x forces us to NOT use integrated flow control,
651 * let's hope it works on 44x ;)
652 */
653 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
654 dev->phy.duplex == DUPLEX_FULL) {
655 if (dev->phy.pause)
656 mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
657 else if (dev->phy.asym_pause)
658 mr1 |= EMAC_MR1_APP;
659 }
660
661 /* Add base settings & fifo sizes & program MR1 */
662 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
663 out_be32(&p->mr1, mr1);
664
665 /* Set individual MAC address */
666 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
667 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
668 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
669 ndev->dev_addr[5]);
670
671 /* VLAN Tag Protocol ID */
672 out_be32(&p->vtpid, 0x8100);
673
674 /* Receive mode register */
675 r = emac_iff2rmr(ndev);
676 if (r & EMAC_RMR_MAE)
677 emac_hash_mc(dev);
678 out_be32(&p->rmr, r);
679
680 /* FIFOs thresholds */
681 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
682 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
683 tx_size / 2 / dev->fifo_entry_size);
684 else
685 r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
686 tx_size / 2 / dev->fifo_entry_size);
687 out_be32(&p->tmr1, r);
688 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
689
690 /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
 691 there should still be enough space in the FIFO to allow our link
692 partner time to process this frame and also time to send PAUSE
693 frame itself.
694
695 Here is the worst case scenario for the RX FIFO "headroom"
696 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
697
698 1) One maximum-length frame on TX 1522 bytes
699 2) One PAUSE frame time 64 bytes
700 3) PAUSE frame decode time allowance 64 bytes
701 4) One maximum-length frame on RX 1522 bytes
702 5) Round-trip propagation delay of the link (100Mb) 15 bytes
703 ----------
704 3187 bytes
705
706 I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
707 low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
708 */
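/* For example, with a 4096-byte RX FIFO and 16-byte FIFO entries this
 * programs a low-water mark of 4096/8/16 = 32 entries and a high-water
 * mark of 4096/4/16 = 64 entries.
 */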
709 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
710 rx_size / 4 / dev->fifo_entry_size);
711 out_be32(&p->rwmr, r);
712
713 /* Set PAUSE timer to the maximum */
714 out_be32(&p->ptr, 0xffff);
715
716 /* IRQ sources */
717 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
718 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
719 EMAC_ISR_IRE | EMAC_ISR_TE;
720 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
721 r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
722 EMAC4_ISR_RXOE | */;
723 out_be32(&p->iser, r);
724
725 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
726 if (emac_phy_gpcs(dev->phy.mode)) {
727 if (dev->phy.gpcs_address != 0xffffffff)
728 emac_mii_reset_gpcs(&dev->phy);
729 else
730 emac_mii_reset_phy(&dev->phy);
731 }
732
733 return 0;
734}
735
736static void emac_reinitialize(struct emac_instance *dev)
737{
738 DBG(dev, "reinitialize" NL);
739
740 emac_netif_stop(dev);
741 if (!emac_configure(dev)) {
742 emac_tx_enable(dev);
743 emac_rx_enable(dev);
744 }
745 emac_netif_start(dev);
746}
747
748static void emac_full_tx_reset(struct emac_instance *dev)
749{
750 DBG(dev, "full_tx_reset" NL);
751
752 emac_tx_disable(dev);
753 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
754 emac_clean_tx_ring(dev);
755 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
756
757 emac_configure(dev);
758
759 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
760 emac_tx_enable(dev);
761 emac_rx_enable(dev);
762}
763
764static void emac_reset_work(struct work_struct *work)
765{
766 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
767
768 DBG(dev, "reset_work" NL);
769
770 mutex_lock(&dev->link_lock);
771 if (dev->opened) {
772 emac_netif_stop(dev);
773 emac_full_tx_reset(dev);
774 emac_netif_start(dev);
775 }
776 mutex_unlock(&dev->link_lock);
777}
778
779static void emac_tx_timeout(struct net_device *ndev)
780{
781 struct emac_instance *dev = netdev_priv(ndev);
782
783 DBG(dev, "tx_timeout" NL);
784
785 schedule_work(&dev->reset_work);
786}
787
788
789static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
790{
791 int done = !!(stacr & EMAC_STACR_OC);
792
793 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
794 done = !done;
795
796 return done;
797};
798
799static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
800{
801 struct emac_regs __iomem *p = dev->emacp;
802 u32 r = 0;
803 int n, err = -ETIMEDOUT;
804
805 mutex_lock(&dev->mdio_lock);
806
807 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
808
809 /* Enable proper MDIO port */
810 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
811 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
812 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
813 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
814
815 /* Wait for management interface to become idle */
 816 n = 20;
817 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
818 udelay(1);
819 if (!--n) {
820 DBG2(dev, " -> timeout wait idle\n");
821 goto bail;
822 }
823 }
824
825 /* Issue read command */
826 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
827 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
828 else
829 r = EMAC_STACR_BASE(dev->opb_bus_freq);
830 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
831 r |= EMAC_STACR_OC;
 832 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
833 r |= EMACX_STACR_STAC_READ;
834 else
835 r |= EMAC_STACR_STAC_READ;
836 r |= (reg & EMAC_STACR_PRA_MASK)
837 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
838 out_be32(&p->stacr, r);
839
840 /* Wait for read to complete */
 841 n = 200;
842 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
843 udelay(1);
844 if (!--n) {
845 DBG2(dev, " -> timeout wait complete\n");
846 goto bail;
847 }
848 }
849
850 if (unlikely(r & EMAC_STACR_PHYE)) {
851 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
852 err = -EREMOTEIO;
853 goto bail;
854 }
855
856 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
857
858 DBG2(dev, "mdio_read -> %04x" NL, r);
859 err = 0;
860 bail:
861 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
862 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
863 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
864 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
865 mutex_unlock(&dev->mdio_lock);
866
867 return err == 0 ? r : err;
868}
869
870static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
871 u16 val)
872{
873 struct emac_regs __iomem *p = dev->emacp;
874 u32 r = 0;
875 int n, err = -ETIMEDOUT;
876
877 mutex_lock(&dev->mdio_lock);
878
879 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
880
881 /* Enable proper MDIO port */
882 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
883 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
884 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
885 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
886
887 /* Wait for management interface to be idle */
 888 n = 20;
889 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
890 udelay(1);
891 if (!--n) {
892 DBG2(dev, " -> timeout wait idle\n");
893 goto bail;
894 }
895 }
896
897 /* Issue write command */
898 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
899 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
900 else
901 r = EMAC_STACR_BASE(dev->opb_bus_freq);
902 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
903 r |= EMAC_STACR_OC;
 904 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
905 r |= EMACX_STACR_STAC_WRITE;
906 else
907 r |= EMAC_STACR_STAC_WRITE;
908 r |= (reg & EMAC_STACR_PRA_MASK) |
909 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
910 (val << EMAC_STACR_PHYD_SHIFT);
911 out_be32(&p->stacr, r);
912
913 /* Wait for write to complete */
 914 n = 200;
915 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
916 udelay(1);
917 if (!--n) {
918 DBG2(dev, " -> timeout wait complete\n");
919 goto bail;
920 }
921 }
922 err = 0;
923 bail:
924 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
925 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
926 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
927 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
928 mutex_unlock(&dev->mdio_lock);
929}
930
931static int emac_mdio_read(struct net_device *ndev, int id, int reg)
932{
933 struct emac_instance *dev = netdev_priv(ndev);
934 int res;
935
936 res = __emac_mdio_read((dev->mdio_instance &&
937 dev->phy.gpcs_address != id) ?
938 dev->mdio_instance : dev,
939 (u8) id, (u8) reg);
940 return res;
941}
942
943static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
944{
945 struct emac_instance *dev = netdev_priv(ndev);
946
947 __emac_mdio_write((dev->mdio_instance &&
948 dev->phy.gpcs_address != id) ?
949 dev->mdio_instance : dev,
950 (u8) id, (u8) reg, (u16) val);
951}
952
953/* Tx lock BH */
954static void __emac_set_multicast_list(struct emac_instance *dev)
955{
956 struct emac_regs __iomem *p = dev->emacp;
957 u32 rmr = emac_iff2rmr(dev->ndev);
958
959 DBG(dev, "__multicast %08x" NL, rmr);
960
961 /* I decided to relax register access rules here to avoid
962 * full EMAC reset.
963 *
964 * There is a real problem with EMAC4 core if we use MWSW_001 bit
965 * in MR1 register and do a full EMAC reset.
966 * One TX BD status update is delayed and, after EMAC reset, it
967 * never happens, resulting in TX hung (it'll be recovered by TX
968 * timeout handler eventually, but this is just gross).
969 * So we either have to do full TX reset or try to cheat here :)
970 *
971 * The only required change is to RX mode register, so I *think* all
972 * we need is just to stop RX channel. This seems to work on all
973 * tested SoCs. --ebs
974 *
975 * If we need the full reset, we might just trigger the workqueue
976 * and do it async... a bit nasty but should work --BenH
977 */
978 dev->mcast_pending = 0;
979 emac_rx_disable(dev);
980 if (rmr & EMAC_RMR_MAE)
981 emac_hash_mc(dev);
982 out_be32(&p->rmr, rmr);
983 emac_rx_enable(dev);
984}
985
986/* Tx lock BH */
987static void emac_set_multicast_list(struct net_device *ndev)
988{
989 struct emac_instance *dev = netdev_priv(ndev);
990
991 DBG(dev, "multicast" NL);
992
993 BUG_ON(!netif_running(dev->ndev));
994
995 if (dev->no_mcast) {
996 dev->mcast_pending = 1;
997 return;
998 }
999
1000 mutex_lock(&dev->link_lock);
 1001 __emac_set_multicast_list(dev);
 1002 mutex_unlock(&dev->link_lock);
1003}
1004
1005static int emac_set_mac_address(struct net_device *ndev, void *sa)
1006{
1007 struct emac_instance *dev = netdev_priv(ndev);
1008 struct sockaddr *addr = sa;
1009 struct emac_regs __iomem *p = dev->emacp;
1010
1011 if (!is_valid_ether_addr(addr->sa_data))
1012 return -EADDRNOTAVAIL;
1013
1014 mutex_lock(&dev->link_lock);
1015
1016 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
1017
1018 emac_rx_disable(dev);
1019 emac_tx_disable(dev);
1020 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
1021 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
1022 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
1023 ndev->dev_addr[5]);
1024 emac_tx_enable(dev);
1025 emac_rx_enable(dev);
1026
1027 mutex_unlock(&dev->link_lock);
1028
1029 return 0;
1030}
1031
1032static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
1033{
1034 int rx_sync_size = emac_rx_sync_size(new_mtu);
1035 int rx_skb_size = emac_rx_skb_size(new_mtu);
1036 int i, ret = 0;
 1037 int mr1_jumbo_bit_change = 0;
1038
1039 mutex_lock(&dev->link_lock);
1040 emac_netif_stop(dev);
1041 emac_rx_disable(dev);
1042 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1043
1044 if (dev->rx_sg_skb) {
1045 ++dev->estats.rx_dropped_resize;
1046 dev_kfree_skb(dev->rx_sg_skb);
1047 dev->rx_sg_skb = NULL;
1048 }
1049
1050 /* Make a first pass over RX ring and mark BDs ready, dropping
1051 * non-processed packets on the way. We need this as a separate pass
1052 * to simplify error recovery in the case of allocation failure later.
1053 */
1054 for (i = 0; i < NUM_RX_BUFF; ++i) {
1055 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
1056 ++dev->estats.rx_dropped_resize;
1057
1058 dev->rx_desc[i].data_len = 0;
1059 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
1060 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1061 }
1062
1063 /* Reallocate RX ring only if bigger skb buffers are required */
1064 if (rx_skb_size <= dev->rx_skb_size)
1065 goto skip;
1066
1067 /* Second pass, allocate new skbs */
1068 for (i = 0; i < NUM_RX_BUFF; ++i) {
1069 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
1070 if (!skb) {
1071 ret = -ENOMEM;
1072 goto oom;
1073 }
1074
1075 BUG_ON(!dev->rx_skb[i]);
1076 dev_kfree_skb(dev->rx_skb[i]);
1077
1078 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1079 dev->rx_desc[i].data_ptr =
1080 dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
1081 DMA_FROM_DEVICE) + 2;
1082 dev->rx_skb[i] = skb;
1083 }
1084 skip:
1085 /* Check if we need to change "Jumbo" bit in MR1 */
1086 if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
1087 mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
1088 (dev->ndev->mtu > ETH_DATA_LEN);
1089 } else {
1090 mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
1091 (dev->ndev->mtu > ETH_DATA_LEN);
1092 }
1093
1094 if (mr1_jumbo_bit_change) {
1095 /* This is to prevent starting RX channel in emac_rx_enable() */
1096 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1097
1098 dev->ndev->mtu = new_mtu;
1099 emac_full_tx_reset(dev);
1100 }
1101
1102 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
1103 oom:
1104 /* Restart RX */
1105 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1106 dev->rx_slot = 0;
1107 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1108 emac_rx_enable(dev);
1109 emac_netif_start(dev);
1110 mutex_unlock(&dev->link_lock);
1111
1112 return ret;
1113}
1114
1115/* Process ctx, rtnl_lock semaphore */
1116static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1117{
1118 struct emac_instance *dev = netdev_priv(ndev);
1119 int ret = 0;
1120
1121 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1122
1123 if (netif_running(ndev)) {
 1124 /* Check if we really need to reinitialize RX ring */
1125 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1126 ret = emac_resize_rx_ring(dev, new_mtu);
1127 }
1128
1129 if (!ret) {
1130 ndev->mtu = new_mtu;
1131 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1132 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1133 }
1134
1135 return ret;
1136}
1137
1138static void emac_clean_tx_ring(struct emac_instance *dev)
1139{
1140 int i;
1141
1142 for (i = 0; i < NUM_TX_BUFF; ++i) {
1143 if (dev->tx_skb[i]) {
1144 dev_kfree_skb(dev->tx_skb[i]);
1145 dev->tx_skb[i] = NULL;
1146 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1147 ++dev->estats.tx_dropped;
1148 }
1149 dev->tx_desc[i].ctrl = 0;
1150 dev->tx_desc[i].data_ptr = 0;
1151 }
1152}
1153
1154static void emac_clean_rx_ring(struct emac_instance *dev)
1155{
1156 int i;
1157
1158 for (i = 0; i < NUM_RX_BUFF; ++i)
1159 if (dev->rx_skb[i]) {
1160 dev->rx_desc[i].ctrl = 0;
1161 dev_kfree_skb(dev->rx_skb[i]);
1162 dev->rx_skb[i] = NULL;
1163 dev->rx_desc[i].data_ptr = 0;
1164 }
1165
1166 if (dev->rx_sg_skb) {
1167 dev_kfree_skb(dev->rx_sg_skb);
1168 dev->rx_sg_skb = NULL;
1169 }
1170}
1171
1172static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1173 gfp_t flags)
1174{
1175 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1176 if (unlikely(!skb))
1177 return -ENOMEM;
1178
1179 dev->rx_skb[slot] = skb;
1180 dev->rx_desc[slot].data_len = 0;
1181
1182 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1183 dev->rx_desc[slot].data_ptr =
1184 dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1185 DMA_FROM_DEVICE) + 2;
1186 wmb();
1187 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1188 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1189
1190 return 0;
1191}
1192
1193static void emac_print_link_status(struct emac_instance *dev)
1194{
1195 if (netif_carrier_ok(dev->ndev))
1196 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1197 dev->ndev->name, dev->phy.speed,
1198 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1199 dev->phy.pause ? ", pause enabled" :
1200 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1201 else
1202 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1203}
1204
1205/* Process ctx, rtnl_lock semaphore */
1206static int emac_open(struct net_device *ndev)
1207{
1208 struct emac_instance *dev = netdev_priv(ndev);
1209 int err, i;
1210
1211 DBG(dev, "open" NL);
1212
1213 /* Setup error IRQ handler */
1214 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1215 if (err) {
1216 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1217 ndev->name, dev->emac_irq);
1218 return err;
1219 }
1220
1221 /* Allocate RX ring */
1222 for (i = 0; i < NUM_RX_BUFF; ++i)
1223 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1224 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1225 ndev->name);
1226 goto oom;
1227 }
1228
1229 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1230 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1231 dev->rx_sg_skb = NULL;
1232
1233 mutex_lock(&dev->link_lock);
 1234 dev->opened = 1;
 1235
 1236 /* Start PHY polling now.
1237 */
1238 if (dev->phy.address >= 0) {
1239 int link_poll_interval;
1240 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1241 dev->phy.def->ops->read_link(&dev->phy);
 1242 emac_rx_clk_default(dev);
1243 netif_carrier_on(dev->ndev);
1244 link_poll_interval = PHY_POLL_LINK_ON;
1245 } else {
 1246 emac_rx_clk_tx(dev);
1247 netif_carrier_off(dev->ndev);
1248 link_poll_interval = PHY_POLL_LINK_OFF;
1249 }
1250 dev->link_polling = 1;
1251 wmb();
1252 schedule_delayed_work(&dev->link_work, link_poll_interval);
1253 emac_print_link_status(dev);
1254 } else
1255 netif_carrier_on(dev->ndev);
1256
 1257 /* Required for Pause packet support in EMAC */
 1258 dev_mc_add_global(ndev, default_mcast_addr);
 1259
1260 emac_configure(dev);
1261 mal_poll_add(dev->mal, &dev->commac);
1262 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1263 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1264 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1265 emac_tx_enable(dev);
1266 emac_rx_enable(dev);
1267 emac_netif_start(dev);
1268
1269 mutex_unlock(&dev->link_lock);
1270
1271 return 0;
1272 oom:
1273 emac_clean_rx_ring(dev);
1274 free_irq(dev->emac_irq, dev);
1275
1276 return -ENOMEM;
1277}
1278
1279/* BHs disabled */
1280#if 0
1281static int emac_link_differs(struct emac_instance *dev)
1282{
1283 u32 r = in_be32(&dev->emacp->mr1);
1284
1285 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1286 int speed, pause, asym_pause;
1287
1288 if (r & EMAC_MR1_MF_1000)
1289 speed = SPEED_1000;
1290 else if (r & EMAC_MR1_MF_100)
1291 speed = SPEED_100;
1292 else
1293 speed = SPEED_10;
1294
1295 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1296 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1297 pause = 1;
1298 asym_pause = 0;
1299 break;
1300 case EMAC_MR1_APP:
1301 pause = 0;
1302 asym_pause = 1;
1303 break;
1304 default:
1305 pause = asym_pause = 0;
1306 }
1307 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1308 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
1309}
1310#endif
1311
1312static void emac_link_timer(struct work_struct *work)
1313{
1314 struct emac_instance *dev =
 1315 container_of(to_delayed_work(work),
1316 struct emac_instance, link_work);
1317 int link_poll_interval;
1318
1319 mutex_lock(&dev->link_lock);
1320 DBG2(dev, "link timer" NL);
1321
1322 if (!dev->opened)
1323 goto bail;
1324
1325 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1326 if (!netif_carrier_ok(dev->ndev)) {
 1327 emac_rx_clk_default(dev);
1328 /* Get new link parameters */
1329 dev->phy.def->ops->read_link(&dev->phy);
1330
1331 netif_carrier_on(dev->ndev);
1332 emac_netif_stop(dev);
1333 emac_full_tx_reset(dev);
1334 emac_netif_start(dev);
1335 emac_print_link_status(dev);
1336 }
1337 link_poll_interval = PHY_POLL_LINK_ON;
1338 } else {
1339 if (netif_carrier_ok(dev->ndev)) {
 1340 emac_rx_clk_tx(dev);
1341 netif_carrier_off(dev->ndev);
1342 netif_tx_disable(dev->ndev);
 1343 emac_reinitialize(dev);
1344 emac_print_link_status(dev);
1345 }
1346 link_poll_interval = PHY_POLL_LINK_OFF;
1347 }
1348 schedule_delayed_work(&dev->link_work, link_poll_interval);
 1349 bail:
1350 mutex_unlock(&dev->link_lock);
1351}
1352
1353static void emac_force_link_update(struct emac_instance *dev)
1354{
1355 netif_carrier_off(dev->ndev);
 1356 smp_rmb();
 1357 if (dev->link_polling) {
 1358 cancel_delayed_work_sync(&dev->link_work);
1359 if (dev->link_polling)
1360 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
1361 }
1362}
1363
1364/* Process ctx, rtnl_lock semaphore */
1365static int emac_close(struct net_device *ndev)
1366{
1367 struct emac_instance *dev = netdev_priv(ndev);
1368
1369 DBG(dev, "close" NL);
1370
1371 if (dev->phy.address >= 0) {
1372 dev->link_polling = 0;
 1373 cancel_delayed_work_sync(&dev->link_work);
1374 }
1375 mutex_lock(&dev->link_lock);
 1376 emac_netif_stop(dev);
1377 dev->opened = 0;
1378 mutex_unlock(&dev->link_lock);
1379
1380 emac_rx_disable(dev);
1381 emac_tx_disable(dev);
1382 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1383 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1384 mal_poll_del(dev->mal, &dev->commac);
1385
1386 emac_clean_tx_ring(dev);
1387 emac_clean_rx_ring(dev);
1388
1389 free_irq(dev->emac_irq, dev);
1390
1391 netif_carrier_off(ndev);
1392
1393 return 0;
1394}
1395
1396static inline u16 emac_tx_csum(struct emac_instance *dev,
1397 struct sk_buff *skb)
1398{
1399 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1400 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1401 ++dev->stats.tx_packets_csum;
1402 return EMAC_TX_CTRL_TAH_CSUM;
1403 }
1404 return 0;
1405}
1406
1407static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1408{
1409 struct emac_regs __iomem *p = dev->emacp;
1410 struct net_device *ndev = dev->ndev;
1411
1412 /* Send the packet out. If the if makes a significant perf
1413 * difference, then we can store the TMR0 value in "dev"
1414 * instead
1415 */
1416 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
 1417 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
 1418 else
 1419 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1420
1421 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1422 netif_stop_queue(ndev);
1423 DBG2(dev, "stopped TX queue" NL);
1424 }
1425
 1426 netif_trans_update(ndev);
1427 ++dev->stats.tx_packets;
1428 dev->stats.tx_bytes += len;
1429
 1430 return NETDEV_TX_OK;
1431}
1432
1433/* Tx lock BH */
1434static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1435{
1436 struct emac_instance *dev = netdev_priv(ndev);
1437 unsigned int len = skb->len;
1438 int slot;
1439
1440 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1441 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1442
1443 slot = dev->tx_slot++;
1444 if (dev->tx_slot == NUM_TX_BUFF) {
1445 dev->tx_slot = 0;
1446 ctrl |= MAL_TX_CTRL_WRAP;
1447 }
1448
1449 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1450
1451 dev->tx_skb[slot] = skb;
1452 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1453 skb->data, len,
1454 DMA_TO_DEVICE);
1455 dev->tx_desc[slot].data_len = (u16) len;
1456 wmb();
1457 dev->tx_desc[slot].ctrl = ctrl;
1458
1459 return emac_xmit_finish(dev, len);
1460}
1461
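/* Spread a buffer that exceeds MAL_MAX_TX_SIZE over additional TX
 * descriptors, MAL_MAX_TX_SIZE bytes at a time; returns the last slot used.
 */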
1462static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1463 u32 pd, int len, int last, u16 base_ctrl)
1464{
1465 while (1) {
1466 u16 ctrl = base_ctrl;
1467 int chunk = min(len, MAL_MAX_TX_SIZE);
1468 len -= chunk;
1469
1470 slot = (slot + 1) % NUM_TX_BUFF;
1471
1472 if (last && !len)
1473 ctrl |= MAL_TX_CTRL_LAST;
1474 if (slot == NUM_TX_BUFF - 1)
1475 ctrl |= MAL_TX_CTRL_WRAP;
1476
1477 dev->tx_skb[slot] = NULL;
1478 dev->tx_desc[slot].data_ptr = pd;
1479 dev->tx_desc[slot].data_len = (u16) chunk;
1480 dev->tx_desc[slot].ctrl = ctrl;
1481 ++dev->tx_cnt;
1482
1483 if (!len)
1484 break;
1485
1486 pd += chunk;
1487 }
1488 return slot;
1489}
1490
1491/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
1492static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1493{
1494 struct emac_instance *dev = netdev_priv(ndev);
1495 int nr_frags = skb_shinfo(skb)->nr_frags;
1496 int len = skb->len, chunk;
1497 int slot, i;
1498 u16 ctrl;
1499 u32 pd;
1500
1501 /* This is common "fast" path */
1502 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1503 return emac_start_xmit(skb, ndev);
1504
1505 len -= skb->data_len;
1506
1507 /* Note, this is only an *estimation*, we can still run out of empty
1508 * slots because of the additional fragmentation into
1509 * MAL_MAX_TX_SIZE-sized chunks
1510 */
1511 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1512 goto stop_queue;
1513
1514 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1515 emac_tx_csum(dev, skb);
1516 slot = dev->tx_slot;
1517
1518 /* skb data */
1519 dev->tx_skb[slot] = NULL;
1520 chunk = min(len, MAL_MAX_TX_SIZE);
1521 dev->tx_desc[slot].data_ptr = pd =
1522 dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1523 dev->tx_desc[slot].data_len = (u16) chunk;
1524 len -= chunk;
1525 if (unlikely(len))
1526 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1527 ctrl);
1528 /* skb fragments */
1529 for (i = 0; i < nr_frags; ++i) {
1530 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
 1531 len = skb_frag_size(frag);
1532
1533 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1534 goto undo_frame;
1535
1536 pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
1537 DMA_TO_DEVICE);
1538
1539 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1540 ctrl);
1541 }
1542
1543 DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1544
1545 /* Attach skb to the last slot so we don't release it too early */
1546 dev->tx_skb[slot] = skb;
1547
1548 /* Send the packet out */
1549 if (dev->tx_slot == NUM_TX_BUFF - 1)
1550 ctrl |= MAL_TX_CTRL_WRAP;
1551 wmb();
1552 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1553 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1554
1555 return emac_xmit_finish(dev, skb->len);
1556
1557 undo_frame:
1558 /* Well, too bad. Our previous estimation was overly optimistic.
1559 * Undo everything.
1560 */
1561 while (slot != dev->tx_slot) {
1562 dev->tx_desc[slot].ctrl = 0;
1563 --dev->tx_cnt;
1564 if (--slot < 0)
1565 slot = NUM_TX_BUFF - 1;
1566 }
1567 ++dev->estats.tx_undo;
1568
1569 stop_queue:
1570 netif_stop_queue(ndev);
1571 DBG2(dev, "stopped TX queue" NL);
 1572 return NETDEV_TX_BUSY;
 1573}
1574
1575/* Tx lock BHs */
1576static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1577{
1578 struct emac_error_stats *st = &dev->estats;
1579
1580 DBG(dev, "BD TX error %04x" NL, ctrl);
1581
1582 ++st->tx_bd_errors;
1583 if (ctrl & EMAC_TX_ST_BFCS)
1584 ++st->tx_bd_bad_fcs;
1585 if (ctrl & EMAC_TX_ST_LCS)
1586 ++st->tx_bd_carrier_loss;
1587 if (ctrl & EMAC_TX_ST_ED)
1588 ++st->tx_bd_excessive_deferral;
1589 if (ctrl & EMAC_TX_ST_EC)
1590 ++st->tx_bd_excessive_collisions;
1591 if (ctrl & EMAC_TX_ST_LC)
1592 ++st->tx_bd_late_collision;
1593 if (ctrl & EMAC_TX_ST_MC)
1594 ++st->tx_bd_multple_collisions;
1595 if (ctrl & EMAC_TX_ST_SC)
1596 ++st->tx_bd_single_collision;
1597 if (ctrl & EMAC_TX_ST_UR)
1598 ++st->tx_bd_underrun;
1599 if (ctrl & EMAC_TX_ST_SQE)
1600 ++st->tx_bd_sqe;
1601}
1602
1603static void emac_poll_tx(void *param)
1604{
1605 struct emac_instance *dev = param;
1606 u32 bad_mask;
1607
1608 DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
1609
1610 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1611 bad_mask = EMAC_IS_BAD_TX_TAH;
1612 else
1613 bad_mask = EMAC_IS_BAD_TX;
1614
1615 netif_tx_lock_bh(dev->ndev);
1616 if (dev->tx_cnt) {
1617 u16 ctrl;
1618 int slot = dev->ack_slot, n = 0;
1619 again:
1620 ctrl = dev->tx_desc[slot].ctrl;
1621 if (!(ctrl & MAL_TX_CTRL_READY)) {
1622 struct sk_buff *skb = dev->tx_skb[slot];
1623 ++n;
1624
1625 if (skb) {
1626 dev_kfree_skb(skb);
1627 dev->tx_skb[slot] = NULL;
1628 }
1629 slot = (slot + 1) % NUM_TX_BUFF;
1630
1631 if (unlikely(ctrl & bad_mask))
1632 emac_parse_tx_error(dev, ctrl);
1633
1634 if (--dev->tx_cnt)
1635 goto again;
1636 }
1637 if (n) {
1638 dev->ack_slot = slot;
1639 if (netif_queue_stopped(dev->ndev) &&
1640 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1641 netif_wake_queue(dev->ndev);
1642
1643 DBG2(dev, "tx %d pkts" NL, n);
1644 }
1645 }
1646 netif_tx_unlock_bh(dev->ndev);
1647}
1648
1649static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1650 int len)
1651{
1652 struct sk_buff *skb = dev->rx_skb[slot];
1653
1654 DBG2(dev, "recycle %d %d" NL, slot, len);
1655
1656 if (len)
1657 dma_map_single(&dev->ofdev->dev, skb->data - 2,
1658 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1659
1660 dev->rx_desc[slot].data_len = 0;
1661 wmb();
1662 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1663 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1664}
1665
1666static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1667{
1668 struct emac_error_stats *st = &dev->estats;
1669
1670 DBG(dev, "BD RX error %04x" NL, ctrl);
1671
1672 ++st->rx_bd_errors;
1673 if (ctrl & EMAC_RX_ST_OE)
1674 ++st->rx_bd_overrun;
1675 if (ctrl & EMAC_RX_ST_BP)
1676 ++st->rx_bd_bad_packet;
1677 if (ctrl & EMAC_RX_ST_RP)
1678 ++st->rx_bd_runt_packet;
1679 if (ctrl & EMAC_RX_ST_SE)
1680 ++st->rx_bd_short_event;
1681 if (ctrl & EMAC_RX_ST_AE)
1682 ++st->rx_bd_alignment_error;
1683 if (ctrl & EMAC_RX_ST_BFCS)
1684 ++st->rx_bd_bad_fcs;
1685 if (ctrl & EMAC_RX_ST_PTL)
1686 ++st->rx_bd_packet_too_long;
1687 if (ctrl & EMAC_RX_ST_ORE)
1688 ++st->rx_bd_out_of_range;
1689 if (ctrl & EMAC_RX_ST_IRE)
1690 ++st->rx_bd_in_range;
1691}
1692
1693static inline void emac_rx_csum(struct emac_instance *dev,
1694 struct sk_buff *skb, u16 ctrl)
1695{
 1696#ifdef CONFIG_IBM_EMAC_TAH
1697 if (!ctrl && dev->tah_dev) {
1698 skb->ip_summed = CHECKSUM_UNNECESSARY;
1699 ++dev->stats.rx_packets_csum;
1700 }
1701#endif
1702}
1703
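/* Append the contents of the current RX slot to the scatter/gather skb
 * being assembled; the partial packet is dropped if it would exceed the
 * allocated skb size.
 */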
1704static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1705{
1706 if (likely(dev->rx_sg_skb != NULL)) {
1707 int len = dev->rx_desc[slot].data_len;
1708 int tot_len = dev->rx_sg_skb->len + len;
1709
1710 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1711 ++dev->estats.rx_dropped_mtu;
1712 dev_kfree_skb(dev->rx_sg_skb);
1713 dev->rx_sg_skb = NULL;
1714 } else {
 1715 memcpy(skb_tail_pointer(dev->rx_sg_skb),
1716 dev->rx_skb[slot]->data, len);
1717 skb_put(dev->rx_sg_skb, len);
1718 emac_recycle_rx_skb(dev, slot, len);
1719 return 0;
1720 }
1721 }
1722 emac_recycle_rx_skb(dev, slot, 0);
1723 return -1;
1724}
1725
1726/* NAPI poll context */
1727static int emac_poll_rx(void *param, int budget)
1728{
1729 struct emac_instance *dev = param;
1730 int slot = dev->rx_slot, received = 0;
1731
1732 DBG2(dev, "poll_rx(%d)" NL, budget);
1733
1734 again:
1735 while (budget > 0) {
1736 int len;
1737 struct sk_buff *skb;
1738 u16 ctrl = dev->rx_desc[slot].ctrl;
1739
1740 if (ctrl & MAL_RX_CTRL_EMPTY)
1741 break;
1742
1743 skb = dev->rx_skb[slot];
1744 mb();
1745 len = dev->rx_desc[slot].data_len;
1746
1747 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1748 goto sg;
1749
1750 ctrl &= EMAC_BAD_RX_MASK;
1751 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1752 emac_parse_rx_error(dev, ctrl);
1753 ++dev->estats.rx_dropped_error;
1754 emac_recycle_rx_skb(dev, slot, 0);
1755 len = 0;
1756 goto next;
1757 }
1758
1759 if (len < ETH_HLEN) {
1760 ++dev->estats.rx_dropped_stack;
1761 emac_recycle_rx_skb(dev, slot, len);
1762 goto next;
1763 }
1764
1765 if (len && len < EMAC_RX_COPY_THRESH) {
1766 struct sk_buff *copy_skb =
1767 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1768 if (unlikely(!copy_skb))
1769 goto oom;
1770
1771 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
 1772 memcpy(copy_skb->data - 2, skb->data - 2, len + 2);
1773 emac_recycle_rx_skb(dev, slot, len);
1774 skb = copy_skb;
1775 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1776 goto oom;
1777
1778 skb_put(skb, len);
1779 push_packet:
1780 skb->protocol = eth_type_trans(skb, dev->ndev);
1781 emac_rx_csum(dev, skb, ctrl);
1782
1783 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1784 ++dev->estats.rx_dropped_stack;
1785 next:
1786 ++dev->stats.rx_packets;
1787 skip:
1788 dev->stats.rx_bytes += len;
1789 slot = (slot + 1) % NUM_RX_BUFF;
1790 --budget;
1791 ++received;
1792 continue;
1793 sg:
1794 if (ctrl & MAL_RX_CTRL_FIRST) {
1795 BUG_ON(dev->rx_sg_skb);
1796 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1797 DBG(dev, "rx OOM %d" NL, slot);
1798 ++dev->estats.rx_dropped_oom;
1799 emac_recycle_rx_skb(dev, slot, 0);
1800 } else {
1801 dev->rx_sg_skb = skb;
1802 skb_put(skb, len);
1803 }
1804 } else if (!emac_rx_sg_append(dev, slot) &&
1805 (ctrl & MAL_RX_CTRL_LAST)) {
1806
1807 skb = dev->rx_sg_skb;
1808 dev->rx_sg_skb = NULL;
1809
1810 ctrl &= EMAC_BAD_RX_MASK;
1811 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1812 emac_parse_rx_error(dev, ctrl);
1813 ++dev->estats.rx_dropped_error;
1814 dev_kfree_skb(skb);
1815 len = 0;
1816 } else
1817 goto push_packet;
1818 }
1819 goto skip;
1820 oom:
1821 DBG(dev, "rx OOM %d" NL, slot);
1822 /* Drop the packet and recycle skb */
1823 ++dev->estats.rx_dropped_oom;
1824 emac_recycle_rx_skb(dev, slot, 0);
1825 goto next;
1826 }
1827
1828 if (received) {
1829 DBG2(dev, "rx %d BDs" NL, received);
1830 dev->rx_slot = slot;
1831 }
1832
1833 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1834 mb();
1835 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1836 DBG2(dev, "rx restart" NL);
1837 received = 0;
1838 goto again;
1839 }
1840
1841 if (dev->rx_sg_skb) {
1842 DBG2(dev, "dropping partial rx packet" NL);
1843 ++dev->estats.rx_dropped_error;
1844 dev_kfree_skb(dev->rx_sg_skb);
1845 dev->rx_sg_skb = NULL;
1846 }
1847
1848 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1849 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1850 emac_rx_enable(dev);
1851 dev->rx_slot = 0;
1852 }
1853 return received;
1854}
1855
1856/* NAPI poll context */
1857static int emac_peek_rx(void *param)
1858{
1859 struct emac_instance *dev = param;
1860
1861 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1862}
1863
1864/* NAPI poll context */
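/* Like emac_peek_rx(), but only reports work once a descriptor flagged
 * LAST is ready, so a partially received scatter/gather packet is not
 * counted as completed work.
 */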
1865static int emac_peek_rx_sg(void *param)
1866{
1867 struct emac_instance *dev = param;
1868
1869 int slot = dev->rx_slot;
1870 while (1) {
1871 u16 ctrl = dev->rx_desc[slot].ctrl;
1872 if (ctrl & MAL_RX_CTRL_EMPTY)
1873 return 0;
1874 else if (ctrl & MAL_RX_CTRL_LAST)
1875 return 1;
1876
1877 slot = (slot + 1) % NUM_RX_BUFF;
1878
1879 /* I'm just being paranoid here :) */
1880 if (unlikely(slot == dev->rx_slot))
1881 return 0;
1882 }
1883}
1884
1885/* Hard IRQ */
1886static void emac_rxde(void *param)
1887{
1888 struct emac_instance *dev = param;
1889
1890 ++dev->estats.rx_stopped;
1891 emac_rx_disable_async(dev);
1892}
1893
1894/* Hard IRQ */
1895static irqreturn_t emac_irq(int irq, void *dev_instance)
1896{
1897 struct emac_instance *dev = dev_instance;
1898 struct emac_regs __iomem *p = dev->emacp;
1899 struct emac_error_stats *st = &dev->estats;
1900 u32 isr;
1901
1902 spin_lock(&dev->lock);
1903
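	/* Descriptive note: the status register is latched and the same value
	 * is written straight back, which clears (acknowledges) the bits that
	 * are counted below.
	 */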
1904 isr = in_be32(&p->isr);
1905 out_be32(&p->isr, isr);
1906
1907 DBG(dev, "isr = %08x" NL, isr);
1908
1909 if (isr & EMAC4_ISR_TXPE)
1910 ++st->tx_parity;
1911 if (isr & EMAC4_ISR_RXPE)
1912 ++st->rx_parity;
1913 if (isr & EMAC4_ISR_TXUE)
1914 ++st->tx_underrun;
1915 if (isr & EMAC4_ISR_RXOE)
1916 ++st->rx_fifo_overrun;
1917 if (isr & EMAC_ISR_OVR)
1918 ++st->rx_overrun;
1919 if (isr & EMAC_ISR_BP)
1920 ++st->rx_bad_packet;
1921 if (isr & EMAC_ISR_RP)
1922 ++st->rx_runt_packet;
1923 if (isr & EMAC_ISR_SE)
1924 ++st->rx_short_event;
1925 if (isr & EMAC_ISR_ALE)
1926 ++st->rx_alignment_error;
1927 if (isr & EMAC_ISR_BFCS)
1928 ++st->rx_bad_fcs;
1929 if (isr & EMAC_ISR_PTLE)
1930 ++st->rx_packet_too_long;
1931 if (isr & EMAC_ISR_ORE)
1932 ++st->rx_out_of_range;
1933 if (isr & EMAC_ISR_IRE)
1934 ++st->rx_in_range;
1935 if (isr & EMAC_ISR_SQE)
1936 ++st->tx_sqe;
1937 if (isr & EMAC_ISR_TE)
1938 ++st->tx_errors;
1939
1940 spin_unlock(&dev->lock);
1941
1942 return IRQ_HANDLED;
1943}
1944
1945static struct net_device_stats *emac_stats(struct net_device *ndev)
1946{
1947 struct emac_instance *dev = netdev_priv(ndev);
1948 struct emac_stats *st = &dev->stats;
1949 struct emac_error_stats *est = &dev->estats;
065f4b69 1950 struct net_device_stats *nst = &ndev->stats;
1d3bb996
DG
1951 unsigned long flags;
1952
1953 DBG2(dev, "stats" NL);
1954
1955 /* Compute "legacy" statistics */
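	/* dev->lock is also taken from hard interrupt context (emac_irq),
	 * so the snapshot below is taken with interrupts disabled.
	 */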
1956 spin_lock_irqsave(&dev->lock, flags);
1957 nst->rx_packets = (unsigned long)st->rx_packets;
1958 nst->rx_bytes = (unsigned long)st->rx_bytes;
1959 nst->tx_packets = (unsigned long)st->tx_packets;
1960 nst->tx_bytes = (unsigned long)st->tx_bytes;
1961 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1962 est->rx_dropped_error +
1963 est->rx_dropped_resize +
1964 est->rx_dropped_mtu);
1965 nst->tx_dropped = (unsigned long)est->tx_dropped;
1966
1967 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1968 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1969 est->rx_fifo_overrun +
1970 est->rx_overrun);
1971 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1972 est->rx_alignment_error);
1973 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1974 est->rx_bad_fcs);
1975 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1976 est->rx_bd_short_event +
1977 est->rx_bd_packet_too_long +
1978 est->rx_bd_out_of_range +
1979 est->rx_bd_in_range +
1980 est->rx_runt_packet +
1981 est->rx_short_event +
1982 est->rx_packet_too_long +
1983 est->rx_out_of_range +
1984 est->rx_in_range);
1985
1986 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1987 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1988 est->tx_underrun);
1989 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1990 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1991 est->tx_bd_excessive_collisions +
1992 est->tx_bd_late_collision +
1993 est->tx_bd_multple_collisions);
1994 spin_unlock_irqrestore(&dev->lock, flags);
1995 return nst;
1996}
1997
1998static struct mal_commac_ops emac_commac_ops = {
1999 .poll_tx = &emac_poll_tx,
2000 .poll_rx = &emac_poll_rx,
2001 .peek_rx = &emac_peek_rx,
2002 .rxde = &emac_rxde,
2003};
2004
2005static struct mal_commac_ops emac_commac_sg_ops = {
2006 .poll_tx = &emac_poll_tx,
2007 .poll_rx = &emac_poll_rx,
2008 .peek_rx = &emac_peek_rx_sg,
2009 .rxde = &emac_rxde,
2010};
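/* The _sg variant is installed by emac_probe() for gigabit-capable PHYs;
 * it differs only in peek_rx, which must look for a descriptor flagged
 * MAL_RX_CTRL_LAST because a received frame may span several descriptors.
 */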
2011
2012/* Ethtool support */
e4ccf764
PR
2013static int emac_ethtool_get_link_ksettings(struct net_device *ndev,
2014 struct ethtool_link_ksettings *cmd)
1d3bb996
DG
2015{
2016 struct emac_instance *dev = netdev_priv(ndev);
e4ccf764 2017 u32 supported, advertising;
1d3bb996 2018
e4ccf764
PR
2019 supported = dev->phy.features;
2020 cmd->base.port = PORT_MII;
2021 cmd->base.phy_address = dev->phy.address;
1d3bb996
DG
2022
2023 mutex_lock(&dev->link_lock);
e4ccf764
PR
2024 advertising = dev->phy.advertising;
2025 cmd->base.autoneg = dev->phy.autoneg;
2026 cmd->base.speed = dev->phy.speed;
2027 cmd->base.duplex = dev->phy.duplex;
1d3bb996
DG
2028 mutex_unlock(&dev->link_lock);
2029
e4ccf764
PR
2030 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2031 supported);
2032 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
2033 advertising);
2034
1d3bb996
DG
2035 return 0;
2036}
2037
e4ccf764
PR
2038static int
2039emac_ethtool_set_link_ksettings(struct net_device *ndev,
2040 const struct ethtool_link_ksettings *cmd)
1d3bb996
DG
2041{
2042 struct emac_instance *dev = netdev_priv(ndev);
2043 u32 f = dev->phy.features;
e4ccf764
PR
2044 u32 advertising;
2045
2046 ethtool_convert_link_mode_to_legacy_u32(&advertising,
2047 cmd->link_modes.advertising);
1d3bb996
DG
2048
2049 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
e4ccf764 2050 cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, advertising);
1d3bb996
DG
2051
2052 /* Basic sanity checks */
2053 if (dev->phy.address < 0)
2054 return -EOPNOTSUPP;
e4ccf764
PR
2055 if (cmd->base.autoneg != AUTONEG_ENABLE &&
2056 cmd->base.autoneg != AUTONEG_DISABLE)
1d3bb996 2057 return -EINVAL;
e4ccf764 2058 if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0)
1d3bb996 2059 return -EINVAL;
e4ccf764 2060 if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL)
1d3bb996
DG
2061 return -EINVAL;
2062
e4ccf764
PR
2063 if (cmd->base.autoneg == AUTONEG_DISABLE) {
2064 switch (cmd->base.speed) {
1d3bb996 2065 case SPEED_10:
e4ccf764 2066 if (cmd->base.duplex == DUPLEX_HALF &&
8e95a202 2067 !(f & SUPPORTED_10baseT_Half))
1d3bb996 2068 return -EINVAL;
e4ccf764 2069 if (cmd->base.duplex == DUPLEX_FULL &&
8e95a202 2070 !(f & SUPPORTED_10baseT_Full))
1d3bb996
DG
2071 return -EINVAL;
2072 break;
2073 case SPEED_100:
e4ccf764 2074 if (cmd->base.duplex == DUPLEX_HALF &&
8e95a202 2075 !(f & SUPPORTED_100baseT_Half))
1d3bb996 2076 return -EINVAL;
e4ccf764 2077 if (cmd->base.duplex == DUPLEX_FULL &&
8e95a202 2078 !(f & SUPPORTED_100baseT_Full))
1d3bb996
DG
2079 return -EINVAL;
2080 break;
2081 case SPEED_1000:
e4ccf764 2082 if (cmd->base.duplex == DUPLEX_HALF &&
8e95a202 2083 !(f & SUPPORTED_1000baseT_Half))
1d3bb996 2084 return -EINVAL;
e4ccf764 2085 if (cmd->base.duplex == DUPLEX_FULL &&
8e95a202 2086 !(f & SUPPORTED_1000baseT_Full))
1d3bb996
DG
2087 return -EINVAL;
2088 break;
2089 default:
2090 return -EINVAL;
2091 }
2092
2093 mutex_lock(&dev->link_lock);
e4ccf764
PR
2094 dev->phy.def->ops->setup_forced(&dev->phy, cmd->base.speed,
2095 cmd->base.duplex);
1d3bb996
DG
2096 mutex_unlock(&dev->link_lock);
2097
2098 } else {
2099 if (!(f & SUPPORTED_Autoneg))
2100 return -EINVAL;
2101
2102 mutex_lock(&dev->link_lock);
2103 dev->phy.def->ops->setup_aneg(&dev->phy,
e4ccf764 2104 (advertising & f) |
1d3bb996
DG
2105 (dev->phy.advertising &
2106 (ADVERTISED_Pause |
2107 ADVERTISED_Asym_Pause)));
2108 mutex_unlock(&dev->link_lock);
2109 }
2110 emac_force_link_update(dev);
2111
2112 return 0;
2113}
2114
2115static void emac_ethtool_get_ringparam(struct net_device *ndev,
2116 struct ethtool_ringparam *rp)
2117{
2118 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2119 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2120}
2121
2122static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2123 struct ethtool_pauseparam *pp)
2124{
2125 struct emac_instance *dev = netdev_priv(ndev);
2126
2127 mutex_lock(&dev->link_lock);
2128 if ((dev->phy.features & SUPPORTED_Autoneg) &&
2129 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2130 pp->autoneg = 1;
2131
2132 if (dev->phy.duplex == DUPLEX_FULL) {
2133 if (dev->phy.pause)
2134 pp->rx_pause = pp->tx_pause = 1;
2135 else if (dev->phy.asym_pause)
2136 pp->tx_pause = 1;
2137 }
2138 mutex_unlock(&dev->link_lock);
2139}
2140
1d3bb996
DG
2141static int emac_get_regs_len(struct emac_instance *dev)
2142{
1d3bb996 2143 return sizeof(struct emac_ethtool_regs_subhdr) +
5369c71f 2144 sizeof(struct emac_regs);
1d3bb996
DG
2145}
2146
2147static int emac_ethtool_get_regs_len(struct net_device *ndev)
2148{
2149 struct emac_instance *dev = netdev_priv(ndev);
2150 int size;
2151
2152 size = sizeof(struct emac_ethtool_regs_hdr) +
2153 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2154 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2155 size += zmii_get_regs_len(dev->zmii_dev);
2156 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2157 size += rgmii_get_regs_len(dev->rgmii_dev);
2158 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2159 size += tah_get_regs_len(dev->tah_dev);
2160
2161 return size;
2162}
2163
2164static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2165{
2166 struct emac_ethtool_regs_subhdr *hdr = buf;
2167
2168 hdr->index = dev->cell_index;
5369c71f
IM
2169 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2170 hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
2171 } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
1d3bb996 2172 hdr->version = EMAC4_ETHTOOL_REGS_VER;
1d3bb996
DG
2173 } else {
2174 hdr->version = EMAC_ETHTOOL_REGS_VER;
1d3bb996 2175 }
5369c71f
IM
2176 memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
2177 return (void *)(hdr + 1) + sizeof(struct emac_regs);
1d3bb996
DG
2178}
2179
2180static void emac_ethtool_get_regs(struct net_device *ndev,
2181 struct ethtool_regs *regs, void *buf)
2182{
2183 struct emac_instance *dev = netdev_priv(ndev);
2184 struct emac_ethtool_regs_hdr *hdr = buf;
2185
2186 hdr->components = 0;
2187 buf = hdr + 1;
2188
2189 buf = mal_dump_regs(dev->mal, buf);
2190 buf = emac_dump_regs(dev, buf);
2191 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2192 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2193 buf = zmii_dump_regs(dev->zmii_dev, buf);
2194 }
2195 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2196 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2197 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2198 }
2199 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2200 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2201 buf = tah_dump_regs(dev->tah_dev, buf);
2202 }
2203}
2204
2205static int emac_ethtool_nway_reset(struct net_device *ndev)
2206{
2207 struct emac_instance *dev = netdev_priv(ndev);
2208 int res = 0;
2209
2210 DBG(dev, "nway_reset" NL);
2211
2212 if (dev->phy.address < 0)
2213 return -EOPNOTSUPP;
2214
2215 mutex_lock(&dev->link_lock);
2216 if (!dev->phy.autoneg) {
2217 res = -EINVAL;
2218 goto out;
2219 }
2220
2221 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2222 out:
2223 mutex_unlock(&dev->link_lock);
2224 emac_force_link_update(dev);
2225 return res;
2226}
2227
15f0a394 2228static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
1d3bb996 2229{
15f0a394
BH
2230 if (stringset == ETH_SS_STATS)
2231 return EMAC_ETHTOOL_STATS_COUNT;
2232 else
2233 return -EINVAL;
1d3bb996
DG
2234}
2235
2236static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2237 u8 * buf)
2238{
2239 if (stringset == ETH_SS_STATS)
2240 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2241}
2242
2243static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2244 struct ethtool_stats *estats,
2245 u64 * tmp_stats)
2246{
2247 struct emac_instance *dev = netdev_priv(ndev);
2248
2249 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2250 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2251 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2252}
2253
2254static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2255 struct ethtool_drvinfo *info)
2256{
2257 struct emac_instance *dev = netdev_priv(ndev);
2258
7826d43f
JP
2259 strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
2260 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2261 snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
2262 dev->cell_index, dev->ofdev->dev.of_node->full_name);
1d3bb996
DG
2263}
2264
2265static const struct ethtool_ops emac_ethtool_ops = {
1d3bb996
DG
2266 .get_drvinfo = emac_ethtool_get_drvinfo,
2267
2268 .get_regs_len = emac_ethtool_get_regs_len,
2269 .get_regs = emac_ethtool_get_regs,
2270
2271 .nway_reset = emac_ethtool_nway_reset,
2272
2273 .get_ringparam = emac_ethtool_get_ringparam,
2274 .get_pauseparam = emac_ethtool_get_pauseparam,
2275
1d3bb996 2276 .get_strings = emac_ethtool_get_strings,
15f0a394 2277 .get_sset_count = emac_ethtool_get_sset_count,
1d3bb996
DG
2278 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2279
2280 .get_link = ethtool_op_get_link,
e4ccf764
PR
2281 .get_link_ksettings = emac_ethtool_get_link_ksettings,
2282 .set_link_ksettings = emac_ethtool_set_link_ksettings,
1d3bb996
DG
2283};
2284
2285static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2286{
2287 struct emac_instance *dev = netdev_priv(ndev);
0fa0ee05 2288 struct mii_ioctl_data *data = if_mii(rq);
1d3bb996
DG
2289
2290 DBG(dev, "ioctl %08x" NL, cmd);
2291
2292 if (dev->phy.address < 0)
2293 return -EOPNOTSUPP;
2294
2295 switch (cmd) {
2296 case SIOCGMIIPHY:
0fa0ee05 2297 data->phy_id = dev->phy.address;
1d3bb996
DG
2298 /* Fall through */
2299 case SIOCGMIIREG:
0fa0ee05
BH
2300 data->val_out = emac_mdio_read(ndev, dev->phy.address,
2301 data->reg_num);
1d3bb996
DG
2302 return 0;
2303
2304 case SIOCSMIIREG:
0fa0ee05
BH
2305 emac_mdio_write(ndev, dev->phy.address, data->reg_num,
2306 data->val_in);
1d3bb996
DG
2307 return 0;
2308 default:
2309 return -EOPNOTSUPP;
2310 }
2311}
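/*
 * Illustrative only, not part of the driver: a minimal userspace sketch of
 * how the MII ioctls handled above are typically exercised. The interface
 * name "eth0" is an assumption and error handling is omitted.
 *
 *	int sk = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(sk, SIOCGMIIPHY, &ifr);	// PHY address returned in mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(sk, SIOCGMIIREG, &ifr);	// register value returned in mii->val_out
 */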
2312
2313struct emac_depentry {
2314 u32 phandle;
2315 struct device_node *node;
2dc11581 2316 struct platform_device *ofdev;
1d3bb996
DG
2317 void *drvdata;
2318};
2319
2320#define EMAC_DEP_MAL_IDX 0
2321#define EMAC_DEP_ZMII_IDX 1
2322#define EMAC_DEP_RGMII_IDX 2
2323#define EMAC_DEP_TAH_IDX 3
2324#define EMAC_DEP_MDIO_IDX 4
2325#define EMAC_DEP_PREV_IDX 5
2326#define EMAC_DEP_COUNT 6
2327
fe17dc1e 2328static int emac_check_deps(struct emac_instance *dev,
1dd06ae8 2329 struct emac_depentry *deps)
1d3bb996
DG
2330{
2331 int i, there = 0;
2332 struct device_node *np;
2333
2334 for (i = 0; i < EMAC_DEP_COUNT; i++) {
 2335 /* no dependency on that item, all right */
2336 if (deps[i].phandle == 0) {
2337 there++;
2338 continue;
2339 }
2340 /* special case for blist as the dependency might go away */
2341 if (i == EMAC_DEP_PREV_IDX) {
2342 np = *(dev->blist - 1);
2343 if (np == NULL) {
2344 deps[i].phandle = 0;
2345 there++;
2346 continue;
2347 }
2348 if (deps[i].node == NULL)
2349 deps[i].node = of_node_get(np);
2350 }
2351 if (deps[i].node == NULL)
2352 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2353 if (deps[i].node == NULL)
2354 continue;
2355 if (deps[i].ofdev == NULL)
2356 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2357 if (deps[i].ofdev == NULL)
2358 continue;
2359 if (deps[i].drvdata == NULL)
bc353832 2360 deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
1d3bb996
DG
2361 if (deps[i].drvdata != NULL)
2362 there++;
2363 }
807540ba 2364 return there == EMAC_DEP_COUNT;
1d3bb996
DG
2365}
2366
2367static void emac_put_deps(struct emac_instance *dev)
2368{
5ac0fd00
ME
2369 of_dev_put(dev->mal_dev);
2370 of_dev_put(dev->zmii_dev);
2371 of_dev_put(dev->rgmii_dev);
2372 of_dev_put(dev->mdio_dev);
2373 of_dev_put(dev->tah_dev);
1d3bb996
DG
2374}
2375
1dd06ae8
GKH
2376static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
2377 void *data)
1d3bb996
DG
2378{
 2379 /* We are only interested in devices getting bound to a driver */
2380 if (action == BUS_NOTIFY_BOUND_DRIVER)
2381 wake_up_all(&emac_probe_wait);
2382 return 0;
2383}
2384
fe17dc1e 2385static struct notifier_block emac_of_bus_notifier = {
1d3bb996
DG
2386 .notifier_call = emac_of_bus_notify
2387};
2388
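/* Wait (with a timeout) for every device this EMAC depends on -- MAL,
 * ZMII, RGMII, TAH, MDIO and the previous EMAC in the boot list -- to be
 * probed, using a platform-bus notifier to wake up whenever another
 * driver gets bound.
 */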
fe17dc1e 2389static int emac_wait_deps(struct emac_instance *dev)
1d3bb996
DG
2390{
2391 struct emac_depentry deps[EMAC_DEP_COUNT];
2392 int i, err;
2393
2394 memset(&deps, 0, sizeof(deps));
2395
2396 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2397 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2398 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2399 if (dev->tah_ph)
2400 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2401 if (dev->mdio_ph)
2402 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2403 if (dev->blist && dev->blist > emac_boot_list)
2404 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
1ab1d63a 2405 bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
1d3bb996
DG
2406 wait_event_timeout(emac_probe_wait,
2407 emac_check_deps(dev, deps),
2408 EMAC_PROBE_DEP_TIMEOUT);
1ab1d63a 2409 bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
1d3bb996
DG
2410 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2411 for (i = 0; i < EMAC_DEP_COUNT; i++) {
5ac0fd00 2412 of_node_put(deps[i].node);
f339664c 2413 if (err)
1d3bb996
DG
2414 of_dev_put(deps[i].ofdev);
2415 }
2416 if (err == 0) {
2417 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2418 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2419 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2420 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2421 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2422 }
5ac0fd00 2423 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
1d3bb996
DG
2424 return err;
2425}
2426
fe17dc1e 2427static int emac_read_uint_prop(struct device_node *np, const char *name,
1dd06ae8 2428 u32 *val, int fatal)
1d3bb996
DG
2429{
2430 int len;
2431 const u32 *prop = of_get_property(np, name, &len);
2432 if (prop == NULL || len < sizeof(u32)) {
2433 if (fatal)
2434 printk(KERN_ERR "%s: missing %s property\n",
2435 np->full_name, name);
2436 return -ENODEV;
2437 }
2438 *val = *prop;
2439 return 0;
2440}
2441
a577ca6b
CL
2442static void emac_adjust_link(struct net_device *ndev)
2443{
2444 struct emac_instance *dev = netdev_priv(ndev);
2445 struct phy_device *phy = dev->phy_dev;
2446
2447 dev->phy.autoneg = phy->autoneg;
2448 dev->phy.speed = phy->speed;
2449 dev->phy.duplex = phy->duplex;
2450 dev->phy.pause = phy->pause;
2451 dev->phy.asym_pause = phy->asym_pause;
2452 dev->phy.advertising = phy->advertising;
2453}
2454
2455static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
2456{
2457 int ret = emac_mdio_read(bus->priv, addr, regnum);
2458 /* This is a workaround for powered down ports/phys.
2459 * In the wild, this was seen on the Cisco Meraki MX60(W).
2460 * This hardware disables ports as part of the handoff
2461 * procedure. Accessing the ports will lead to errors
2462 * (-ETIMEDOUT, -EREMOTEIO) that do more harm than good.
2463 */
2464 return ret < 0 ? 0xffff : ret;
2465}
2466
2467static int emac_mii_bus_write(struct mii_bus *bus, int addr,
2468 int regnum, u16 val)
2469{
2470 emac_mdio_write(bus->priv, addr, regnum, val);
2471 return 0;
2472}
2473
2474static int emac_mii_bus_reset(struct mii_bus *bus)
2475{
2476 struct emac_instance *dev = netdev_priv(bus->priv);
2477
2478 return emac_reset(dev);
2479}
2480
9065bc38
CL
2481static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
2482 struct phy_device *phy_dev)
2483{
2484 phy_dev->autoneg = phy->autoneg;
2485 phy_dev->speed = phy->speed;
2486 phy_dev->duplex = phy->duplex;
2487 phy_dev->advertising = phy->advertising;
2488 return phy_start_aneg(phy_dev);
2489}
2490
a577ca6b
CL
2491static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
2492{
2493 struct net_device *ndev = phy->dev;
2494 struct emac_instance *dev = netdev_priv(ndev);
2495
a577ca6b 2496 phy->autoneg = AUTONEG_ENABLE;
a577ca6b 2497 phy->advertising = advertise;
9065bc38 2498 return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
a577ca6b
CL
2499}
2500
2501static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
2502{
2503 struct net_device *ndev = phy->dev;
2504 struct emac_instance *dev = netdev_priv(ndev);
2505
a577ca6b
CL
2506 phy->autoneg = AUTONEG_DISABLE;
2507 phy->speed = speed;
2508 phy->duplex = fd;
9065bc38 2509 return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
a577ca6b
CL
2510}
2511
2512static int emac_mdio_poll_link(struct mii_phy *phy)
2513{
2514 struct net_device *ndev = phy->dev;
2515 struct emac_instance *dev = netdev_priv(ndev);
2516 int res;
2517
2518 res = phy_read_status(dev->phy_dev);
2519 if (res) {
2520 dev_err(&dev->ofdev->dev, "link update failed (%d).", res);
2521 return ethtool_op_get_link(ndev);
2522 }
2523
2524 return dev->phy_dev->link;
2525}
2526
2527static int emac_mdio_read_link(struct mii_phy *phy)
2528{
2529 struct net_device *ndev = phy->dev;
2530 struct emac_instance *dev = netdev_priv(ndev);
9065bc38 2531 struct phy_device *phy_dev = dev->phy_dev;
a577ca6b
CL
2532 int res;
2533
9065bc38 2534 res = phy_read_status(phy_dev);
a577ca6b
CL
2535 if (res)
2536 return res;
2537
9065bc38
CL
2538 phy->speed = phy_dev->speed;
2539 phy->duplex = phy_dev->duplex;
2540 phy->pause = phy_dev->pause;
2541 phy->asym_pause = phy_dev->asym_pause;
a577ca6b
CL
2542 return 0;
2543}
2544
2545static int emac_mdio_init_phy(struct mii_phy *phy)
2546{
2547 struct net_device *ndev = phy->dev;
2548 struct emac_instance *dev = netdev_priv(ndev);
2549
2550 phy_start(dev->phy_dev);
a577ca6b
CL
2551 return phy_init_hw(dev->phy_dev);
2552}
2553
2554static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
2555 .init = emac_mdio_init_phy,
2556 .setup_aneg = emac_mdio_setup_aneg,
2557 .setup_forced = emac_mdio_setup_forced,
2558 .poll_link = emac_mdio_poll_link,
2559 .read_link = emac_mdio_read_link,
2560};
2561
2562static int emac_dt_mdio_probe(struct emac_instance *dev)
2563{
2564 struct device_node *mii_np;
2565 int res;
2566
2567 mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
2568 if (!mii_np) {
2569 dev_err(&dev->ofdev->dev, "no mdio definition found.");
2570 return -ENODEV;
2571 }
2572
2573 if (!of_device_is_available(mii_np)) {
2574 res = -ENODEV;
2575 goto put_node;
2576 }
2577
2578 dev->mii_bus = devm_mdiobus_alloc(&dev->ofdev->dev);
2579 if (!dev->mii_bus) {
2580 res = -ENOMEM;
2581 goto put_node;
2582 }
2583
2584 dev->mii_bus->priv = dev->ndev;
2585 dev->mii_bus->parent = dev->ndev->dev.parent;
2586 dev->mii_bus->name = "emac_mdio";
2587 dev->mii_bus->read = &emac_mii_bus_read;
2588 dev->mii_bus->write = &emac_mii_bus_write;
2589 dev->mii_bus->reset = &emac_mii_bus_reset;
2590 snprintf(dev->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
2591 res = of_mdiobus_register(dev->mii_bus, mii_np);
2592 if (res) {
2593 dev_err(&dev->ofdev->dev, "cannot register MDIO bus %s (%d)",
2594 dev->mii_bus->name, res);
2595 }
2596
2597 put_node:
2598 of_node_put(mii_np);
2599 return res;
2600}
2601
2602static int emac_dt_phy_connect(struct emac_instance *dev,
2603 struct device_node *phy_handle)
2604{
a577ca6b
CL
2605 dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
2606 GFP_KERNEL);
2607 if (!dev->phy.def)
2608 return -ENOMEM;
2609
2610 dev->phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link,
2611 0, dev->phy_mode);
2612 if (!dev->phy_dev) {
2613 dev_err(&dev->ofdev->dev, "failed to connect to PHY.\n");
2614 return -ENODEV;
2615 }
2616
2617 dev->phy.def->phy_id = dev->phy_dev->drv->phy_id;
2618 dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
2619 dev->phy.def->name = dev->phy_dev->drv->name;
2620 dev->phy.def->ops = &emac_dt_mdio_phy_ops;
2621 dev->phy.features = dev->phy_dev->supported;
2622 dev->phy.address = dev->phy_dev->mdio.addr;
2623 dev->phy.mode = dev->phy_dev->interface;
2624 return 0;
2625}
2626
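/* Returns 1 when no "phy-handle" property is present (the caller then
 * falls back to the legacy PHY scan), 0 when the device-tree described
 * PHY was successfully attached, or a negative errno on failure.
 */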
2627static int emac_dt_phy_probe(struct emac_instance *dev)
2628{
2629 struct device_node *np = dev->ofdev->dev.of_node;
2630 struct device_node *phy_handle;
b793f081 2631 int res = 1;
a577ca6b
CL
2632
2633 phy_handle = of_parse_phandle(np, "phy-handle", 0);
2634
2635 if (phy_handle) {
2636 res = emac_dt_mdio_probe(dev);
2637 if (!res) {
2638 res = emac_dt_phy_connect(dev, phy_handle);
2639 if (res)
2640 mdiobus_unregister(dev->mii_bus);
2641 }
2642 }
2643
2644 of_node_put(phy_handle);
2645 return res;
2646}
2647
fe17dc1e 2648static int emac_init_phy(struct emac_instance *dev)
1d3bb996 2649{
61c7a080 2650 struct device_node *np = dev->ofdev->dev.of_node;
1d3bb996
DG
2651 struct net_device *ndev = dev->ndev;
2652 u32 phy_map, adv;
2653 int i;
2654
2655 dev->phy.dev = ndev;
2656 dev->phy.mode = dev->phy_mode;
2657
a577ca6b
CL
2658 /* PHY-less configuration. */
2659 if ((dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) ||
2660 of_phy_is_fixed_link(np)) {
1d3bb996
DG
2661 emac_reset(dev);
2662
a577ca6b 2663 /* PHY-less configuration. */
1d3bb996 2664 dev->phy.address = -1;
9e3cb294
VG
2665 dev->phy.features = SUPPORTED_MII;
2666 if (emac_phy_supports_gige(dev->phy_mode))
2667 dev->phy.features |= SUPPORTED_1000baseT_Full;
2668 else
2669 dev->phy.features |= SUPPORTED_100baseT_Full;
1d3bb996
DG
2670 dev->phy.pause = 1;
2671
a577ca6b
CL
2672 if (of_phy_is_fixed_link(np)) {
2673 int res = emac_dt_mdio_probe(dev);
2674
2675 if (!res) {
2676 res = of_phy_register_fixed_link(np);
2677 if (res)
2678 mdiobus_unregister(dev->mii_bus);
2679 }
2680 return res;
2681 }
1d3bb996
DG
2682 return 0;
2683 }
2684
2685 mutex_lock(&emac_phy_map_lock);
2686 phy_map = dev->phy_map | busy_phy_map;
2687
2688 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2689
2690 dev->phy.mdio_read = emac_mdio_read;
2691 dev->phy.mdio_write = emac_mdio_write;
2692
0925ab5d
VB
2693 /* Enable internal clock source */
2694#ifdef CONFIG_PPC_DCR_NATIVE
2695 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2696 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
11121e30
VB
2697#endif
2698 /* PHY clock workaround */
2699 emac_rx_clk_tx(dev);
2700
 2701 /* Enable internal clock source on 440GX */
2702#ifdef CONFIG_PPC_DCR_NATIVE
2703 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2704 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
0925ab5d 2705#endif
1d3bb996
DG
2706 /* Configure EMAC with defaults so we can at least use MDIO
2707 * This is needed mostly for 440GX
2708 */
2709 if (emac_phy_gpcs(dev->phy.mode)) {
2710 /* XXX
2711 * Make GPCS PHY address equal to EMAC index.
2712 * We probably should take into account busy_phy_map
2713 * and/or phy_map here.
2714 *
2715 * Note that the busy_phy_map is currently global
2716 * while it should probably be per-ASIC...
2717 */
9e3cb294
VG
2718 dev->phy.gpcs_address = dev->gpcs_address;
2719 if (dev->phy.gpcs_address == 0xffffffff)
2720 dev->phy.address = dev->cell_index;
1d3bb996
DG
2721 }
2722
2723 emac_configure(dev);
2724
a577ca6b
CL
2725 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2726 int res = emac_dt_phy_probe(dev);
2727
b793f081
CL
2728 switch (res) {
2729 case 1:
2730 /* No phy-handle property configured.
2731 * Continue with the existing phy probe
2732 * and setup code.
2733 */
2734 break;
2735
2736 case 0:
2737 mutex_unlock(&emac_phy_map_lock);
a577ca6b
CL
2738 goto init_phy;
2739
b793f081
CL
2740 default:
2741 mutex_unlock(&emac_phy_map_lock);
2742 dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
2743 res);
2744 return res;
2745 }
a577ca6b
CL
2746 }
2747
1d3bb996
DG
2748 if (dev->phy_address != 0xffffffff)
2749 phy_map = ~(1 << dev->phy_address);
2750
2751 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2752 if (!(phy_map & 1)) {
2753 int r;
2754 busy_phy_map |= 1 << i;
2755
2756 /* Quick check if there is a PHY at the address */
2757 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2758 if (r == 0xffff || r < 0)
2759 continue;
2760 if (!emac_mii_phy_probe(&dev->phy, i))
2761 break;
2762 }
0925ab5d
VB
2763
2764 /* Enable external clock source */
2765#ifdef CONFIG_PPC_DCR_NATIVE
2766 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2767 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2768#endif
1d3bb996
DG
2769 mutex_unlock(&emac_phy_map_lock);
2770 if (i == 0x20) {
2771 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2772 return -ENXIO;
2773 }
2774
a577ca6b 2775 init_phy:
1d3bb996
DG
2776 /* Init PHY */
2777 if (dev->phy.def->ops->init)
2778 dev->phy.def->ops->init(&dev->phy);
2779
2780 /* Disable any PHY features not supported by the platform */
2781 dev->phy.def->features &= ~dev->phy_feat_exc;
ae5d3372 2782 dev->phy.features &= ~dev->phy_feat_exc;
1d3bb996
DG
2783
2784 /* Setup initial link parameters */
2785 if (dev->phy.features & SUPPORTED_Autoneg) {
2786 adv = dev->phy.features;
2787 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2788 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2789 /* Restart autonegotiation */
2790 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2791 } else {
2792 u32 f = dev->phy.def->features;
2793 int speed = SPEED_10, fd = DUPLEX_HALF;
2794
2795 /* Select highest supported speed/duplex */
2796 if (f & SUPPORTED_1000baseT_Full) {
2797 speed = SPEED_1000;
2798 fd = DUPLEX_FULL;
2799 } else if (f & SUPPORTED_1000baseT_Half)
2800 speed = SPEED_1000;
2801 else if (f & SUPPORTED_100baseT_Full) {
2802 speed = SPEED_100;
2803 fd = DUPLEX_FULL;
2804 } else if (f & SUPPORTED_100baseT_Half)
2805 speed = SPEED_100;
2806 else if (f & SUPPORTED_10baseT_Full)
2807 fd = DUPLEX_FULL;
2808
2809 /* Force link parameters */
2810 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2811 }
2812 return 0;
2813}
2814
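/*
 * Illustrative only, not part of the driver: a made-up device-tree fragment
 * showing the properties parsed below. The node name, unit address and all
 * values are invented for the example; "reg", "interrupts" and the parent
 * bus "clock-frequency" property (also required) are omitted here.
 *
 *	EMAC0: ethernet@ef600c00 {
 *		device_type = "network";
 *		compatible = "ibm,emac4sync";
 *		cell-index = <0>;
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		max-frame-size = <9000>;
 *		rx-fifo-size = <4096>;
 *		tx-fifo-size = <2048>;
 *		phy-mode = "rgmii";
 *		local-mac-address = [00 00 00 00 00 00];
 *	};
 */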
fe17dc1e 2815static int emac_init_config(struct emac_instance *dev)
1d3bb996 2816{
61c7a080 2817 struct device_node *np = dev->ofdev->dev.of_node;
1d3bb996 2818 const void *p;
1d3bb996
DG
2819
2820 /* Read config from device-tree */
2821 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2822 return -ENXIO;
2823 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2824 return -ENXIO;
2825 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2826 return -ENXIO;
2827 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2828 return -ENXIO;
2829 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
3d5d96ac 2830 dev->max_mtu = ETH_DATA_LEN;
1d3bb996
DG
2831 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2832 dev->rx_fifo_size = 2048;
2833 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2834 dev->tx_fifo_size = 2048;
2835 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2836 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2837 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2838 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2839 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2840 dev->phy_address = 0xffffffff;
2841 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2842 dev->phy_map = 0xffffffff;
9e3cb294
VG
2843 if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
2844 dev->gpcs_address = 0xffffffff;
1d3bb996
DG
2845 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2846 return -ENXIO;
2847 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2848 dev->tah_ph = 0;
2849 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
63b6cad7 2850 dev->tah_port = 0;
1d3bb996
DG
2851 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2852 dev->mdio_ph = 0;
2853 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
a419aef8 2854 dev->zmii_ph = 0;
1d3bb996 2855 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
a419aef8 2856 dev->zmii_port = 0xffffffff;
1d3bb996 2857 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
a419aef8 2858 dev->rgmii_ph = 0;
1d3bb996 2859 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
a419aef8 2860 dev->rgmii_port = 0xffffffff;
1d3bb996
DG
2861 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2862 dev->fifo_entry_size = 16;
2863 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2864 dev->mal_burst_size = 256;
2865
2866 /* PHY mode needs some decoding */
4157ef1b
SG
2867 dev->phy_mode = of_get_phy_mode(np);
2868 if (dev->phy_mode < 0)
2869 dev->phy_mode = PHY_MODE_NA;
1d3bb996
DG
2870
2871 /* Check EMAC version */
05781ccd
GE
2872 if (of_device_is_compatible(np, "ibm,emac4sync")) {
2873 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
9e3cb294
VG
2874 if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2875 of_device_is_compatible(np, "ibm,emac-460gt"))
2876 dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
9616a755
BH
2877 if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2878 of_device_is_compatible(np, "ibm,emac-405exr"))
2879 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
ae5d3372
DD
2880 if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
2881 dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
2882 EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
2883 EMAC_FTR_460EX_PHY_CLK_FIX);
2884 }
05781ccd 2885 } else if (of_device_is_compatible(np, "ibm,emac4")) {
1d3bb996 2886 dev->features |= EMAC_FTR_EMAC4;
0925ab5d
VB
2887 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2888 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
11121e30
VB
2889 } else {
2890 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2891 of_device_is_compatible(np, "ibm,emac-440gr"))
2892 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
1ff0fcfc 2893 if (of_device_is_compatible(np, "ibm,emac-405ez")) {
3b3bceef 2894#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
b68d185a 2895 dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
1ff0fcfc
JB
2896#else
2897 printk(KERN_ERR "%s: Flow control not disabled!\n",
2898 np->full_name);
2899 return -ENXIO;
2900#endif
2901 }
2902
0925ab5d 2903 }
bff713b5
BH
2904
2905 /* Fixup some feature bits based on the device tree */
2906 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
1d3bb996 2907 dev->features |= EMAC_FTR_STACR_OC_INVERT;
bff713b5
BH
2908 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2909 dev->features |= EMAC_FTR_HAS_NEW_STACR;
1d3bb996 2910
bff713b5
BH
2911 /* CAB lacks the appropriate properties */
2912 if (of_device_is_compatible(np, "ibm,emac-axon"))
2913 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2914 EMAC_FTR_STACR_OC_INVERT;
2915
2916 /* Enable TAH/ZMII/RGMII features as found */
1d3bb996 2917 if (dev->tah_ph != 0) {
3b3bceef 2918#ifdef CONFIG_IBM_EMAC_TAH
1d3bb996
DG
2919 dev->features |= EMAC_FTR_HAS_TAH;
2920#else
2921 printk(KERN_ERR "%s: TAH support not enabled !\n",
2922 np->full_name);
2923 return -ENXIO;
2924#endif
2925 }
2926
2927 if (dev->zmii_ph != 0) {
3b3bceef 2928#ifdef CONFIG_IBM_EMAC_ZMII
1d3bb996
DG
2929 dev->features |= EMAC_FTR_HAS_ZMII;
2930#else
2931 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2932 np->full_name);
2933 return -ENXIO;
2934#endif
2935 }
2936
2937 if (dev->rgmii_ph != 0) {
3b3bceef 2938#ifdef CONFIG_IBM_EMAC_RGMII
1d3bb996
DG
2939 dev->features |= EMAC_FTR_HAS_RGMII;
2940#else
2941 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2942 np->full_name);
2943 return -ENXIO;
2944#endif
2945 }
2946
2947 /* Read MAC-address */
2948 p = of_get_property(np, "local-mac-address", NULL);
2949 if (p == NULL) {
2950 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2951 np->full_name);
2952 return -ENXIO;
2953 }
d458cdf7 2954 memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
1d3bb996 2955
05781ccd
GE
2956 /* IAHT and GAHT filter parameterization */
2957 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2958 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2959 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2960 } else {
2961 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2962 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2963 }
2964
1d3bb996
DG
2965 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2966 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2967 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2968 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2969 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
2970
2971 return 0;
2972}
2973
15efc02b
AB
2974static const struct net_device_ops emac_netdev_ops = {
2975 .ndo_open = emac_open,
2976 .ndo_stop = emac_close,
2977 .ndo_get_stats = emac_stats,
afc4b13d 2978 .ndo_set_rx_mode = emac_set_multicast_list,
15efc02b
AB
2979 .ndo_do_ioctl = emac_ioctl,
2980 .ndo_tx_timeout = emac_tx_timeout,
2981 .ndo_validate_addr = eth_validate_addr,
01afd972 2982 .ndo_set_mac_address = emac_set_mac_address,
15efc02b 2983 .ndo_start_xmit = emac_start_xmit,
15efc02b
AB
2984};
2985
2986static const struct net_device_ops emac_gige_netdev_ops = {
2987 .ndo_open = emac_open,
2988 .ndo_stop = emac_close,
2989 .ndo_get_stats = emac_stats,
afc4b13d 2990 .ndo_set_rx_mode = emac_set_multicast_list,
15efc02b
AB
2991 .ndo_do_ioctl = emac_ioctl,
2992 .ndo_tx_timeout = emac_tx_timeout,
2993 .ndo_validate_addr = eth_validate_addr,
01afd972 2994 .ndo_set_mac_address = emac_set_mac_address,
15efc02b
AB
2995 .ndo_start_xmit = emac_start_xmit_sg,
2996 .ndo_change_mtu = emac_change_mtu,
2997};
2998
fe17dc1e 2999static int emac_probe(struct platform_device *ofdev)
1d3bb996
DG
3000{
3001 struct net_device *ndev;
3002 struct emac_instance *dev;
61c7a080 3003 struct device_node *np = ofdev->dev.of_node;
1d3bb996
DG
3004 struct device_node **blist = NULL;
3005 int err, i;
3006
be63c09a
JB
 3007 /* Skip unused/unwired EMACs. We leave the check for an unused
3008 * property here for now, but new flat device trees should set a
3009 * status property to "disabled" instead.
3010 */
3011 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
3d722562
HB
3012 return -ENODEV;
3013
1d3bb996
DG
3014 /* Find ourselves in the bootlist if we are there */
3015 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3016 if (emac_boot_list[i] == np)
3017 blist = &emac_boot_list[i];
3018
3019 /* Allocate our net_device structure */
3020 err = -ENOMEM;
3021 ndev = alloc_etherdev(sizeof(struct emac_instance));
41de8d4c 3022 if (!ndev)
1d3bb996 3023 goto err_gone;
41de8d4c 3024
1d3bb996
DG
3025 dev = netdev_priv(ndev);
3026 dev->ndev = ndev;
3027 dev->ofdev = ofdev;
3028 dev->blist = blist;
1d3bb996
DG
3029 SET_NETDEV_DEV(ndev, &ofdev->dev);
3030
3031 /* Initialize some embedded data structures */
3032 mutex_init(&dev->mdio_lock);
3033 mutex_init(&dev->link_lock);
3034 spin_lock_init(&dev->lock);
3035 INIT_WORK(&dev->reset_work, emac_reset_work);
3036
3037 /* Init various config data based on device-tree */
3038 err = emac_init_config(dev);
3039 if (err != 0)
3040 goto err_free;
3041
3042 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
3043 dev->emac_irq = irq_of_parse_and_map(np, 0);
3044 dev->wol_irq = irq_of_parse_and_map(np, 1);
99c1790e 3045 if (!dev->emac_irq) {
1d3bb996
DG
3046 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
3047 goto err_free;
3048 }
3049 ndev->irq = dev->emac_irq;
3050
3051 /* Map EMAC regs */
3052 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
3053 printk(KERN_ERR "%s: Can't get registers address\n",
3054 np->full_name);
3055 goto err_irq_unmap;
3056 }
 3057 // TODO: request_mem_region
05781ccd 3058 dev->emacp = ioremap(dev->rsrc_regs.start,
28f65c11 3059 resource_size(&dev->rsrc_regs));
1d3bb996
DG
3060 if (dev->emacp == NULL) {
3061 printk(KERN_ERR "%s: Can't map device registers!\n",
3062 np->full_name);
3063 err = -ENOMEM;
3064 goto err_irq_unmap;
3065 }
3066
3067 /* Wait for dependent devices */
3068 err = emac_wait_deps(dev);
3069 if (err) {
3070 printk(KERN_ERR
3071 "%s: Timeout waiting for dependent devices\n",
3072 np->full_name);
 3073 /* display more info about what's missing? */
3074 goto err_reg_unmap;
3075 }
bc353832 3076 dev->mal = platform_get_drvdata(dev->mal_dev);
1d3bb996 3077 if (dev->mdio_dev != NULL)
bc353832 3078 dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
1d3bb996
DG
3079
3080 /* Register with MAL */
3081 dev->commac.ops = &emac_commac_ops;
3082 dev->commac.dev = dev;
3083 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
3084 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
3085 err = mal_register_commac(dev->mal, &dev->commac);
3086 if (err) {
3087 printk(KERN_ERR "%s: failed to register with mal %s!\n",
61c7a080 3088 np->full_name, dev->mal_dev->dev.of_node->full_name);
1d3bb996
DG
3089 goto err_rel_deps;
3090 }
3091 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
3092 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
3093
3094 /* Get pointers to BD rings */
3095 dev->tx_desc =
3096 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
3097 dev->rx_desc =
3098 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
3099
3100 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
3101 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
3102
3103 /* Clean rings */
3104 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
3105 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
ab9b30cc
SN
3106 memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
3107 memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
1d3bb996
DG
3108
3109 /* Attach to ZMII, if needed */
3110 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
3111 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
3112 goto err_unreg_commac;
3113
3114 /* Attach to RGMII, if needed */
3115 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
3116 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
3117 goto err_detach_zmii;
3118
3119 /* Attach to TAH, if needed */
3120 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
3121 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
3122 goto err_detach_rgmii;
3123
3124 /* Set some link defaults before we can find out real parameters */
3125 dev->phy.speed = SPEED_100;
3126 dev->phy.duplex = DUPLEX_FULL;
3127 dev->phy.autoneg = AUTONEG_DISABLE;
3128 dev->phy.pause = dev->phy.asym_pause = 0;
3129 dev->stop_timeout = STOP_TIMEOUT_100;
3130 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
3131
ae5d3372
DD
 3132 /* Some SoCs, like the APM821xx, do not support half-duplex mode. */
3133 if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
3134 dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
3135 SUPPORTED_100baseT_Half |
3136 SUPPORTED_10baseT_Half);
3137 }
3138
1d3bb996
DG
3139 /* Find PHY if any */
3140 err = emac_init_phy(dev);
3141 if (err != 0)
3142 goto err_detach_tah;
3143
5e4011e2
MM
3144 if (dev->tah_dev) {
3145 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
3146 ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
3147 }
1d3bb996 3148 ndev->watchdog_timeo = 5 * HZ;
1d3bb996 3149 if (emac_phy_supports_gige(dev->phy_mode)) {
15efc02b 3150 ndev->netdev_ops = &emac_gige_netdev_ops;
1d3bb996 3151 dev->commac.ops = &emac_commac_sg_ops;
15efc02b
AB
3152 } else
3153 ndev->netdev_ops = &emac_netdev_ops;
7ad24ea4 3154 ndev->ethtool_ops = &emac_ethtool_ops;
1d3bb996 3155
3d5d96ac
JW
3156 /* MTU range: 46 - 1500 or whatever is in OF */
3157 ndev->min_mtu = EMAC_MIN_MTU;
3158 ndev->max_mtu = dev->max_mtu;
3159
1d3bb996 3160 netif_carrier_off(ndev);
1d3bb996
DG
3161
3162 err = register_netdev(ndev);
3163 if (err) {
3164 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
3165 np->full_name, err);
3166 goto err_detach_tah;
3167 }
3168
 3169 /* Set our drvdata last as we don't want it visible until we are
3170 * fully initialized
3171 */
3172 wmb();
bc353832 3173 platform_set_drvdata(ofdev, dev);
1d3bb996
DG
3174
 3175 /* There's a new kid in town! Let's tell everybody */
3176 wake_up_all(&emac_probe_wait);
3177
3178
7c510e4b
JB
3179 printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
3180 ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
1d3bb996 3181
9e3cb294
VG
3182 if (dev->phy_mode == PHY_MODE_SGMII)
3183 printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
3184
1d3bb996
DG
3185 if (dev->phy.address >= 0)
3186 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
3187 dev->phy.def->name, dev->phy.address);
3188
1d3bb996
DG
3189 /* Life is good */
3190 return 0;
3191
3192 /* I have a bad feeling about this ... */
3193
3194 err_detach_tah:
3195 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
3196 tah_detach(dev->tah_dev, dev->tah_port);
3197 err_detach_rgmii:
3198 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
3199 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
3200 err_detach_zmii:
3201 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
3202 zmii_detach(dev->zmii_dev, dev->zmii_port);
3203 err_unreg_commac:
3204 mal_unregister_commac(dev->mal, &dev->commac);
3205 err_rel_deps:
3206 emac_put_deps(dev);
3207 err_reg_unmap:
3208 iounmap(dev->emacp);
3209 err_irq_unmap:
99c1790e 3210 if (dev->wol_irq)
1d3bb996 3211 irq_dispose_mapping(dev->wol_irq);
99c1790e 3212 if (dev->emac_irq)
1d3bb996
DG
3213 irq_dispose_mapping(dev->emac_irq);
3214 err_free:
52933f05 3215 free_netdev(ndev);
1d3bb996
DG
3216 err_gone:
3217 /* if we were on the bootlist, remove us as we won't show up and
3218 * wake up all waiters to notify them in case they were waiting
3219 * on us
3220 */
3221 if (blist) {
3222 *blist = NULL;
3223 wake_up_all(&emac_probe_wait);
3224 }
3225 return err;
3226}
3227
fe17dc1e 3228static int emac_remove(struct platform_device *ofdev)
1d3bb996 3229{
bc353832 3230 struct emac_instance *dev = platform_get_drvdata(ofdev);
1d3bb996
DG
3231
3232 DBG(dev, "remove" NL);
3233
1d3bb996
DG
3234 unregister_netdev(dev->ndev);
3235
23f333a2 3236 cancel_work_sync(&dev->reset_work);
61dbcece 3237
1d3bb996
DG
3238 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
3239 tah_detach(dev->tah_dev, dev->tah_port);
3240 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
3241 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
3242 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
3243 zmii_detach(dev->zmii_dev, dev->zmii_port);
3244
a577ca6b
CL
3245 if (dev->phy_dev)
3246 phy_disconnect(dev->phy_dev);
3247
3248 if (dev->mii_bus)
3249 mdiobus_unregister(dev->mii_bus);
3250
d6f14483
WG
3251 busy_phy_map &= ~(1 << dev->phy.address);
3252 DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
3253
1d3bb996
DG
3254 mal_unregister_commac(dev->mal, &dev->commac);
3255 emac_put_deps(dev);
3256
1d3bb996
DG
3257 iounmap(dev->emacp);
3258
99c1790e 3259 if (dev->wol_irq)
1d3bb996 3260 irq_dispose_mapping(dev->wol_irq);
99c1790e 3261 if (dev->emac_irq)
1d3bb996
DG
3262 irq_dispose_mapping(dev->emac_irq);
3263
52933f05 3264 free_netdev(dev->ndev);
1d3bb996
DG
3265
3266 return 0;
3267}
3268
3269/* XXX Features in here should be replaced by properties... */
47b61667 3270static const struct of_device_id emac_match[] =
1d3bb996
DG
3271{
3272 {
3273 .type = "network",
3274 .compatible = "ibm,emac",
3275 },
3276 {
3277 .type = "network",
3278 .compatible = "ibm,emac4",
3279 },
05781ccd
GE
3280 {
3281 .type = "network",
3282 .compatible = "ibm,emac4sync",
3283 },
1d3bb996
DG
3284 {},
3285};
e72701ac 3286MODULE_DEVICE_TABLE(of, emac_match);
1d3bb996 3287
74888760 3288static struct platform_driver emac_driver = {
4018294b
GL
3289 .driver = {
3290 .name = "emac",
4018294b
GL
3291 .of_match_table = emac_match,
3292 },
1d3bb996
DG
3293 .probe = emac_probe,
3294 .remove = emac_remove,
3295};
3296
3297static void __init emac_make_bootlist(void)
3298{
3299 struct device_node *np = NULL;
c9003ec8 3300 int j, max, i = 0;
1d3bb996
DG
3301 int cell_indices[EMAC_BOOT_LIST_SIZE];
3302
3303 /* Collect EMACs */
3304 while((np = of_find_all_nodes(np)) != NULL) {
3305 const u32 *idx;
3306
3307 if (of_match_node(emac_match, np) == NULL)
3308 continue;
3309 if (of_get_property(np, "unused", NULL))
3310 continue;
3311 idx = of_get_property(np, "cell-index", NULL);
3312 if (idx == NULL)
3313 continue;
3314 cell_indices[i] = *idx;
3315 emac_boot_list[i++] = of_node_get(np);
3316 if (i >= EMAC_BOOT_LIST_SIZE) {
3317 of_node_put(np);
3318 break;
3319 }
3320 }
3321 max = i;
3322
3323 /* Bubble sort them (doh, what a creative algorithm :-) */
3324 for (i = 0; max > 1 && (i < (max - 1)); i++)
3325 for (j = i; j < max; j++) {
3326 if (cell_indices[i] > cell_indices[j]) {
c9003ec8
FF
3327 swap(emac_boot_list[i], emac_boot_list[j]);
3328 swap(cell_indices[i], cell_indices[j]);
1d3bb996
DG
3329 }
3330 }
3331}
3332
3333static int __init emac_init(void)
3334{
3335 int rc;
3336
3337 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3338
1d3bb996
DG
3339 /* Build EMAC boot list */
3340 emac_make_bootlist();
3341
3342 /* Init submodules */
3343 rc = mal_init();
3344 if (rc)
3345 goto err;
3346 rc = zmii_init();
3347 if (rc)
3348 goto err_mal;
3349 rc = rgmii_init();
3350 if (rc)
3351 goto err_zmii;
3352 rc = tah_init();
3353 if (rc)
3354 goto err_rgmii;
74888760 3355 rc = platform_driver_register(&emac_driver);
1d3bb996
DG
3356 if (rc)
3357 goto err_tah;
3358
3359 return 0;
3360
3361 err_tah:
3362 tah_exit();
3363 err_rgmii:
3364 rgmii_exit();
3365 err_zmii:
3366 zmii_exit();
3367 err_mal:
3368 mal_exit();
3369 err:
3370 return rc;
3371}
3372
3373static void __exit emac_exit(void)
3374{
3375 int i;
3376
74888760 3377 platform_driver_unregister(&emac_driver);
1d3bb996
DG
3378
3379 tah_exit();
3380 rgmii_exit();
3381 zmii_exit();
3382 mal_exit();
1d3bb996
DG
3383
3384 /* Destroy EMAC boot list */
3385 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
5ac0fd00 3386 of_node_put(emac_boot_list[i]);
1d3bb996
DG
3387}
3388
3389module_init(emac_init);
3390module_exit(emac_exit);