]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. | |
3 | * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) | |
4 | * | |
7dd6a2aa | 5 | * Right now, I am very wasteful with the buffers. I allocate memory |
1da177e4 LT |
6 | * pages and then divide them into 2K frame buffers. This way I know I |
7 | * have buffers large enough to hold one frame within one buffer descriptor. | |
8 | * Once I get this working, I will use 64 or 128 byte CPM buffers, which | |
9 | * will be much more memory efficient and will easily handle lots of | |
10 | * small packets. | |
11 | * | |
12 | * Much better multiple PHY support by Magnus Damm. | |
13 | * Copyright (c) 2000 Ericsson Radio Systems AB. | |
14 | * | |
562d2f8c GU |
15 | * Support for FEC controller of ColdFire processors. |
16 | * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com) | |
7dd6a2aa GU |
17 | * |
18 | * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) | |
677177c5 | 19 | * Copyright (c) 2004-2006 Macq Electronique SA. |
1da177e4 LT |
20 | */ |
21 | ||
1da177e4 LT |
22 | #include <linux/module.h> |
23 | #include <linux/kernel.h> | |
24 | #include <linux/string.h> | |
25 | #include <linux/ptrace.h> | |
26 | #include <linux/errno.h> | |
27 | #include <linux/ioport.h> | |
28 | #include <linux/slab.h> | |
29 | #include <linux/interrupt.h> | |
30 | #include <linux/pci.h> | |
31 | #include <linux/init.h> | |
32 | #include <linux/delay.h> | |
33 | #include <linux/netdevice.h> | |
34 | #include <linux/etherdevice.h> | |
35 | #include <linux/skbuff.h> | |
36 | #include <linux/spinlock.h> | |
37 | #include <linux/workqueue.h> | |
38 | #include <linux/bitops.h> | |
6f501b17 SH |
39 | #include <linux/io.h> |
40 | #include <linux/irq.h> | |
196719ec | 41 | #include <linux/clk.h> |
ead73183 | 42 | #include <linux/platform_device.h> |
e6b043d5 | 43 | #include <linux/phy.h> |
5eb32bd0 | 44 | #include <linux/fec.h> |
1da177e4 | 45 | |
080853af | 46 | #include <asm/cacheflush.h> |
196719ec SH |
47 | |
48 | #ifndef CONFIG_ARCH_MXC | |
1da177e4 LT |
49 | #include <asm/coldfire.h> |
50 | #include <asm/mcfsim.h> | |
196719ec | 51 | #endif |
6f501b17 | 52 | |
1da177e4 | 53 | #include "fec.h" |
1da177e4 | 54 | |
196719ec SH |
55 | #ifdef CONFIG_ARCH_MXC |
56 | #include <mach/hardware.h> | |
57 | #define FEC_ALIGNMENT 0xf | |
58 | #else | |
59 | #define FEC_ALIGNMENT 0x3 | |
60 | #endif | |
61 | ||
ead73183 SH |
62 | /* |
63 | * Define the fixed address of the FEC hardware. | |
64 | */ | |
87f4abb4 | 65 | #if defined(CONFIG_M5272) |
1da177e4 LT |
66 | |
67 | static unsigned char fec_mac_default[] = { | |
68 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | |
69 | }; | |
70 | ||
71 | /* | |
72 | * Some hardware gets it MAC address out of local flash memory. | |
73 | * if this is non-zero then assume it is the address to get MAC from. | |
74 | */ | |
75 | #if defined(CONFIG_NETtel) | |
76 | #define FEC_FLASHMAC 0xf0006006 | |
77 | #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES) | |
78 | #define FEC_FLASHMAC 0xf0006000 | |
1da177e4 LT |
79 | #elif defined(CONFIG_CANCam) |
80 | #define FEC_FLASHMAC 0xf0020000 | |
7dd6a2aa GU |
81 | #elif defined (CONFIG_M5272C3) |
82 | #define FEC_FLASHMAC (0xffe04000 + 4) | |
83 | #elif defined(CONFIG_MOD5272) | |
84 | #define FEC_FLASHMAC 0xffc0406b | |
1da177e4 LT |
85 | #else |
86 | #define FEC_FLASHMAC 0 | |
87 | #endif | |
43be6366 | 88 | #endif /* CONFIG_M5272 */ |
ead73183 | 89 | |
1da177e4 LT |
90 | /* The number of Tx and Rx buffers. These are allocated from the page |
91 | * pool. The code may assume these are power of two, so it it best | |
92 | * to keep them that size. | |
93 | * We don't need to allocate pages for the transmitter. We just use | |
94 | * the skbuffer directly. | |
95 | */ | |
96 | #define FEC_ENET_RX_PAGES 8 | |
97 | #define FEC_ENET_RX_FRSIZE 2048 | |
98 | #define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE) | |
99 | #define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES) | |
100 | #define FEC_ENET_TX_FRSIZE 2048 | |
101 | #define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE) | |
102 | #define TX_RING_SIZE 16 /* Must be power of two */ | |
103 | #define TX_RING_MOD_MASK 15 /* for this to work */ | |
104 | ||
562d2f8c | 105 | #if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE) |
6b265293 | 106 | #error "FEC: descriptor ring size constants too large" |
562d2f8c GU |
107 | #endif |
108 | ||
22f6b860 | 109 | /* Interrupt events/masks. */ |
1da177e4 LT |
110 | #define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ |
111 | #define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ | |
112 | #define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */ | |
113 | #define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */ | |
114 | #define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */ | |
115 | #define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */ | |
116 | #define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */ | |
117 | #define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ | |
118 | #define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ | |
119 | #define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ | |
120 | ||
121 | /* The FEC stores dest/src/type, data, and checksum for receive packets. | |
122 | */ | |
123 | #define PKT_MAXBUF_SIZE 1518 | |
124 | #define PKT_MINBUF_SIZE 64 | |
125 | #define PKT_MAXBLR_SIZE 1520 | |
126 | ||
127 | ||
128 | /* | |
6b265293 | 129 | * The 5270/5271/5280/5282/532x RX control register also contains maximum frame |
1da177e4 LT |
130 | * size bits. Other FEC hardware does not, so we need to take that into |
131 | * account when setting it. | |
132 | */ | |
562d2f8c | 133 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ |
196719ec | 134 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC) |
1da177e4 LT |
135 | #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) |
136 | #else | |
137 | #define OPT_FRAME_SIZE 0 | |
138 | #endif | |
139 | ||
140 | /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and | |
141 | * tx_bd_base always point to the base of the buffer descriptors. The | |
142 | * cur_rx and cur_tx point to the currently available buffer. | |
143 | * The dirty_tx tracks the current buffer that is being sent by the | |
144 | * controller. The cur_tx and dirty_tx are equal under both completely | |
145 | * empty and completely full conditions. The empty/ready indicator in | |
146 | * the buffer descriptor determines the actual condition. | |
147 | */ | |
148 | struct fec_enet_private { | |
149 | /* Hardware registers of the FEC device */ | |
f44d6305 | 150 | void __iomem *hwp; |
1da177e4 | 151 | |
cb84d6e7 GU |
152 | struct net_device *netdev; |
153 | ||
ead73183 SH |
154 | struct clk *clk; |
155 | ||
1da177e4 LT |
156 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ |
157 | unsigned char *tx_bounce[TX_RING_SIZE]; | |
158 | struct sk_buff* tx_skbuff[TX_RING_SIZE]; | |
f0b3fbea | 159 | struct sk_buff* rx_skbuff[RX_RING_SIZE]; |
1da177e4 LT |
160 | ushort skb_cur; |
161 | ushort skb_dirty; | |
162 | ||
22f6b860 | 163 | /* CPM dual port RAM relative addresses */ |
4661e75b | 164 | dma_addr_t bd_dma; |
22f6b860 | 165 | /* Address of Rx and Tx buffers */ |
2e28532f SH |
166 | struct bufdesc *rx_bd_base; |
167 | struct bufdesc *tx_bd_base; | |
168 | /* The next free ring entry */ | |
169 | struct bufdesc *cur_rx, *cur_tx; | |
22f6b860 | 170 | /* The ring entries to be free()ed */ |
2e28532f SH |
171 | struct bufdesc *dirty_tx; |
172 | ||
1da177e4 | 173 | uint tx_full; |
3b2b74ca SS |
174 | /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ |
175 | spinlock_t hw_lock; | |
1da177e4 | 176 | |
e6b043d5 | 177 | struct platform_device *pdev; |
1da177e4 | 178 | |
e6b043d5 | 179 | int opened; |
1da177e4 | 180 | |
e6b043d5 BW |
181 | /* Phylib and MDIO interface */ |
182 | struct mii_bus *mii_bus; | |
183 | struct phy_device *phy_dev; | |
184 | int mii_timeout; | |
185 | uint phy_speed; | |
5eb32bd0 | 186 | phy_interface_t phy_interface; |
1da177e4 | 187 | int index; |
1da177e4 | 188 | int link; |
1da177e4 | 189 | int full_duplex; |
1da177e4 LT |
190 | }; |
191 | ||
7d12e780 | 192 | static irqreturn_t fec_enet_interrupt(int irq, void * dev_id); |
1da177e4 LT |
193 | static void fec_enet_tx(struct net_device *dev); |
194 | static void fec_enet_rx(struct net_device *dev); | |
195 | static int fec_enet_close(struct net_device *dev); | |
1da177e4 LT |
196 | static void fec_restart(struct net_device *dev, int duplex); |
197 | static void fec_stop(struct net_device *dev); | |
1da177e4 | 198 | |
e6b043d5 BW |
199 | /* FEC MII MMFR bits definition */ |
200 | #define FEC_MMFR_ST (1 << 30) | |
201 | #define FEC_MMFR_OP_READ (2 << 28) | |
202 | #define FEC_MMFR_OP_WRITE (1 << 28) | |
203 | #define FEC_MMFR_PA(v) ((v & 0x1f) << 23) | |
204 | #define FEC_MMFR_RA(v) ((v & 0x1f) << 18) | |
205 | #define FEC_MMFR_TA (2 << 16) | |
206 | #define FEC_MMFR_DATA(v) (v & 0xffff) | |
1da177e4 | 207 | |
e6b043d5 | 208 | #define FEC_MII_TIMEOUT 10000 |
1da177e4 | 209 | |
22f6b860 SH |
210 | /* Transmitter timeout */ |
211 | #define TX_TIMEOUT (2 * HZ) | |
1da177e4 | 212 | |
1da177e4 LT |
213 | static int |
214 | fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
215 | { | |
f44d6305 | 216 | struct fec_enet_private *fep = netdev_priv(dev); |
2e28532f | 217 | struct bufdesc *bdp; |
9555b31e | 218 | void *bufaddr; |
0e702ab3 | 219 | unsigned short status; |
3b2b74ca | 220 | unsigned long flags; |
1da177e4 | 221 | |
1da177e4 LT |
222 | if (!fep->link) { |
223 | /* Link is down or autonegotiation is in progress. */ | |
5b548140 | 224 | return NETDEV_TX_BUSY; |
1da177e4 LT |
225 | } |
226 | ||
3b2b74ca | 227 | spin_lock_irqsave(&fep->hw_lock, flags); |
1da177e4 LT |
228 | /* Fill in a Tx ring entry */ |
229 | bdp = fep->cur_tx; | |
230 | ||
0e702ab3 | 231 | status = bdp->cbd_sc; |
22f6b860 | 232 | |
0e702ab3 | 233 | if (status & BD_ENET_TX_READY) { |
1da177e4 LT |
234 | /* Ooops. All transmit buffers are full. Bail out. |
235 | * This should not happen, since dev->tbusy should be set. | |
236 | */ | |
237 | printk("%s: tx queue full!.\n", dev->name); | |
3b2b74ca | 238 | spin_unlock_irqrestore(&fep->hw_lock, flags); |
5b548140 | 239 | return NETDEV_TX_BUSY; |
1da177e4 | 240 | } |
1da177e4 | 241 | |
22f6b860 | 242 | /* Clear all of the status flags */ |
0e702ab3 | 243 | status &= ~BD_ENET_TX_STATS; |
1da177e4 | 244 | |
22f6b860 | 245 | /* Set buffer length and buffer pointer */ |
9555b31e | 246 | bufaddr = skb->data; |
1da177e4 LT |
247 | bdp->cbd_datlen = skb->len; |
248 | ||
249 | /* | |
22f6b860 SH |
250 | * On some FEC implementations data must be aligned on |
251 | * 4-byte boundaries. Use bounce buffers to copy data | |
252 | * and get it aligned. Ugh. | |
1da177e4 | 253 | */ |
9555b31e | 254 | if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { |
1da177e4 LT |
255 | unsigned int index; |
256 | index = bdp - fep->tx_bd_base; | |
6989f512 | 257 | memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len); |
9555b31e | 258 | bufaddr = fep->tx_bounce[index]; |
1da177e4 LT |
259 | } |
260 | ||
22f6b860 | 261 | /* Save skb pointer */ |
1da177e4 LT |
262 | fep->tx_skbuff[fep->skb_cur] = skb; |
263 | ||
09f75cd7 | 264 | dev->stats.tx_bytes += skb->len; |
1da177e4 | 265 | fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK; |
6aa20a22 | 266 | |
1da177e4 LT |
267 | /* Push the data cache so the CPM does not get stale memory |
268 | * data. | |
269 | */ | |
9555b31e | 270 | bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr, |
f0b3fbea | 271 | FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); |
1da177e4 | 272 | |
0e702ab3 GU |
273 | /* Send it on its way. Tell FEC it's ready, interrupt when done, |
274 | * it's the last BD of the frame, and to put the CRC on the end. | |
1da177e4 | 275 | */ |
0e702ab3 | 276 | status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR |
1da177e4 | 277 | | BD_ENET_TX_LAST | BD_ENET_TX_TC); |
0e702ab3 | 278 | bdp->cbd_sc = status; |
1da177e4 | 279 | |
1da177e4 | 280 | /* Trigger transmission start */ |
f44d6305 | 281 | writel(0, fep->hwp + FEC_X_DES_ACTIVE); |
1da177e4 | 282 | |
22f6b860 SH |
283 | /* If this was the last BD in the ring, start at the beginning again. */ |
284 | if (status & BD_ENET_TX_WRAP) | |
1da177e4 | 285 | bdp = fep->tx_bd_base; |
22f6b860 | 286 | else |
1da177e4 | 287 | bdp++; |
1da177e4 LT |
288 | |
289 | if (bdp == fep->dirty_tx) { | |
290 | fep->tx_full = 1; | |
291 | netif_stop_queue(dev); | |
292 | } | |
293 | ||
2e28532f | 294 | fep->cur_tx = bdp; |
1da177e4 | 295 | |
3b2b74ca | 296 | spin_unlock_irqrestore(&fep->hw_lock, flags); |
1da177e4 | 297 | |
6ed10654 | 298 | return NETDEV_TX_OK; |
1da177e4 LT |
299 | } |
300 | ||
301 | static void | |
302 | fec_timeout(struct net_device *dev) | |
303 | { | |
304 | struct fec_enet_private *fep = netdev_priv(dev); | |
305 | ||
09f75cd7 | 306 | dev->stats.tx_errors++; |
1da177e4 | 307 | |
7dd6a2aa | 308 | fec_restart(dev, fep->full_duplex); |
1da177e4 LT |
309 | netif_wake_queue(dev); |
310 | } | |
311 | ||
1da177e4 | 312 | static irqreturn_t |
7d12e780 | 313 | fec_enet_interrupt(int irq, void * dev_id) |
1da177e4 LT |
314 | { |
315 | struct net_device *dev = dev_id; | |
f44d6305 | 316 | struct fec_enet_private *fep = netdev_priv(dev); |
1da177e4 | 317 | uint int_events; |
3b2b74ca | 318 | irqreturn_t ret = IRQ_NONE; |
1da177e4 | 319 | |
3b2b74ca | 320 | do { |
f44d6305 SH |
321 | int_events = readl(fep->hwp + FEC_IEVENT); |
322 | writel(int_events, fep->hwp + FEC_IEVENT); | |
1da177e4 | 323 | |
1da177e4 | 324 | if (int_events & FEC_ENET_RXF) { |
3b2b74ca | 325 | ret = IRQ_HANDLED; |
1da177e4 LT |
326 | fec_enet_rx(dev); |
327 | } | |
328 | ||
329 | /* Transmit OK, or non-fatal error. Update the buffer | |
f44d6305 SH |
330 | * descriptors. FEC handles all errors, we just discover |
331 | * them as part of the transmit process. | |
332 | */ | |
1da177e4 | 333 | if (int_events & FEC_ENET_TXF) { |
3b2b74ca | 334 | ret = IRQ_HANDLED; |
1da177e4 LT |
335 | fec_enet_tx(dev); |
336 | } | |
3b2b74ca SS |
337 | } while (int_events); |
338 | ||
339 | return ret; | |
1da177e4 LT |
340 | } |
341 | ||
342 | ||
343 | static void | |
344 | fec_enet_tx(struct net_device *dev) | |
345 | { | |
346 | struct fec_enet_private *fep; | |
2e28532f | 347 | struct bufdesc *bdp; |
0e702ab3 | 348 | unsigned short status; |
1da177e4 LT |
349 | struct sk_buff *skb; |
350 | ||
351 | fep = netdev_priv(dev); | |
81538e74 | 352 | spin_lock(&fep->hw_lock); |
1da177e4 LT |
353 | bdp = fep->dirty_tx; |
354 | ||
0e702ab3 | 355 | while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { |
f0b3fbea SH |
356 | if (bdp == fep->cur_tx && fep->tx_full == 0) |
357 | break; | |
358 | ||
359 | dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); | |
360 | bdp->cbd_bufaddr = 0; | |
1da177e4 LT |
361 | |
362 | skb = fep->tx_skbuff[fep->skb_dirty]; | |
363 | /* Check for errors. */ | |
0e702ab3 | 364 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | |
1da177e4 LT |
365 | BD_ENET_TX_RL | BD_ENET_TX_UN | |
366 | BD_ENET_TX_CSL)) { | |
09f75cd7 | 367 | dev->stats.tx_errors++; |
0e702ab3 | 368 | if (status & BD_ENET_TX_HB) /* No heartbeat */ |
09f75cd7 | 369 | dev->stats.tx_heartbeat_errors++; |
0e702ab3 | 370 | if (status & BD_ENET_TX_LC) /* Late collision */ |
09f75cd7 | 371 | dev->stats.tx_window_errors++; |
0e702ab3 | 372 | if (status & BD_ENET_TX_RL) /* Retrans limit */ |
09f75cd7 | 373 | dev->stats.tx_aborted_errors++; |
0e702ab3 | 374 | if (status & BD_ENET_TX_UN) /* Underrun */ |
09f75cd7 | 375 | dev->stats.tx_fifo_errors++; |
0e702ab3 | 376 | if (status & BD_ENET_TX_CSL) /* Carrier lost */ |
09f75cd7 | 377 | dev->stats.tx_carrier_errors++; |
1da177e4 | 378 | } else { |
09f75cd7 | 379 | dev->stats.tx_packets++; |
1da177e4 LT |
380 | } |
381 | ||
0e702ab3 | 382 | if (status & BD_ENET_TX_READY) |
1da177e4 | 383 | printk("HEY! Enet xmit interrupt and TX_READY.\n"); |
22f6b860 | 384 | |
1da177e4 LT |
385 | /* Deferred means some collisions occurred during transmit, |
386 | * but we eventually sent the packet OK. | |
387 | */ | |
0e702ab3 | 388 | if (status & BD_ENET_TX_DEF) |
09f75cd7 | 389 | dev->stats.collisions++; |
6aa20a22 | 390 | |
22f6b860 | 391 | /* Free the sk buffer associated with this last transmit */ |
1da177e4 LT |
392 | dev_kfree_skb_any(skb); |
393 | fep->tx_skbuff[fep->skb_dirty] = NULL; | |
394 | fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK; | |
6aa20a22 | 395 | |
22f6b860 | 396 | /* Update pointer to next buffer descriptor to be transmitted */ |
0e702ab3 | 397 | if (status & BD_ENET_TX_WRAP) |
1da177e4 LT |
398 | bdp = fep->tx_bd_base; |
399 | else | |
400 | bdp++; | |
6aa20a22 | 401 | |
22f6b860 | 402 | /* Since we have freed up a buffer, the ring is no longer full |
1da177e4 LT |
403 | */ |
404 | if (fep->tx_full) { | |
405 | fep->tx_full = 0; | |
406 | if (netif_queue_stopped(dev)) | |
407 | netif_wake_queue(dev); | |
408 | } | |
409 | } | |
2e28532f | 410 | fep->dirty_tx = bdp; |
81538e74 | 411 | spin_unlock(&fep->hw_lock); |
1da177e4 LT |
412 | } |
413 | ||
414 | ||
415 | /* During a receive, the cur_rx points to the current incoming buffer. | |
416 | * When we update through the ring, if the next incoming buffer has | |
417 | * not been given to the system, we just set the empty indicator, | |
418 | * effectively tossing the packet. | |
419 | */ | |
420 | static void | |
421 | fec_enet_rx(struct net_device *dev) | |
422 | { | |
f44d6305 | 423 | struct fec_enet_private *fep = netdev_priv(dev); |
2e28532f | 424 | struct bufdesc *bdp; |
0e702ab3 | 425 | unsigned short status; |
1da177e4 LT |
426 | struct sk_buff *skb; |
427 | ushort pkt_len; | |
428 | __u8 *data; | |
6aa20a22 | 429 | |
0e702ab3 GU |
430 | #ifdef CONFIG_M532x |
431 | flush_cache_all(); | |
6aa20a22 | 432 | #endif |
1da177e4 | 433 | |
81538e74 | 434 | spin_lock(&fep->hw_lock); |
3b2b74ca | 435 | |
1da177e4 LT |
436 | /* First, grab all of the stats for the incoming packet. |
437 | * These get messed up if we get called due to a busy condition. | |
438 | */ | |
439 | bdp = fep->cur_rx; | |
440 | ||
22f6b860 | 441 | while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { |
1da177e4 | 442 | |
22f6b860 SH |
443 | /* Since we have allocated space to hold a complete frame, |
444 | * the last indicator should be set. | |
445 | */ | |
446 | if ((status & BD_ENET_RX_LAST) == 0) | |
447 | printk("FEC ENET: rcv is not +last\n"); | |
1da177e4 | 448 | |
22f6b860 SH |
449 | if (!fep->opened) |
450 | goto rx_processing_done; | |
1da177e4 | 451 | |
22f6b860 SH |
452 | /* Check for errors. */ |
453 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | | |
1da177e4 | 454 | BD_ENET_RX_CR | BD_ENET_RX_OV)) { |
22f6b860 SH |
455 | dev->stats.rx_errors++; |
456 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { | |
457 | /* Frame too long or too short. */ | |
458 | dev->stats.rx_length_errors++; | |
459 | } | |
460 | if (status & BD_ENET_RX_NO) /* Frame alignment */ | |
461 | dev->stats.rx_frame_errors++; | |
462 | if (status & BD_ENET_RX_CR) /* CRC Error */ | |
463 | dev->stats.rx_crc_errors++; | |
464 | if (status & BD_ENET_RX_OV) /* FIFO overrun */ | |
465 | dev->stats.rx_fifo_errors++; | |
1da177e4 | 466 | } |
1da177e4 | 467 | |
22f6b860 SH |
468 | /* Report late collisions as a frame error. |
469 | * On this error, the BD is closed, but we don't know what we | |
470 | * have in the buffer. So, just drop this frame on the floor. | |
471 | */ | |
472 | if (status & BD_ENET_RX_CL) { | |
473 | dev->stats.rx_errors++; | |
474 | dev->stats.rx_frame_errors++; | |
475 | goto rx_processing_done; | |
476 | } | |
1da177e4 | 477 | |
22f6b860 SH |
478 | /* Process the incoming frame. */ |
479 | dev->stats.rx_packets++; | |
480 | pkt_len = bdp->cbd_datlen; | |
481 | dev->stats.rx_bytes += pkt_len; | |
482 | data = (__u8*)__va(bdp->cbd_bufaddr); | |
1da177e4 | 483 | |
f0b3fbea SH |
484 | dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen, |
485 | DMA_FROM_DEVICE); | |
ccdc4f19 | 486 | |
22f6b860 SH |
487 | /* This does 16 byte alignment, exactly what we need. |
488 | * The packet length includes FCS, but we don't want to | |
489 | * include that when passing upstream as it messes up | |
490 | * bridging applications. | |
491 | */ | |
8549889c | 492 | skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN); |
1da177e4 | 493 | |
8549889c | 494 | if (unlikely(!skb)) { |
22f6b860 SH |
495 | printk("%s: Memory squeeze, dropping packet.\n", |
496 | dev->name); | |
497 | dev->stats.rx_dropped++; | |
498 | } else { | |
8549889c | 499 | skb_reserve(skb, NET_IP_ALIGN); |
22f6b860 SH |
500 | skb_put(skb, pkt_len - 4); /* Make room */ |
501 | skb_copy_to_linear_data(skb, data, pkt_len - 4); | |
502 | skb->protocol = eth_type_trans(skb, dev); | |
503 | netif_rx(skb); | |
504 | } | |
f0b3fbea SH |
505 | |
506 | bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen, | |
507 | DMA_FROM_DEVICE); | |
22f6b860 SH |
508 | rx_processing_done: |
509 | /* Clear the status flags for this buffer */ | |
510 | status &= ~BD_ENET_RX_STATS; | |
1da177e4 | 511 | |
22f6b860 SH |
512 | /* Mark the buffer empty */ |
513 | status |= BD_ENET_RX_EMPTY; | |
514 | bdp->cbd_sc = status; | |
6aa20a22 | 515 | |
22f6b860 SH |
516 | /* Update BD pointer to next entry */ |
517 | if (status & BD_ENET_RX_WRAP) | |
518 | bdp = fep->rx_bd_base; | |
519 | else | |
520 | bdp++; | |
521 | /* Doing this here will keep the FEC running while we process | |
522 | * incoming frames. On a heavily loaded network, we should be | |
523 | * able to keep up at the expense of system resources. | |
524 | */ | |
525 | writel(0, fep->hwp + FEC_R_DES_ACTIVE); | |
526 | } | |
2e28532f | 527 | fep->cur_rx = bdp; |
1da177e4 | 528 | |
81538e74 | 529 | spin_unlock(&fep->hw_lock); |
1da177e4 LT |
530 | } |
531 | ||
e6b043d5 BW |
532 | /* ------------------------------------------------------------------------- */ |
533 | #ifdef CONFIG_M5272 | |
534 | static void __inline__ fec_get_mac(struct net_device *dev) | |
1da177e4 | 535 | { |
e6b043d5 BW |
536 | struct fec_enet_private *fep = netdev_priv(dev); |
537 | unsigned char *iap, tmpaddr[ETH_ALEN]; | |
1da177e4 | 538 | |
e6b043d5 BW |
539 | if (FEC_FLASHMAC) { |
540 | /* | |
541 | * Get MAC address from FLASH. | |
542 | * If it is all 1's or 0's, use the default. | |
543 | */ | |
544 | iap = (unsigned char *)FEC_FLASHMAC; | |
545 | if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && | |
546 | (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) | |
547 | iap = fec_mac_default; | |
548 | if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) && | |
549 | (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) | |
550 | iap = fec_mac_default; | |
f909b1ef | 551 | } else { |
e6b043d5 BW |
552 | *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW); |
553 | *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16); | |
554 | iap = &tmpaddr[0]; | |
1da177e4 LT |
555 | } |
556 | ||
e6b043d5 | 557 | memcpy(dev->dev_addr, iap, ETH_ALEN); |
1da177e4 | 558 | |
e6b043d5 BW |
559 | /* Adjust MAC if using default MAC address */ |
560 | if (iap == fec_mac_default) | |
561 | dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; | |
1da177e4 | 562 | } |
e6b043d5 | 563 | #endif |
1da177e4 | 564 | |
e6b043d5 | 565 | /* ------------------------------------------------------------------------- */ |
1da177e4 | 566 | |
e6b043d5 BW |
567 | /* |
568 | * Phy section | |
569 | */ | |
570 | static void fec_enet_adjust_link(struct net_device *dev) | |
1da177e4 LT |
571 | { |
572 | struct fec_enet_private *fep = netdev_priv(dev); | |
e6b043d5 BW |
573 | struct phy_device *phy_dev = fep->phy_dev; |
574 | unsigned long flags; | |
1da177e4 | 575 | |
e6b043d5 | 576 | int status_change = 0; |
1da177e4 | 577 | |
e6b043d5 | 578 | spin_lock_irqsave(&fep->hw_lock, flags); |
1da177e4 | 579 | |
e6b043d5 BW |
580 | /* Prevent a state halted on mii error */ |
581 | if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { | |
582 | phy_dev->state = PHY_RESUMING; | |
583 | goto spin_unlock; | |
584 | } | |
1da177e4 | 585 | |
e6b043d5 BW |
586 | /* Duplex link change */ |
587 | if (phy_dev->link) { | |
588 | if (fep->full_duplex != phy_dev->duplex) { | |
589 | fec_restart(dev, phy_dev->duplex); | |
590 | status_change = 1; | |
591 | } | |
592 | } | |
1da177e4 | 593 | |
e6b043d5 BW |
594 | /* Link on or off change */ |
595 | if (phy_dev->link != fep->link) { | |
596 | fep->link = phy_dev->link; | |
597 | if (phy_dev->link) | |
598 | fec_restart(dev, phy_dev->duplex); | |
1da177e4 | 599 | else |
e6b043d5 BW |
600 | fec_stop(dev); |
601 | status_change = 1; | |
1da177e4 | 602 | } |
6aa20a22 | 603 | |
e6b043d5 BW |
604 | spin_unlock: |
605 | spin_unlock_irqrestore(&fep->hw_lock, flags); | |
1da177e4 | 606 | |
e6b043d5 BW |
607 | if (status_change) |
608 | phy_print_status(phy_dev); | |
609 | } | |
1da177e4 | 610 | |
6aa20a22 | 611 | /* |
e6b043d5 | 612 | * NOTE: a MII transaction is during around 25 us, so polling it... |
1da177e4 | 613 | */ |
e6b043d5 | 614 | static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) |
1da177e4 | 615 | { |
e6b043d5 BW |
616 | struct fec_enet_private *fep = bus->priv; |
617 | int timeout = FEC_MII_TIMEOUT; | |
1da177e4 | 618 | |
e6b043d5 | 619 | fep->mii_timeout = 0; |
1da177e4 | 620 | |
e6b043d5 BW |
621 | /* clear MII end of transfer bit*/ |
622 | writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); | |
623 | ||
624 | /* start a read op */ | |
625 | writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | | |
626 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | | |
627 | FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); | |
628 | ||
629 | /* wait for end of transfer */ | |
630 | while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) { | |
631 | cpu_relax(); | |
632 | if (timeout-- < 0) { | |
633 | fep->mii_timeout = 1; | |
634 | printk(KERN_ERR "FEC: MDIO read timeout\n"); | |
635 | return -ETIMEDOUT; | |
636 | } | |
1da177e4 | 637 | } |
1da177e4 | 638 | |
e6b043d5 BW |
639 | /* return value */ |
640 | return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); | |
7dd6a2aa | 641 | } |
6aa20a22 | 642 | |
e6b043d5 BW |
643 | static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, |
644 | u16 value) | |
1da177e4 | 645 | { |
e6b043d5 BW |
646 | struct fec_enet_private *fep = bus->priv; |
647 | int timeout = FEC_MII_TIMEOUT; | |
1da177e4 | 648 | |
e6b043d5 | 649 | fep->mii_timeout = 0; |
7dd6a2aa | 650 | |
e6b043d5 BW |
651 | /* clear MII end of transfer bit*/ |
652 | writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); | |
1da177e4 | 653 | |
e6b043d5 BW |
654 | /* start a read op */ |
655 | writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | | |
656 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | | |
657 | FEC_MMFR_TA | FEC_MMFR_DATA(value), | |
658 | fep->hwp + FEC_MII_DATA); | |
659 | ||
660 | /* wait for end of transfer */ | |
661 | while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) { | |
662 | cpu_relax(); | |
663 | if (timeout-- < 0) { | |
664 | fep->mii_timeout = 1; | |
665 | printk(KERN_ERR "FEC: MDIO write timeout\n"); | |
666 | return -ETIMEDOUT; | |
667 | } | |
668 | } | |
1da177e4 | 669 | |
e6b043d5 BW |
670 | return 0; |
671 | } | |
1da177e4 | 672 | |
e6b043d5 | 673 | static int fec_enet_mdio_reset(struct mii_bus *bus) |
1da177e4 | 674 | { |
e6b043d5 | 675 | return 0; |
1da177e4 LT |
676 | } |
677 | ||
e6b043d5 | 678 | static int fec_enet_mii_probe(struct net_device *dev) |
562d2f8c | 679 | { |
4cf1653a | 680 | struct fec_enet_private *fep = netdev_priv(dev); |
e6b043d5 BW |
681 | struct phy_device *phy_dev = NULL; |
682 | int phy_addr; | |
562d2f8c | 683 | |
e6b043d5 BW |
684 | /* find the first phy */ |
685 | for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { | |
686 | if (fep->mii_bus->phy_map[phy_addr]) { | |
687 | phy_dev = fep->mii_bus->phy_map[phy_addr]; | |
688 | break; | |
689 | } | |
690 | } | |
562d2f8c | 691 | |
e6b043d5 BW |
692 | if (!phy_dev) { |
693 | printk(KERN_ERR "%s: no PHY found\n", dev->name); | |
694 | return -ENODEV; | |
695 | } | |
1da177e4 | 696 | |
e6b043d5 BW |
697 | /* attach the mac to the phy */ |
698 | phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), | |
699 | &fec_enet_adjust_link, 0, | |
700 | PHY_INTERFACE_MODE_MII); | |
701 | if (IS_ERR(phy_dev)) { | |
702 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); | |
703 | return PTR_ERR(phy_dev); | |
704 | } | |
1da177e4 | 705 | |
e6b043d5 BW |
706 | /* mask with MAC supported features */ |
707 | phy_dev->supported &= PHY_BASIC_FEATURES; | |
708 | phy_dev->advertising = phy_dev->supported; | |
1da177e4 | 709 | |
e6b043d5 BW |
710 | fep->phy_dev = phy_dev; |
711 | fep->link = 0; | |
712 | fep->full_duplex = 0; | |
1da177e4 | 713 | |
e6b043d5 | 714 | return 0; |
1da177e4 LT |
715 | } |
716 | ||
e6b043d5 | 717 | static int fec_enet_mii_init(struct platform_device *pdev) |
562d2f8c | 718 | { |
e6b043d5 | 719 | struct net_device *dev = platform_get_drvdata(pdev); |
562d2f8c | 720 | struct fec_enet_private *fep = netdev_priv(dev); |
e6b043d5 | 721 | int err = -ENXIO, i; |
6b265293 | 722 | |
e6b043d5 | 723 | fep->mii_timeout = 0; |
1da177e4 | 724 | |
e6b043d5 BW |
725 | /* |
726 | * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed) | |
727 | */ | |
728 | fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000) << 1; | |
729 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); | |
1da177e4 | 730 | |
e6b043d5 BW |
731 | fep->mii_bus = mdiobus_alloc(); |
732 | if (fep->mii_bus == NULL) { | |
733 | err = -ENOMEM; | |
734 | goto err_out; | |
1da177e4 LT |
735 | } |
736 | ||
e6b043d5 BW |
737 | fep->mii_bus->name = "fec_enet_mii_bus"; |
738 | fep->mii_bus->read = fec_enet_mdio_read; | |
739 | fep->mii_bus->write = fec_enet_mdio_write; | |
740 | fep->mii_bus->reset = fec_enet_mdio_reset; | |
741 | snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); | |
742 | fep->mii_bus->priv = fep; | |
743 | fep->mii_bus->parent = &pdev->dev; | |
744 | ||
745 | fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); | |
746 | if (!fep->mii_bus->irq) { | |
747 | err = -ENOMEM; | |
748 | goto err_out_free_mdiobus; | |
1da177e4 LT |
749 | } |
750 | ||
e6b043d5 BW |
751 | for (i = 0; i < PHY_MAX_ADDR; i++) |
752 | fep->mii_bus->irq[i] = PHY_POLL; | |
1da177e4 | 753 | |
e6b043d5 | 754 | platform_set_drvdata(dev, fep->mii_bus); |
1da177e4 | 755 | |
e6b043d5 BW |
756 | if (mdiobus_register(fep->mii_bus)) |
757 | goto err_out_free_mdio_irq; | |
1da177e4 | 758 | |
e6b043d5 BW |
759 | if (fec_enet_mii_probe(dev) != 0) |
760 | goto err_out_unregister_bus; | |
6aa20a22 | 761 | |
e6b043d5 | 762 | return 0; |
1da177e4 | 763 | |
e6b043d5 BW |
764 | err_out_unregister_bus: |
765 | mdiobus_unregister(fep->mii_bus); | |
766 | err_out_free_mdio_irq: | |
767 | kfree(fep->mii_bus->irq); | |
768 | err_out_free_mdiobus: | |
769 | mdiobus_free(fep->mii_bus); | |
770 | err_out: | |
771 | return err; | |
1da177e4 LT |
772 | } |
773 | ||
e6b043d5 | 774 | static void fec_enet_mii_remove(struct fec_enet_private *fep) |
1da177e4 | 775 | { |
e6b043d5 BW |
776 | if (fep->phy_dev) |
777 | phy_disconnect(fep->phy_dev); | |
778 | mdiobus_unregister(fep->mii_bus); | |
779 | kfree(fep->mii_bus->irq); | |
780 | mdiobus_free(fep->mii_bus); | |
1da177e4 LT |
781 | } |
782 | ||
e6b043d5 BW |
783 | static int fec_enet_get_settings(struct net_device *dev, |
784 | struct ethtool_cmd *cmd) | |
1da177e4 LT |
785 | { |
786 | struct fec_enet_private *fep = netdev_priv(dev); | |
e6b043d5 | 787 | struct phy_device *phydev = fep->phy_dev; |
1da177e4 | 788 | |
e6b043d5 BW |
789 | if (!phydev) |
790 | return -ENODEV; | |
1da177e4 | 791 | |
e6b043d5 | 792 | return phy_ethtool_gset(phydev, cmd); |
1da177e4 LT |
793 | } |
794 | ||
e6b043d5 BW |
795 | static int fec_enet_set_settings(struct net_device *dev, |
796 | struct ethtool_cmd *cmd) | |
1da177e4 LT |
797 | { |
798 | struct fec_enet_private *fep = netdev_priv(dev); | |
e6b043d5 | 799 | struct phy_device *phydev = fep->phy_dev; |
1da177e4 | 800 | |
e6b043d5 BW |
801 | if (!phydev) |
802 | return -ENODEV; | |
1da177e4 | 803 | |
e6b043d5 | 804 | return phy_ethtool_sset(phydev, cmd); |
1da177e4 LT |
805 | } |
806 | ||
e6b043d5 BW |
807 | static void fec_enet_get_drvinfo(struct net_device *dev, |
808 | struct ethtool_drvinfo *info) | |
1da177e4 | 809 | { |
e6b043d5 | 810 | struct fec_enet_private *fep = netdev_priv(dev); |
6aa20a22 | 811 | |
e6b043d5 BW |
812 | strcpy(info->driver, fep->pdev->dev.driver->name); |
813 | strcpy(info->version, "Revision: 1.0"); | |
814 | strcpy(info->bus_info, dev_name(&dev->dev)); | |
1da177e4 LT |
815 | } |
816 | ||
e6b043d5 BW |
817 | static struct ethtool_ops fec_enet_ethtool_ops = { |
818 | .get_settings = fec_enet_get_settings, | |
819 | .set_settings = fec_enet_set_settings, | |
820 | .get_drvinfo = fec_enet_get_drvinfo, | |
821 | .get_link = ethtool_op_get_link, | |
822 | }; | |
1da177e4 | 823 | |
e6b043d5 | 824 | static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
1da177e4 | 825 | { |
1da177e4 | 826 | struct fec_enet_private *fep = netdev_priv(dev); |
e6b043d5 | 827 | struct phy_device *phydev = fep->phy_dev; |
1da177e4 | 828 | |
e6b043d5 BW |
829 | if (!netif_running(dev)) |
830 | return -EINVAL; | |
1da177e4 | 831 | |
e6b043d5 BW |
832 | if (!phydev) |
833 | return -ENODEV; | |
834 | ||
835 | return phy_mii_ioctl(phydev, if_mii(rq), cmd); | |
1da177e4 LT |
836 | } |
837 | ||
f0b3fbea SH |
838 | static void fec_enet_free_buffers(struct net_device *dev) |
839 | { | |
840 | struct fec_enet_private *fep = netdev_priv(dev); | |
841 | int i; | |
842 | struct sk_buff *skb; | |
843 | struct bufdesc *bdp; | |
844 | ||
845 | bdp = fep->rx_bd_base; | |
846 | for (i = 0; i < RX_RING_SIZE; i++) { | |
847 | skb = fep->rx_skbuff[i]; | |
848 | ||
849 | if (bdp->cbd_bufaddr) | |
850 | dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, | |
851 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); | |
852 | if (skb) | |
853 | dev_kfree_skb(skb); | |
854 | bdp++; | |
855 | } | |
856 | ||
857 | bdp = fep->tx_bd_base; | |
858 | for (i = 0; i < TX_RING_SIZE; i++) | |
859 | kfree(fep->tx_bounce[i]); | |
860 | } | |
861 | ||
862 | static int fec_enet_alloc_buffers(struct net_device *dev) | |
863 | { | |
864 | struct fec_enet_private *fep = netdev_priv(dev); | |
865 | int i; | |
866 | struct sk_buff *skb; | |
867 | struct bufdesc *bdp; | |
868 | ||
869 | bdp = fep->rx_bd_base; | |
870 | for (i = 0; i < RX_RING_SIZE; i++) { | |
871 | skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE); | |
872 | if (!skb) { | |
873 | fec_enet_free_buffers(dev); | |
874 | return -ENOMEM; | |
875 | } | |
876 | fep->rx_skbuff[i] = skb; | |
877 | ||
878 | bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data, | |
879 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); | |
880 | bdp->cbd_sc = BD_ENET_RX_EMPTY; | |
881 | bdp++; | |
882 | } | |
883 | ||
884 | /* Set the last buffer to wrap. */ | |
885 | bdp--; | |
886 | bdp->cbd_sc |= BD_SC_WRAP; | |
887 | ||
888 | bdp = fep->tx_bd_base; | |
889 | for (i = 0; i < TX_RING_SIZE; i++) { | |
890 | fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); | |
891 | ||
892 | bdp->cbd_sc = 0; | |
893 | bdp->cbd_bufaddr = 0; | |
894 | bdp++; | |
895 | } | |
896 | ||
897 | /* Set the last buffer to wrap. */ | |
898 | bdp--; | |
899 | bdp->cbd_sc |= BD_SC_WRAP; | |
900 | ||
901 | return 0; | |
902 | } | |
903 | ||
1da177e4 LT |
904 | static int |
905 | fec_enet_open(struct net_device *dev) | |
906 | { | |
907 | struct fec_enet_private *fep = netdev_priv(dev); | |
f0b3fbea | 908 | int ret; |
1da177e4 LT |
909 | |
910 | /* I should reset the ring buffers here, but I don't yet know | |
911 | * a simple way to do that. | |
912 | */ | |
1da177e4 | 913 | |
f0b3fbea SH |
914 | ret = fec_enet_alloc_buffers(dev); |
915 | if (ret) | |
916 | return ret; | |
917 | ||
e6b043d5 BW |
918 | /* schedule a link state check */ |
919 | phy_start(fep->phy_dev); | |
1da177e4 LT |
920 | netif_start_queue(dev); |
921 | fep->opened = 1; | |
22f6b860 | 922 | return 0; |
1da177e4 LT |
923 | } |
924 | ||
925 | static int | |
926 | fec_enet_close(struct net_device *dev) | |
927 | { | |
928 | struct fec_enet_private *fep = netdev_priv(dev); | |
929 | ||
22f6b860 | 930 | /* Don't know what to do yet. */ |
1da177e4 | 931 | fep->opened = 0; |
e6b043d5 | 932 | phy_stop(fep->phy_dev); |
1da177e4 LT |
933 | netif_stop_queue(dev); |
934 | fec_stop(dev); | |
935 | ||
f0b3fbea SH |
936 | fec_enet_free_buffers(dev); |
937 | ||
1da177e4 LT |
938 | return 0; |
939 | } | |
940 | ||
1da177e4 LT |
/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6		/* #bits in hash */
#define CRC32_POLY	0xEDB88320	/* bit-reversed CRC-32 polynomial (Ethernet) */

static void set_multicast_list(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	unsigned int i, bit, data, crc, tmp;
	unsigned char hash;

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous: set bit 0x8 in R_CNTRL and skip all hash
		 * filtering.  NOTE(review): 0x8 assumed to be the PROM bit
		 * per the FEC reference manual — confirm.
		 */
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	/* Not promiscuous: make sure the PROM bit is cleared. */
	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (dev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Clear filter and add the addresses in hash register
	 */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

	netdev_for_each_mc_addr(ha, dev) {
		/* Only support group multicast for now */
		if (!(ha->addr[0] & 1))
			continue;

		/* calculate crc32 value of mac address
		 * (LSB-first bitwise CRC-32, as used on the wire)
		 */
		crc = 0xffffffff;

		for (i = 0; i < dev->addr_len; i++) {
			data = ha->addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only upper 6 bits (HASH_BITS) are used
		 * which point to specific bit in the hash registers
		 */
		hash = (crc >> (32 - HASH_BITS)) & 0x3f;

		/* Hash values 32..63 select a bit in the HIGH register,
		 * 0..31 a bit in the LOW register.
		 */
		if (hash > 31) {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
			tmp |= 1 << (hash - 32);
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		} else {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
			tmp |= 1 << hash;
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		}
	}
}
1019 | ||
22f6b860 | 1020 | /* Set a MAC change in hardware. */ |
009fda83 SH |
1021 | static int |
1022 | fec_set_mac_address(struct net_device *dev, void *p) | |
1da177e4 | 1023 | { |
f44d6305 | 1024 | struct fec_enet_private *fep = netdev_priv(dev); |
009fda83 SH |
1025 | struct sockaddr *addr = p; |
1026 | ||
1027 | if (!is_valid_ether_addr(addr->sa_data)) | |
1028 | return -EADDRNOTAVAIL; | |
1029 | ||
1030 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | |
1da177e4 | 1031 | |
f44d6305 SH |
1032 | writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) | |
1033 | (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24), | |
1034 | fep->hwp + FEC_ADDR_LOW); | |
1035 | writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24), | |
7cff0943 | 1036 | fep->hwp + FEC_ADDR_HIGH); |
009fda83 | 1037 | return 0; |
1da177e4 LT |
1038 | } |
1039 | ||
009fda83 SH |
/* net_device callbacks for this driver; installed on the device in
 * fec_enet_init().
 */
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_multicast_list	= set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_do_ioctl		= fec_enet_ioctl,
};
1051 | ||
1da177e4 LT |
/*
 * One-time MAC setup: allocate the descriptor rings, read back the MAC
 * address, install the netdev/ethtool ops and reset the controller.
 *
 * XXX: We need to clean up on failure exits here.
 *
 * index is only used in legacy code
 */
static int fec_enet_init(struct net_device *dev, int index)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *cbd_base;
	struct bufdesc *bdp;
	int i;

	/* Allocate memory for buffer descriptors.  One page holds both
	 * rings; the TX ring starts RX_RING_SIZE descriptors in.
	 * NOTE(review): a NULL struct device is passed to
	 * dma_alloc_coherent() — confirm this is acceptable on all
	 * supported platforms.
	 */
	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
			GFP_KERNEL);
	if (!cbd_base) {
		printk("FEC: allocate descriptor memory failed?\n");
		return -ENOMEM;
	}

	spin_lock_init(&fep->hw_lock);

	fep->index = index;
	fep->hwp = (void __iomem *)dev->base_addr;
	fep->netdev = dev;

	/* Set the Ethernet address */
#ifdef CONFIG_M5272
	fec_get_mac(dev);
#else
	{
		/* Read the MAC back out of the address registers, which
		 * the bootloader is expected to have programmed.
		 */
		unsigned long l;
		l = readl(fep->hwp + FEC_ADDR_LOW);
		dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
		dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
		dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
		l = readl(fep->hwp + FEC_ADDR_HIGH);
		dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
	}
#endif

	/* Set receive and transmit descriptor base. */
	fep->rx_bd_base = cbd_base;
	fep->tx_bd_base = cbd_base + RX_RING_SIZE;

	/* The FEC Ethernet specific entries in the device structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->netdev_ops = &fec_netdev_ops;
	dev->ethtool_ops = &fec_enet_ethtool_ops;

	/* Initialize the receive buffer descriptors. */
	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit */
	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Reset the controller into a known (half-duplex) state. */
	fec_restart(dev, 0);

	return 0;
}
1135 | ||
/* This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 *
 * The register writes below follow a strict sequence: reset, clear/mask
 * setup, ring bases, duplex mode, MII speed, then enable — do not
 * reorder them.
 */
static void
fec_restart(struct net_device *dev, int duplex)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/* Clear any outstanding interrupt. */
	writel(0xffc00000, fep->hwp + FEC_IEVENT);

	/* Reset all multicast. */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	/* Set maximum receive buffer size. */
	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

	/* Set receive and transmit descriptor base.  The TX ring follows
	 * the RX ring in the same DMA allocation (see fec_enet_init()).
	 */
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
			fep->hwp + FEC_X_DES_START);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers. */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i]) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Enable MII mode */
	if (duplex) {
		/* MII enable / FD enable */
		writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* MII enable / No Rcv on Xmit */
		writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}
	fep->full_duplex = duplex;

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#ifdef FEC_MIIGSK_ENR
	if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
		/* disable the gasket and wait */
		writel(0, fep->hwp + FEC_MIIGSK_ENR);
		while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
			udelay(1);

		/* configure the gasket: RMII, 50 MHz, no loopback, no echo */
		writel(1, fep->hwp + FEC_MIIGSK_CFGR);

		/* re-enable the gasket */
		writel(2, fep->hwp + FEC_MIIGSK_ENR);
	}
#endif

	/* And last, enable the transmit and receive processing */
	writel(2, fep->hwp + FEC_ECNTRL);
	writel(0, fep->hwp + FEC_R_DES_ACTIVE);

	/* Enable interrupts we wish to service */
	writel(FEC_ENET_TXF | FEC_ENET_RXF, fep->hwp + FEC_IMASK);
}
1218 | ||
/* Quiesce the controller: attempt a graceful transmit stop (only
 * meaningful while the link is up), then hard-reset the block and
 * restore the MII speed so MDIO access keeps working afterwards.
 */
static void
fec_stop(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		/* GRA event flags graceful-stop completion; 10us may not
		 * always be enough — this only warns, it does not retry.
		 */
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			printk("fec_stop : Graceful transmit stop did not complete !\n");
	}

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/* Clear outstanding MII command interrupts. */
	writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);

	/* The reset cleared MII_SPEED; reprogram it so the MDIO bus stays
	 * usable while the MAC is stopped.
	 */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
}
1241 | ||
ead73183 SH |
1242 | static int __devinit |
1243 | fec_probe(struct platform_device *pdev) | |
1244 | { | |
1245 | struct fec_enet_private *fep; | |
5eb32bd0 | 1246 | struct fec_platform_data *pdata; |
ead73183 SH |
1247 | struct net_device *ndev; |
1248 | int i, irq, ret = 0; | |
1249 | struct resource *r; | |
1250 | ||
1251 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1252 | if (!r) | |
1253 | return -ENXIO; | |
1254 | ||
1255 | r = request_mem_region(r->start, resource_size(r), pdev->name); | |
1256 | if (!r) | |
1257 | return -EBUSY; | |
1258 | ||
1259 | /* Init network device */ | |
1260 | ndev = alloc_etherdev(sizeof(struct fec_enet_private)); | |
1261 | if (!ndev) | |
1262 | return -ENOMEM; | |
1263 | ||
1264 | SET_NETDEV_DEV(ndev, &pdev->dev); | |
1265 | ||
1266 | /* setup board info structure */ | |
1267 | fep = netdev_priv(ndev); | |
1268 | memset(fep, 0, sizeof(*fep)); | |
1269 | ||
1270 | ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r)); | |
e6b043d5 | 1271 | fep->pdev = pdev; |
ead73183 SH |
1272 | |
1273 | if (!ndev->base_addr) { | |
1274 | ret = -ENOMEM; | |
1275 | goto failed_ioremap; | |
1276 | } | |
1277 | ||
1278 | platform_set_drvdata(pdev, ndev); | |
1279 | ||
5eb32bd0 BS |
1280 | pdata = pdev->dev.platform_data; |
1281 | if (pdata) | |
1282 | fep->phy_interface = pdata->phy; | |
1283 | ||
ead73183 SH |
1284 | /* This device has up to three irqs on some platforms */ |
1285 | for (i = 0; i < 3; i++) { | |
1286 | irq = platform_get_irq(pdev, i); | |
1287 | if (i && irq < 0) | |
1288 | break; | |
1289 | ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); | |
1290 | if (ret) { | |
1291 | while (i >= 0) { | |
1292 | irq = platform_get_irq(pdev, i); | |
1293 | free_irq(irq, ndev); | |
1294 | i--; | |
1295 | } | |
1296 | goto failed_irq; | |
1297 | } | |
1298 | } | |
1299 | ||
1300 | fep->clk = clk_get(&pdev->dev, "fec_clk"); | |
1301 | if (IS_ERR(fep->clk)) { | |
1302 | ret = PTR_ERR(fep->clk); | |
1303 | goto failed_clk; | |
1304 | } | |
1305 | clk_enable(fep->clk); | |
1306 | ||
1307 | ret = fec_enet_init(ndev, 0); | |
1308 | if (ret) | |
1309 | goto failed_init; | |
1310 | ||
e6b043d5 BW |
1311 | ret = fec_enet_mii_init(pdev); |
1312 | if (ret) | |
1313 | goto failed_mii_init; | |
1314 | ||
ead73183 SH |
1315 | ret = register_netdev(ndev); |
1316 | if (ret) | |
1317 | goto failed_register; | |
1318 | ||
e6b043d5 BW |
1319 | printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] " |
1320 | "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name, | |
1321 | fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), | |
1322 | fep->phy_dev->irq); | |
1323 | ||
ead73183 SH |
1324 | return 0; |
1325 | ||
1326 | failed_register: | |
e6b043d5 BW |
1327 | fec_enet_mii_remove(fep); |
1328 | failed_mii_init: | |
ead73183 SH |
1329 | failed_init: |
1330 | clk_disable(fep->clk); | |
1331 | clk_put(fep->clk); | |
1332 | failed_clk: | |
1333 | for (i = 0; i < 3; i++) { | |
1334 | irq = platform_get_irq(pdev, i); | |
1335 | if (irq > 0) | |
1336 | free_irq(irq, ndev); | |
1337 | } | |
1338 | failed_irq: | |
1339 | iounmap((void __iomem *)ndev->base_addr); | |
1340 | failed_ioremap: | |
1341 | free_netdev(ndev); | |
1342 | ||
1343 | return ret; | |
1344 | } | |
1345 | ||
1346 | static int __devexit | |
1347 | fec_drv_remove(struct platform_device *pdev) | |
1348 | { | |
1349 | struct net_device *ndev = platform_get_drvdata(pdev); | |
1350 | struct fec_enet_private *fep = netdev_priv(ndev); | |
1351 | ||
1352 | platform_set_drvdata(pdev, NULL); | |
1353 | ||
1354 | fec_stop(ndev); | |
e6b043d5 | 1355 | fec_enet_mii_remove(fep); |
ead73183 SH |
1356 | clk_disable(fep->clk); |
1357 | clk_put(fep->clk); | |
1358 | iounmap((void __iomem *)ndev->base_addr); | |
1359 | unregister_netdev(ndev); | |
1360 | free_netdev(ndev); | |
1361 | return 0; | |
1362 | } | |
1363 | ||
1364 | static int | |
1365 | fec_suspend(struct platform_device *dev, pm_message_t state) | |
1366 | { | |
1367 | struct net_device *ndev = platform_get_drvdata(dev); | |
1368 | struct fec_enet_private *fep; | |
1369 | ||
1370 | if (ndev) { | |
1371 | fep = netdev_priv(ndev); | |
1372 | if (netif_running(ndev)) { | |
1373 | netif_device_detach(ndev); | |
1374 | fec_stop(ndev); | |
1375 | } | |
1376 | } | |
1377 | return 0; | |
1378 | } | |
1379 | ||
/* Legacy platform resume hook: re-initialize the controller and
 * re-attach the interface if it was running at suspend time.
 */
static int
fec_resume(struct platform_device *dev)
{
	struct net_device *ndev = platform_get_drvdata(dev);

	if (ndev && netif_running(ndev)) {
		fec_enet_init(ndev, 0);
		netif_device_attach(ndev);
	}
	return 0;
}
1393 | ||
/* Platform driver glue: binds to the "fec" platform device name and
 * wires up probe/remove plus the legacy suspend/resume callbacks.
 */
static struct platform_driver fec_driver = {
	.driver	= {
		.name    = "fec",
		.owner	 = THIS_MODULE,
	},
	.probe	= fec_probe,
	.remove	= __devexit_p(fec_drv_remove),
	.suspend	= fec_suspend,
	.resume	= fec_resume,
};
1404 | ||
/* Module entry point: announce the driver, then register it with the
 * platform bus (devices bind later via fec_probe()).
 */
static int __init
fec_enet_module_init(void)
{
	printk(KERN_INFO "FEC Ethernet Driver\n");

	return platform_driver_register(&fec_driver);
}
1412 | ||
/* Module exit point: unregistering the driver detaches any bound
 * devices through fec_drv_remove().
 */
static void __exit
fec_enet_cleanup(void)
{
	platform_driver_unregister(&fec_driver);
}
1418 | ||
/* Module init/exit registration and license. */
module_exit(fec_enet_cleanup);
module_init(fec_enet_module_init);

MODULE_LICENSE("GPL");