/*
 * Ethernet driver for Motorola MPC8260.
 * Copyright (c) 1999 Dan Malek (dmalek@jlc.net)
 * Copyright (c) 2000 MontaVista Software Inc. (source@mvista.com)
 *	2.3.99 Updates
 *
 * I copied this from the 8xx CPM Ethernet driver, so follow the
 * credits back through that.
 *
 * This version of the driver is somewhat selectable for the different
 * processor/board combinations.  It works for the boards I know about
 * now, and should be easily modified to include others.  Some of the
 * configuration information is contained in <asm/cpm1.h> and the
 * remainder is here.
 *
 * Buffer descriptors are kept in the CPM dual port RAM, and the frame
 * buffers are in the host memory.
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include <asm/immap_cpm2.h>
#include <asm/pgtable.h>
#include <asm/mpc8260.h>
#include <asm/uaccess.h>
#include <asm/cpm2.h>
#include <asm/irq.h>

/*
 *		Theory of Operation
 *
 * The MPC8260 CPM performs the Ethernet processing on an SCC.  It can use
 * an arbitrary number of buffers on byte boundaries, but must have at
 * least two receive buffers to prevent constant overrun conditions.
 *
 * The buffer descriptors are allocated from the CPM dual port memory
 * with the data buffers allocated from host memory, just like all other
 * serial communication protocols.  The host memory buffers are allocated
 * from the free page pool, and then divided into smaller receive and
 * transmit buffers.  The size of the buffers should be a power of two,
 * since that nicely divides the page.  This creates a ring buffer
 * structure similar to the LANCE and other controllers.
 *
 * Like the LANCE driver:
 * The driver runs as two independent, single-threaded flows of control.  One
 * is the send-packet routine, which enforces single-threaded use by the
 * cep->tx_full flag.  The other thread is the interrupt handler, which is
 * single threaded by the hardware and other software.
 */

/* The transmitter timeout
 */
#define TX_TIMEOUT	(2*HZ)

/* The number of Tx and Rx buffers.  These are allocated from the page
 * pool.  The code may assume these are a power of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter.  We just use
 * the skbuffer directly.
 */
#define CPM_ENET_RX_PAGES	4
#define CPM_ENET_RX_FRSIZE	2048
#define CPM_ENET_RX_FRPPG	(PAGE_SIZE / CPM_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(CPM_ENET_RX_FRPPG * CPM_ENET_RX_PAGES)
#define TX_RING_SIZE		8	/* Must be power of two */
#define TX_RING_MOD_MASK	7	/*   for this to work */
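/* TX_RING_MOD_MASK must always be TX_RING_SIZE - 1; it is used to wrap
 * the skb_cur and skb_dirty indices around the transmit ring.
 */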

/* The CPM stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520
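/* PKT_MAXBLR_SIZE is the 1518-byte maximum frame size rounded up to a
 * multiple of four, since the maximum receive buffer length register
 * (MRBLR) programmed below must be a multiple of four.
 */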

/* The CPM buffer descriptors track the ring buffers.  The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors.  The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller.  The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions.  The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
struct scc_enet_private {
	/* The saved address of a sent-in-place packet/buffer, so it can be
	 * freed later with dev_kfree_skb_irq().
	 */
	struct sk_buff	*tx_skbuff[TX_RING_SIZE];
	ushort	skb_cur;
	ushort	skb_dirty;

	/* CPM dual port RAM relative addresses.
	 */
	cbd_t	*rx_bd_base;		/* Address of Rx and Tx buffers. */
	cbd_t	*tx_bd_base;
	cbd_t	*cur_rx, *cur_tx;	/* The next free ring entry */
	cbd_t	*dirty_tx;		/* The ring entries to be free()ed. */
	scc_t	*sccp;
	struct	net_device_stats stats;
	uint	tx_full;
	spinlock_t lock;
};

static int scc_enet_open(struct net_device *dev);
static int scc_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int scc_enet_rx(struct net_device *dev);
static irqreturn_t scc_enet_interrupt(int irq, void *dev_id);
static int scc_enet_close(struct net_device *dev);
static struct net_device_stats *scc_enet_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);

/* These will be configurable for the SCC choice.
 */
#define CPM_ENET_BLOCK	CPM_CR_SCC1_SBLOCK
#define CPM_ENET_PAGE	CPM_CR_SCC1_PAGE
#define PROFF_ENET	PROFF_SCC1
#define SCC_ENET	0
#define SIU_INT_ENET	SIU_INT_SCC1

/* These are both board and SCC dependent....
 */
#define PD_ENET_RXD	((uint)0x00000001)
#define PD_ENET_TXD	((uint)0x00000002)
#define PD_ENET_TENA	((uint)0x00000004)
#define PC_ENET_RENA	((uint)0x00020000)
#define PC_ENET_CLSN	((uint)0x00000004)
#define PC_ENET_TXCLK	((uint)0x00000800)
#define PC_ENET_RXCLK	((uint)0x00000400)
#define CMX_CLK_ROUTE	((uint)0x25000000)
#define CMX_CLK_MASK	((uint)0xff000000)
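/* The PD_* and PC_* masks pick the port D/C pins wired to the SCC1
 * Ethernet signals on this board, and CMX_CLK_ROUTE/CMX_CLK_MASK select
 * the CLK inputs routed to SCC1 through the CMXSCR register.  A
 * different SCC or board layout needs different values here.
 */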

/* Specific to a board.
 */
#define PC_EST8260_ENET_LOOPBACK	((uint)0x80000000)
#define PC_EST8260_ENET_SQE		((uint)0x40000000)
#define PC_EST8260_ENET_NOTFD		((uint)0x20000000)

static int
scc_enet_open(struct net_device *dev)
{

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */
	netif_start_queue(dev);
	return 0;					/* Always succeed */
}

static int
scc_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv;
	volatile cbd_t	*bdp;


	/* Fill in a Tx ring entry */
	bdp = cep->cur_tx;

#ifndef final_version
	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since cep->tx_full should be set.
		 */
		printk("%s: tx queue full!\n", dev->name);
		return 1;
	}
#endif

	/* Clear all of the status flags.
	 */
	bdp->cbd_sc &= ~BD_ENET_TX_STATS;

	/* If the frame is short, tell CPM to pad it.
	 */
	if (skb->len <= ETH_ZLEN)
		bdp->cbd_sc |= BD_ENET_TX_PAD;
	else
		bdp->cbd_sc &= ~BD_ENET_TX_PAD;

	/* Set buffer length and buffer pointer.
	 */
	bdp->cbd_datlen = skb->len;
	bdp->cbd_bufaddr = __pa(skb->data);

	/* Save skb pointer.
	 */
	cep->tx_skbuff[cep->skb_cur] = skb;

	cep->stats.tx_bytes += skb->len;
	cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK;

	spin_lock_irq(&cep->lock);

	/* Send it on its way.  Tell CPM it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC);

	dev->trans_start = jiffies;

	/* If this was the last BD in the ring, start at the beginning again.
	 */
	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
		bdp = cep->tx_bd_base;
	else
		bdp++;

	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		netif_stop_queue(dev);
		cep->tx_full = 1;
	}

	cep->cur_tx = (cbd_t *)bdp;

	spin_unlock_irq(&cep->lock);

	return 0;
}

static void
scc_enet_timeout(struct net_device *dev)
{
	struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv;

	printk("%s: transmit timed out.\n", dev->name);
	cep->stats.tx_errors++;
#ifndef final_version
	{
		int	i;
		cbd_t	*bdp;
		printk(" Ring data dump: cur_tx %p%s cur_rx %p.\n",
		       cep->cur_tx, cep->tx_full ? " (full)" : "",
		       cep->cur_rx);
		bdp = cep->tx_bd_base;
		printk(" Tx @base %p :\n", bdp);
		for (i = 0 ; i < TX_RING_SIZE; i++, bdp++)
			printk("%04x %04x %08x\n",
			       bdp->cbd_sc,
			       bdp->cbd_datlen,
			       bdp->cbd_bufaddr);
		bdp = cep->rx_bd_base;
		printk(" Rx @base %p :\n", bdp);
		for (i = 0 ; i < RX_RING_SIZE; i++, bdp++)
			printk("%04x %04x %08x\n",
			       bdp->cbd_sc,
			       bdp->cbd_datlen,
			       bdp->cbd_bufaddr);
	}
#endif
	if (!cep->tx_full)
		netif_wake_queue(dev);
}

/* The interrupt handler.
 * This is called from the CPM handler, not the MPC core interrupt.
 */
static irqreturn_t
scc_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	volatile struct scc_enet_private *cep;
	volatile cbd_t	*bdp;
	ushort	int_events;
	int	must_restart;

	cep = dev->priv;

	/* Get the interrupt events that caused us to be here.
	 */
	int_events = cep->sccp->scc_scce;
	cep->sccp->scc_scce = int_events;
	must_restart = 0;

	/* Handle receive event in its own function.
	 */
	if (int_events & SCCE_ENET_RXF)
		scc_enet_rx(dev_id);

	/* Check for a transmit error.  The manual is a little unclear
	 * about this, so keep the debug code until I get it figured out.  It
	 * appears that if TXE is set, then TXB is not set.  However,
	 * if carrier sense is lost during frame transmission, the TXE
	 * bit is set, "and continues the buffer transmission normally."
	 * I don't know if "normally" implies TXB is set when the buffer
	 * descriptor is closed.....trial and error :-).
	 */

	/* Transmit OK, or non-fatal error.  Update the buffer descriptors.
	 */
	if (int_events & (SCCE_ENET_TXE | SCCE_ENET_TXB)) {
		spin_lock(&cep->lock);
		bdp = cep->dirty_tx;
		while ((bdp->cbd_sc & BD_ENET_TX_READY) == 0) {
			if ((bdp == cep->cur_tx) && (cep->tx_full == 0))
				break;

			if (bdp->cbd_sc & BD_ENET_TX_HB)	/* No heartbeat */
				cep->stats.tx_heartbeat_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_LC)	/* Late collision */
				cep->stats.tx_window_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_RL)	/* Retrans limit */
				cep->stats.tx_aborted_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_UN)	/* Underrun */
				cep->stats.tx_fifo_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_CSL)	/* Carrier lost */
				cep->stats.tx_carrier_errors++;


			/* Missing heartbeat and lost carrier are not really
			 * bad errors.  The others require a restart transmit
			 * command.
			 */
			if (bdp->cbd_sc &
			    (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				must_restart = 1;
				cep->stats.tx_errors++;
			}

			cep->stats.tx_packets++;

			/* Deferred means some collisions occurred during transmit,
			 * but we eventually sent the packet OK.
			 */
			if (bdp->cbd_sc & BD_ENET_TX_DEF)
				cep->stats.collisions++;

			/* Free the sk buffer associated with this last transmit.
			 */
			dev_kfree_skb_irq(cep->tx_skbuff[cep->skb_dirty]);
			cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK;

			/* Update pointer to next buffer descriptor to be transmitted.
			 */
			if (bdp->cbd_sc & BD_ENET_TX_WRAP)
				bdp = cep->tx_bd_base;
			else
				bdp++;

			/* I don't know if we can be held off from processing these
			 * interrupts for more than one frame time.  I really hope
			 * not.  In such a case, we would now want to check the
			 * currently available BD (cur_tx) and determine if any
			 * buffers between the dirty_tx and cur_tx have also been
			 * sent.  We would want to process anything in between that
			 * does not have BD_ENET_TX_READY set.
			 */

			/* Since we have freed up a buffer, the ring is no longer
			 * full.
			 */
			if (cep->tx_full) {
				cep->tx_full = 0;
				if (netif_queue_stopped(dev)) {
					netif_wake_queue(dev);
				}
			}

			cep->dirty_tx = (cbd_t *)bdp;
		}

		if (must_restart) {
			volatile cpm_cpm2_t *cp;

			/* Some transmit errors cause the transmitter to shut
			 * down.  We now issue a restart transmit.  Since the
			 * errors close the BD and update the pointers, the restart
			 * _should_ pick up without having to reset any of our
			 * pointers either.
			 */

			cp = cpmp;
			cp->cp_cpcr =
				mk_cr_cmd(CPM_ENET_PAGE, CPM_ENET_BLOCK, 0,
					  CPM_CR_RESTART_TX) | CPM_CR_FLG;
			while (cp->cp_cpcr & CPM_CR_FLG);
		}
		spin_unlock(&cep->lock);
	}

	/* Check for receive busy, i.e. packets coming but no place to
	 * put them.  This "can't happen" because the receive interrupt
	 * is tossing previous frames.
	 */
	if (int_events & SCCE_ENET_BSY) {
		cep->stats.rx_dropped++;
		printk("SCC ENET: BSY can't happen.\n");
	}

	return IRQ_HANDLED;
}

/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
scc_enet_rx(struct net_device *dev)
{
	struct	scc_enet_private *cep;
	volatile cbd_t	*bdp;
	struct	sk_buff *skb;
	ushort	pkt_len;

	cep = dev->priv;

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = cep->cur_rx;

	for (;;) {
		if (bdp->cbd_sc & BD_ENET_RX_EMPTY)
			break;

#ifndef final_version
		/* Since we have allocated space to hold a complete frame, both
		 * the first and last indicators should be set.
		 */
		if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) !=
		    (BD_ENET_RX_FIRST | BD_ENET_RX_LAST))
			printk("CPM ENET: rcv is not first+last\n");
#endif

		/* Frame too long or too short.
		 */
		if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
			cep->stats.rx_length_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
			cep->stats.rx_frame_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
			cep->stats.rx_crc_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
			cep->stats.rx_crc_errors++;

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (bdp->cbd_sc & BD_ENET_RX_CL) {
			cep->stats.rx_frame_errors++;
		}
		else {

			/* Process the incoming frame.
			 */
			cep->stats.rx_packets++;
			pkt_len = bdp->cbd_datlen;
			cep->stats.rx_bytes += pkt_len;

			/* This does 16 byte alignment, much more than we need.
			 * The packet length includes FCS, but we don't want to
			 * include that when passing upstream as it messes up
			 * bridging applications.
			 */
			skb = dev_alloc_skb(pkt_len-4);

			if (skb == NULL) {
				printk("%s: Memory squeeze, dropping packet.\n", dev->name);
				cep->stats.rx_dropped++;
			}
			else {
				skb_put(skb,pkt_len-4);	/* Make room */
				skb_copy_to_linear_data(skb,
					(unsigned char *)__va(bdp->cbd_bufaddr),
					pkt_len-4);
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
			}
		}

		/* Clear the status flags for this buffer.
		 */
		bdp->cbd_sc &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty.
		 */
		bdp->cbd_sc |= BD_ENET_RX_EMPTY;

		/* Update BD pointer to next entry.
		 */
		if (bdp->cbd_sc & BD_ENET_RX_WRAP)
			bdp = cep->rx_bd_base;
		else
			bdp++;

	}
	cep->cur_rx = (cbd_t *)bdp;

	return 0;
}

static int
scc_enet_close(struct net_device *dev)
{
	/* Don't know what to do yet.
	 */
	netif_stop_queue(dev);

	return 0;
}

static struct net_device_stats *scc_enet_get_stats(struct net_device *dev)
{
	struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv;

	return &cep->stats;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

static void set_multicast_list(struct net_device *dev)
{
	struct	scc_enet_private *cep;
	struct	dev_mc_list *dmi;
	u_char	*mcptr, *tdptr;
	volatile scc_enet_t *ep;
	int	i, j;
	cep = (struct scc_enet_private *)dev->priv;

	/* Get pointer to SCC area in parameter RAM.
	 */
	ep = (scc_enet_t *)dev->base_addr;

	if (dev->flags & IFF_PROMISC) {

		/* Log any net taps. */
		printk("%s: Promiscuous mode enabled.\n", dev->name);
		cep->sccp->scc_psmr |= SCC_PSMR_PRO;
	} else {

		cep->sccp->scc_psmr &= ~SCC_PSMR_PRO;

		if (dev->flags & IFF_ALLMULTI) {
			/* Catch all multicast addresses, so set the
			 * filter to all 1's.
			 */
			ep->sen_gaddr1 = 0xffff;
			ep->sen_gaddr2 = 0xffff;
			ep->sen_gaddr3 = 0xffff;
			ep->sen_gaddr4 = 0xffff;
		}
		else {
			/* Clear filter and add the addresses in the list.
			 */
			ep->sen_gaddr1 = 0;
			ep->sen_gaddr2 = 0;
			ep->sen_gaddr3 = 0;
			ep->sen_gaddr4 = 0;

			dmi = dev->mc_list;

			for (i=0; i<dev->mc_count; i++, dmi = dmi->next) {

				/* Only support group multicast for now.
				 */
				if (!(dmi->dmi_addr[0] & 1))
					continue;

				/* The address in dmi_addr is LSB first,
				 * and taddr is MSB first.  We have to
				 * copy bytes MSB first from dmi_addr.
				 */
				mcptr = (u_char *)dmi->dmi_addr + 5;
				tdptr = (u_char *)&ep->sen_taddrh;
				for (j=0; j<6; j++)
					*tdptr++ = *mcptr--;

				/* Ask CPM to run CRC and set bit in
				 * filter mask.
				 */
				cpmp->cp_cpcr = mk_cr_cmd(CPM_ENET_PAGE,
						CPM_ENET_BLOCK, 0,
						CPM_CR_SET_GADDR) | CPM_CR_FLG;
				/* this delay is necessary here -- Cort */
				udelay(10);
				while (cpmp->cp_cpcr & CPM_CR_FLG);
			}
		}
	}
}

/* Initialize the CPM Ethernet on SCC.
 */
static int __init scc_enet_init(void)
{
	struct net_device *dev;
	struct scc_enet_private *cep;
	int i, j, err;
	uint dp_offset;
	unsigned char	*eap;
	unsigned long	mem_addr;
	bd_t		*bd;
	volatile	cbd_t		*bdp;
	volatile	cpm_cpm2_t	*cp;
	volatile	scc_t		*sccp;
	volatile	scc_enet_t	*ep;
	volatile	cpm2_map_t	*immap;
	volatile	iop_cpm2_t	*io;

	cp = cpmp;	/* Get pointer to Communication Processor */

	immap = (cpm2_map_t *)CPM_MAP_ADDR;	/* and to internal registers */
	io = &immap->im_ioport;

	bd = (bd_t *)__res;

	/* Create an Ethernet device instance.
	 */
	dev = alloc_etherdev(sizeof(*cep));
	if (!dev)
		return -ENOMEM;

	cep = dev->priv;
	spin_lock_init(&cep->lock);

	/* Get pointer to SCC area in parameter RAM.
	 */
	ep = (scc_enet_t *)(&immap->im_dprambase[PROFF_ENET]);

	/* And another to the SCC register area.
	 */
	sccp = (volatile scc_t *)(&immap->im_scc[SCC_ENET]);
	cep->sccp = (scc_t *)sccp;		/* Keep the pointer handy */

	/* Disable receive and transmit in case someone left it running.
	 */
	sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* Configure port C and D pins for SCC Ethernet.  This
	 * won't work for all SCC possibilities....it will be
	 * board/port specific.
	 */
	io->iop_pparc |=
		(PC_ENET_RENA | PC_ENET_CLSN | PC_ENET_TXCLK | PC_ENET_RXCLK);
	io->iop_pdirc &=
		~(PC_ENET_RENA | PC_ENET_CLSN | PC_ENET_TXCLK | PC_ENET_RXCLK);
	io->iop_psorc &=
		~(PC_ENET_RENA | PC_ENET_TXCLK | PC_ENET_RXCLK);
	io->iop_psorc |= PC_ENET_CLSN;

	io->iop_ppard |= (PD_ENET_RXD | PD_ENET_TXD | PD_ENET_TENA);
	io->iop_pdird |= (PD_ENET_TXD | PD_ENET_TENA);
	io->iop_pdird &= ~PD_ENET_RXD;
	io->iop_psord |= PD_ENET_TXD;
	io->iop_psord &= ~(PD_ENET_RXD | PD_ENET_TENA);

	/* Configure Serial Interface clock routing.
	 * First, clear all SCC bits to zero, then set the ones we want.
	 */
	immap->im_cpmux.cmx_scr &= ~CMX_CLK_MASK;
	immap->im_cpmux.cmx_scr |= CMX_CLK_ROUTE;

	/* Allocate space for the buffer descriptors in the DP ram.
	 * These are relative offsets in the DP ram address space.
	 * Initialize base addresses for the buffer descriptors.
	 */
	dp_offset = cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
	ep->sen_genscc.scc_rbase = dp_offset;
	cep->rx_bd_base = (cbd_t *)cpm_dpram_addr(dp_offset);

	dp_offset = cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
	ep->sen_genscc.scc_tbase = dp_offset;
	cep->tx_bd_base = (cbd_t *)cpm_dpram_addr(dp_offset);

	cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
	cep->cur_rx = cep->rx_bd_base;

	ep->sen_genscc.scc_rfcr = CPMFCR_GBL | CPMFCR_EB;
	ep->sen_genscc.scc_tfcr = CPMFCR_GBL | CPMFCR_EB;

	/* Set maximum bytes per receive buffer.
	 * This appears to be an Ethernet frame size, not the buffer
	 * fragment size.  It must be a multiple of four.
	 */
	ep->sen_genscc.scc_mrblr = PKT_MAXBLR_SIZE;

	/* Set CRC preset and mask.
	 */
	ep->sen_cpres = 0xffffffff;
	ep->sen_cmask = 0xdebb20e3;
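	/* (0xffffffff and 0xdebb20e3 are the standard preset and check
	 *  values for the 32-bit Ethernet CRC.)
	 */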

	ep->sen_crcec = 0;	/* CRC Error counter */
	ep->sen_alec = 0;	/* alignment error counter */
	ep->sen_disfc = 0;	/* discard frame counter */

	ep->sen_pads = 0x8888;	/* Tx short frame pad character */
	ep->sen_retlim = 15;	/* Retry limit threshold */

	ep->sen_maxflr = PKT_MAXBUF_SIZE;	/* maximum frame length register */
	ep->sen_minflr = PKT_MINBUF_SIZE;	/* minimum frame length register */

	ep->sen_maxd1 = PKT_MAXBLR_SIZE;	/* maximum DMA1 length */
	ep->sen_maxd2 = PKT_MAXBLR_SIZE;	/* maximum DMA2 length */

	/* Clear hash tables.
	 */
	ep->sen_gaddr1 = 0;
	ep->sen_gaddr2 = 0;
	ep->sen_gaddr3 = 0;
	ep->sen_gaddr4 = 0;
	ep->sen_iaddr1 = 0;
	ep->sen_iaddr2 = 0;
	ep->sen_iaddr3 = 0;
	ep->sen_iaddr4 = 0;

	/* Set Ethernet station address.
	 *
	 * This is supplied in the board information structure, so we
	 * copy that into the controller.
	 */
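	/* Like the temporary address handling in set_multicast_list(),
	 * the paddr registers apparently want the bytes in reverse
	 * order, so copy from the end of dev_addr.
	 */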
	eap = (unsigned char *)&(ep->sen_paddrh);
	for (i=5; i>=0; i--)
		*eap++ = dev->dev_addr[i] = bd->bi_enetaddr[i];

	ep->sen_pper = 0;	/* 'cause the book says so */
	ep->sen_taddrl = 0;	/* temp address (LSB) */
	ep->sen_taddrm = 0;
	ep->sen_taddrh = 0;	/* temp address (MSB) */

	/* Now allocate the host memory pages and initialize the
	 * buffer descriptors.
	 */
	bdp = cep->tx_bd_base;
	for (i=0; i<TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page.
		 */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap.
	 */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	bdp = cep->rx_bd_base;
	for (i=0; i<CPM_ENET_RX_PAGES; i++) {

		/* Allocate a page.
		 */
		mem_addr = __get_free_page(GFP_KERNEL);
		if (mem_addr == 0) {
			/* No rollback of the earlier allocations, but at
			 * least fail cleanly instead of pointing BDs at a
			 * bad address.
			 */
			free_netdev(dev);
			return -ENOMEM;
		}

		/* Initialize the BD for every fragment in the page.
		 */
		for (j=0; j<CPM_ENET_RX_FRPPG; j++) {
			bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR;
			bdp->cbd_bufaddr = __pa(mem_addr);
			mem_addr += CPM_ENET_RX_FRSIZE;
			bdp++;
		}
	}

	/* Set the last buffer to wrap.
	 */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Let's re-initialize the channel now.  We have to do it later
	 * than the manual describes because we have just now finished
	 * the BD initialization.
	 */
	cpmp->cp_cpcr = mk_cr_cmd(CPM_ENET_PAGE, CPM_ENET_BLOCK, 0,
			CPM_CR_INIT_TRX) | CPM_CR_FLG;
	while (cp->cp_cpcr & CPM_CR_FLG);

	cep->skb_cur = cep->skb_dirty = 0;

	sccp->scc_scce = 0xffff;	/* Clear any pending events */

	/* Enable interrupts for transmit error, complete frame
	 * received, and any transmit buffer for which we have also set
	 * the interrupt flag.
	 */
	sccp->scc_sccm = (SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

	/* Install our interrupt handler.
	 */
	err = request_irq(SIU_INT_ENET, scc_enet_interrupt, 0, "enet", dev);
	if (err) {
		free_netdev(dev);
		return err;
	}

	/* Set GSMR_H to enable all normal operating modes.
	 * Set GSMR_L to enable Ethernet to MC68160.
	 */
	sccp->scc_gsmrh = 0;
	sccp->scc_gsmrl = (SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 | SCC_GSMRL_MODE_ENET);

	/* Set sync/delimiters.
	 */
	sccp->scc_dsr = 0xd555;
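	/* (0xd555 is presumably the Ethernet preamble/start-of-frame
	 *  delimiter pattern expected by the SCC in Ethernet mode.)
	 */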

	/* Set processing mode.  Use Ethernet CRC, catch broadcast, and
	 * start frame search 22 bit times after RENA.
	 */
	sccp->scc_psmr = (SCC_PSMR_ENCRC | SCC_PSMR_NIB22);

	/* It is now OK to enable the Ethernet transmitter.
	 * Unfortunately, there are board implementation differences here.
	 */
	io->iop_pparc &= ~(PC_EST8260_ENET_LOOPBACK |
				PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD);
	io->iop_psorc &= ~(PC_EST8260_ENET_LOOPBACK |
				PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD);
	io->iop_pdirc |= (PC_EST8260_ENET_LOOPBACK |
				PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD);
	io->iop_pdatc &= ~(PC_EST8260_ENET_LOOPBACK | PC_EST8260_ENET_SQE);
	io->iop_pdatc |= PC_EST8260_ENET_NOTFD;

	dev->base_addr = (unsigned long)ep;

	/* The CPM Ethernet specific entries in the device structure. */
	dev->open = scc_enet_open;
	dev->hard_start_xmit = scc_enet_start_xmit;
	dev->tx_timeout = scc_enet_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->stop = scc_enet_close;
	dev->get_stats = scc_enet_get_stats;
	dev->set_multicast_list = set_multicast_list;

	/* And last, enable the transmit and receive processing.
	 */
	sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}

	printk("%s: SCC ENET Version 0.1, ", dev->name);
	for (i=0; i<5; i++)
		printk("%02x:", dev->dev_addr[i]);
	printk("%02x\n", dev->dev_addr[5]);

	return 0;
}

module_init(scc_enet_init);