/*
 * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/seeq.h>

#include "sgiseeq.h"

static char *sgiseeqstr = "SGI Seeq8003";

/*
 * If you want speed, you do something silly, it always has worked for me. So,
 * with that in mind, I've decided to make this driver look completely like a
 * stupid Lance from a driver architecture perspective. Only difference is that
 * here our "ring buffer" looks and acts like a real Lance one does but is
 * laid out like how the HPC DMA and the Seeq want it to. You'd be surprised
 * how a stupid idea like this can pay off in performance, not to mention
 * making this driver 2,000 times easier to write. ;-)
 */

/* Tune these if we tend to run out often etc. */
#define SEEQ_RX_BUFFERS  16
#define SEEQ_TX_BUFFERS  16

#define PKT_BUF_SZ       1584

#define NEXT_RX(i)  (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i)  (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i)  (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i)  (((i) - 1) & (SEEQ_TX_BUFFERS - 1))
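
/*
 * The index macros above assume SEEQ_RX_BUFFERS and SEEQ_TX_BUFFERS are
 * powers of two, so the "& (n - 1)" mask wraps the ring index around
 * without a modulo.
 */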

#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
                            sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
                            sp->tx_old - sp->tx_new - 1)
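
/*
 * Number of free tx descriptors.  One slot is always kept unused so that
 * tx_old == tx_new unambiguously means "ring empty" rather than "ring full".
 */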

struct sgiseeq_rx_desc {
        volatile struct hpc_dma_desc rdma;
        volatile signed int buf_vaddr;
};

struct sgiseeq_tx_desc {
        volatile struct hpc_dma_desc tdma;
        volatile signed int buf_vaddr;
};
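
/*
 * In both descriptors, buf_vaddr caches the KSEG1 (uncached) virtual
 * address of the packet buffer whose physical address is programmed into
 * the HPC descriptor, so the CPU can touch the data without any cache
 * flushing around DMA.
 */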

/*
 * Warning: This structure is laid out in a certain way because HPC dma
 * descriptors must be 8-byte aligned.  So don't touch this without
 * some care.
 */
struct sgiseeq_init_block { /* Note the name ;-) */
        struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
        struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};

struct sgiseeq_private {
        struct sgiseeq_init_block *srings;

        /* Ptrs to the descriptors in uncached space. */
        struct sgiseeq_rx_desc *rx_desc;
        struct sgiseeq_tx_desc *tx_desc;

        char *name;
        struct hpc3_ethregs *hregs;
        struct sgiseeq_regs *sregs;

        /* Ring entry counters. */
        unsigned int rx_new, tx_new;
        unsigned int rx_old, tx_old;

        int is_edlc;
        unsigned char control;
        unsigned char mode;

        struct net_device_stats stats;

        spinlock_t tx_lock;
};

static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
        hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
        udelay(20);
        hregs->reset = 0;
}

static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
                                       struct sgiseeq_regs *sregs)
{
        hregs->rx_ctrl = hregs->tx_ctrl = 0;
        hpc3_eth_reset(hregs);
}

#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
                       SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)
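
/*
 * Going by the bit names, RSTAT_GO_BITS above enables receive interrupts
 * for good frames, end of frame, and short-frame/dribble/CRC errors.
 */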

static inline void seeq_go(struct sgiseeq_private *sp,
                           struct hpc3_ethregs *hregs,
                           struct sgiseeq_regs *sregs)
{
        sregs->rstat = sp->mode | RSTAT_GO_BITS;
        hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}

static inline void __sgiseeq_set_mac_address(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        int i;

        sregs->tstat = SEEQ_TCMD_RB0;
        for (i = 0; i < 6; i++)
                sregs->rw.eth_addr[i] = dev->dev_addr[i];
}

static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sockaddr *sa = addr;

        memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

        spin_lock_irq(&sp->tx_lock);
        __sgiseeq_set_mac_address(dev);
        spin_unlock_irq(&sp->tx_lock);

        return 0;
}

#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT  (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))

static int seeq_init_ring(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i;

        netif_stop_queue(dev);
        sp->rx_new = sp->tx_new = 0;
        sp->rx_old = sp->tx_old = 0;

        __sgiseeq_set_mac_address(dev);

        /* Setup tx ring. */
        for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
                if (!sp->tx_desc[i].tdma.pbuf) {
                        unsigned long buffer;

                        buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
                        if (!buffer)
                                return -ENOMEM;
                        sp->tx_desc[i].buf_vaddr = CKSEG1ADDR(buffer);
                        sp->tx_desc[i].tdma.pbuf = CPHYSADDR(buffer);
                }
                sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
        }

        /* And now the rx ring. */
        for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
                if (!sp->rx_desc[i].rdma.pbuf) {
                        unsigned long buffer;

                        buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
                        if (!buffer)
                                return -ENOMEM;
                        sp->rx_desc[i].buf_vaddr = CKSEG1ADDR(buffer);
                        sp->rx_desc[i].rdma.pbuf = CPHYSADDR(buffer);
                }
                sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
        }
        sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
        return 0;
}

#ifdef DEBUG
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;

static void sgiseeq_dump_rings(void)
{
        static int once;
        struct sgiseeq_rx_desc *r = gpriv->rx_desc;
        struct sgiseeq_tx_desc *t = gpriv->tx_desc;
        struct hpc3_ethregs *hregs = gpriv->hregs;
        int i;

        if (once)
                return;
        once++;
        printk("RING DUMP:\n");
        for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
                printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
                       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
                       r[i].rdma.pnext);
                i += 1;
                printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
                       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
                       r[i].rdma.pnext);
        }
        for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
                printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
                       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
                       t[i].tdma.pnext);
                i += 1;
                printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
                       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
                       t[i].tdma.pnext);
        }
        printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
               gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
        printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
               hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
        printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
               hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#endif

#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)

static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
                     struct sgiseeq_regs *sregs)
{
        struct hpc3_ethregs *hregs = sp->hregs;
        int err;

        reset_hpc3_and_seeq(hregs, sregs);
        err = seeq_init_ring(dev);
        if (err)
                return err;

        /* Setup to field the proper interrupt types. */
        if (sp->is_edlc) {
                sregs->tstat = TSTAT_INIT_EDLC;
                sregs->rw.wregs.control = sp->control;
                sregs->rw.wregs.frame_gap = 0;
        } else {
                sregs->tstat = TSTAT_INIT_SEEQ;
        }

        hregs->rx_ndptr = CPHYSADDR(sp->rx_desc);
        hregs->tx_ndptr = CPHYSADDR(sp->tx_desc);

        seeq_go(sp, hregs, sregs);
        return 0;
}

static inline void record_rx_errors(struct sgiseeq_private *sp,
                                    unsigned char status)
{
        if (status & SEEQ_RSTAT_OVERF ||
            status & SEEQ_RSTAT_SFRAME)
                sp->stats.rx_over_errors++;
        if (status & SEEQ_RSTAT_CERROR)
                sp->stats.rx_crc_errors++;
        if (status & SEEQ_RSTAT_DERROR)
                sp->stats.rx_frame_errors++;
        if (status & SEEQ_RSTAT_REOF)
                sp->stats.rx_errors++;
}

static inline void rx_maybe_restart(struct sgiseeq_private *sp,
                                    struct hpc3_ethregs *hregs,
                                    struct sgiseeq_regs *sregs)
{
        if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
                hregs->rx_ndptr = CPHYSADDR(sp->rx_desc + sp->rx_new);
                seeq_go(sp, hregs, sregs);
        }
}

#define for_each_rx(rd, sp) for ((rd) = &(sp)->rx_desc[(sp)->rx_new]; \
                                 !((rd)->rdma.cntinfo & HPCDMA_OWN); \
                                 (rd) = &(sp)->rx_desc[(sp)->rx_new])
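
/*
 * for_each_rx() walks the rx descriptors the HPC has handed back to us
 * (HPCDMA_OWN clear), and relies on the loop body advancing sp->rx_new
 * on every pass.
 */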

static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
                              struct hpc3_ethregs *hregs,
                              struct sgiseeq_regs *sregs)
{
        struct sgiseeq_rx_desc *rd;
        struct sk_buff *skb = NULL;
        unsigned char pkt_status;
        unsigned char *pkt_pointer = NULL;
        int len = 0;
        unsigned int orig_end = PREV_RX(sp->rx_new);

        /* Service every received packet. */
        for_each_rx(rd, sp) {
                len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
                pkt_pointer = (unsigned char *)(long)rd->buf_vaddr;
                pkt_status = pkt_pointer[len + 2];

                if (pkt_status & SEEQ_RSTAT_FIG) {
                        /* Packet is OK. */
                        skb = dev_alloc_skb(len + 2);

                        if (skb) {
                                skb_reserve(skb, 2);
                                skb_put(skb, len);

                                /* Copy out of kseg1 to avoid silly cache flush. */
                                skb_copy_to_linear_data(skb, pkt_pointer + 2, len);
                                skb->protocol = eth_type_trans(skb, dev);

                                /* We don't want to receive our own packets */
                                if (memcmp(eth_hdr(skb)->h_source, dev->dev_addr, ETH_ALEN)) {
                                        netif_rx(skb);
                                        dev->last_rx = jiffies;
                                        sp->stats.rx_packets++;
                                        sp->stats.rx_bytes += len;
                                } else {
                                        /* Silently drop my own packets */
                                        dev_kfree_skb_irq(skb);
                                }
                        } else {
                                printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
                                       dev->name);
                                sp->stats.rx_dropped++;
                        }
                } else {
                        record_rx_errors(sp, pkt_status);
                }

                /* Return the entry to the ring pool. */
                rd->rdma.cntinfo = RCNTINFO_INIT;
                sp->rx_new = NEXT_RX(sp->rx_new);
        }
        sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
        sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
        rx_maybe_restart(sp, hregs, sregs);
}

static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
                                             struct sgiseeq_regs *sregs)
{
        if (sp->is_edlc) {
                sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
                sregs->rw.wregs.control = sp->control;
        }
}

static inline void kick_tx(struct sgiseeq_tx_desc *td,
                           struct hpc3_ethregs *hregs)
{
        /* If the HPC aint doin nothin, and there are more packets
         * with ETXD cleared and XIU set we must make very certain
         * that we restart the HPC else we risk locking up the
         * adapter.  The following code is only safe iff the HPCDMA
         * is not active!
         */
        while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
               (HPCDMA_XIU | HPCDMA_ETXD))
                td = (struct sgiseeq_tx_desc *)(long) CKSEG1ADDR(td->tdma.pnext);
        if (td->tdma.cntinfo & HPCDMA_XIU) {
                hregs->tx_ndptr = CPHYSADDR(td);
                hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
        }
}

static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
                              struct hpc3_ethregs *hregs,
                              struct sgiseeq_regs *sregs)
{
        struct sgiseeq_tx_desc *td;
        unsigned long status = hregs->tx_ctrl;
        int j;

        tx_maybe_reset_collisions(sp, sregs);

        if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
                /* Oops, HPC detected some sort of error. */
                if (status & SEEQ_TSTAT_R16)
                        sp->stats.tx_aborted_errors++;
                if (status & SEEQ_TSTAT_UFLOW)
                        sp->stats.tx_fifo_errors++;
                if (status & SEEQ_TSTAT_LCLS)
                        sp->stats.collisions++;
        }

        /* Ack 'em... */
        for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
                td = &sp->tx_desc[j];

                if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
                        break;
                if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
                        if (!(status & HPC3_ETXCTRL_ACTIVE)) {
                                hregs->tx_ndptr = CPHYSADDR(td);
                                hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
                        }
                        break;
                }
                sp->stats.tx_packets++;
                sp->tx_old = NEXT_TX(sp->tx_old);
                td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
                td->tdma.cntinfo |= HPCDMA_EOX;
        }
}

static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct hpc3_ethregs *hregs = sp->hregs;
        struct sgiseeq_regs *sregs = sp->sregs;

        spin_lock(&sp->tx_lock);

        /* Ack the IRQ and set software state. */
        hregs->reset = HPC3_ERST_CLRIRQ;

        /* Always check for received packets. */
        sgiseeq_rx(dev, sp, hregs, sregs);

        /* Only check for tx acks if we have something queued. */
        if (sp->tx_old != sp->tx_new)
                sgiseeq_tx(dev, sp, hregs, sregs);

        if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
                netif_wake_queue(dev);
        }
        spin_unlock(&sp->tx_lock);

        return IRQ_HANDLED;
}

static int sgiseeq_open(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        unsigned int irq = dev->irq;
        int err;

        if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
                printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
                return -EAGAIN;
        }

        err = init_seeq(dev, sp, sregs);
        if (err)
                goto out_free_irq;

        netif_start_queue(dev);

        return 0;

out_free_irq:
        free_irq(irq, dev);

        return err;
}

static int sgiseeq_close(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        unsigned int irq = dev->irq;

        netif_stop_queue(dev);

        /* Shutdown the Seeq. */
        reset_hpc3_and_seeq(sp->hregs, sregs);
        free_irq(irq, dev);

        return 0;
}

static inline int sgiseeq_reset(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        int err;

        err = init_seeq(dev, sp, sregs);
        if (err)
                return err;

        dev->trans_start = jiffies;
        netif_wake_queue(dev);

        return 0;
}

static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct hpc3_ethregs *hregs = sp->hregs;
        unsigned long flags;
        struct sgiseeq_tx_desc *td;
        int skblen, len, entry;

        spin_lock_irqsave(&sp->tx_lock, flags);

        /* Setup... */
        skblen = skb->len;
        len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
        sp->stats.tx_bytes += len;
        entry = sp->tx_new;
        td = &sp->tx_desc[entry];

        /* Create entry.  There are so many races with adding a new
         * descriptor to the chain:
         * 1) Assume that the HPC is off processing a DMA chain while
         *    we are changing all of the following.
         * 2) Do not allow the HPC to look at a new descriptor until
         *    we have completely set up its state.  This means, do
         *    not clear HPCDMA_EOX in the current last descriptor
         *    until the one we are adding looks consistent and could
         *    be processed right now.
         * 3) The tx interrupt code must notice when we've added a new
         *    entry and the HPC got to the end of the chain before we
         *    added this new entry and restarted it.
         */
        skb_copy_from_linear_data(skb, (char *)(long)td->buf_vaddr, skblen);
        if (len != skblen)
                memset((char *)(long)td->buf_vaddr + skb->len, 0, len - skblen);
        td->tdma.cntinfo = (len & HPCDMA_BCNT) |
                           HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
        if (sp->tx_old != sp->tx_new) {
                struct sgiseeq_tx_desc *backend;

                backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
                backend->tdma.cntinfo &= ~HPCDMA_EOX;
        }
        sp->tx_new = NEXT_TX(sp->tx_new);       /* Advance. */

        /* Maybe kick the HPC back into motion. */
        if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
                kick_tx(&sp->tx_desc[sp->tx_old], hregs);

        dev->trans_start = jiffies;
        dev_kfree_skb(skb);

        if (!TX_BUFFS_AVAIL(sp))
                netif_stop_queue(dev);
        spin_unlock_irqrestore(&sp->tx_lock, flags);

        return 0;
}

static void timeout(struct net_device *dev)
{
        printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
        sgiseeq_reset(dev);

        dev->trans_start = jiffies;
        netif_wake_queue(dev);
}

static struct net_device_stats *sgiseeq_get_stats(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);

        return &sp->stats;
}

static void sgiseeq_set_multicast(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        unsigned char oldmode = sp->mode;

        if (dev->flags & IFF_PROMISC)
                sp->mode = SEEQ_RCMD_RANY;
        else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count)
                sp->mode = SEEQ_RCMD_RBMCAST;
        else
                sp->mode = SEEQ_RCMD_RBCAST;

        /* XXX I know this sucks, but is there a better way to reprogram
         * XXX the receiver?  At least, this shouldn't happen too often.
         */

        if (oldmode != sp->mode)
                sgiseeq_reset(dev);
}

static inline void setup_tx_ring(struct sgiseeq_tx_desc *buf, int nbufs)
{
        int i = 0;

        while (i < (nbufs - 1)) {
                buf[i].tdma.pnext = CPHYSADDR(buf + i + 1);
                buf[i].tdma.pbuf = 0;
                i++;
        }
        buf[i].tdma.pnext = CPHYSADDR(buf);
}

static inline void setup_rx_ring(struct sgiseeq_rx_desc *buf, int nbufs)
{
        int i = 0;

        while (i < (nbufs - 1)) {
                buf[i].rdma.pnext = CPHYSADDR(buf + i + 1);
                buf[i].rdma.pbuf = 0;
                i++;
        }
        buf[i].rdma.pbuf = 0;
        buf[i].rdma.pnext = CPHYSADDR(buf);
}

#define ALIGNED(x)  ((((unsigned long)(x)) + 0xf) & ~(0xf))
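
/*
 * ALIGNED() above rounds an address up to the next 16-byte boundary
 * before it is converted with CKSEG1ADDR() in the probe routine below.
 */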

static int __init sgiseeq_probe(struct platform_device *pdev)
{
        struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
        struct hpc3_regs *hpcregs = pd->hpc;
        struct sgiseeq_init_block *sr;
        unsigned int irq = pd->irq;
        struct sgiseeq_private *sp;
        struct net_device *dev;
        int err, i;

        dev = alloc_etherdev(sizeof (struct sgiseeq_private));
        if (!dev) {
                printk(KERN_ERR "Sgiseeq: Etherdev alloc failed, aborting.\n");
                err = -ENOMEM;
                goto err_out;
        }

        platform_set_drvdata(pdev, dev);
        sp = netdev_priv(dev);

        /* Allocate the descriptor rings on their own zeroed page so they
         * are naturally aligned.
         */
        sr = (struct sgiseeq_init_block *) get_zeroed_page(GFP_KERNEL);
        if (!sr) {
                printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }
        sp->srings = sr;

        memcpy(dev->dev_addr, pd->mac, ETH_ALEN);

#ifdef DEBUG
        gpriv = sp;
        gdev = dev;
#endif
        sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
        sp->hregs = &hpcregs->ethregs;
        sp->name = sgiseeqstr;
        sp->mode = SEEQ_RCMD_RBCAST;

        sp->rx_desc = (struct sgiseeq_rx_desc *)
                      CKSEG1ADDR(ALIGNED(&sp->srings->rxvector[0]));
        dma_cache_wback_inv((unsigned long)&sp->srings->rxvector,
                            sizeof(sp->srings->rxvector));
        sp->tx_desc = (struct sgiseeq_tx_desc *)
                      CKSEG1ADDR(ALIGNED(&sp->srings->txvector[0]));
        dma_cache_wback_inv((unsigned long)&sp->srings->txvector,
                            sizeof(sp->srings->txvector));

        /* A couple calculations now, saves many cycles later. */
        setup_rx_ring(sp->rx_desc, SEEQ_RX_BUFFERS);
        setup_tx_ring(sp->tx_desc, SEEQ_TX_BUFFERS);

        /* Setup PIO and DMA transfer timing */
        sp->hregs->pconfig = 0x161;
        sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
                             HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;

        /* Reset the chip. */
        hpc3_eth_reset(sp->hregs);

        sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
        if (sp->is_edlc)
                sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
                              SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
                              SEEQ_CTRL_ENCARR;

        dev->open = sgiseeq_open;
        dev->stop = sgiseeq_close;
        dev->hard_start_xmit = sgiseeq_start_xmit;
        dev->tx_timeout = timeout;
        dev->watchdog_timeo = (200 * HZ) / 1000;
        dev->get_stats = sgiseeq_get_stats;
        dev->set_multicast_list = sgiseeq_set_multicast;
        dev->set_mac_address = sgiseeq_set_mac_address;
        dev->irq = irq;

        if (register_netdev(dev)) {
                printk(KERN_ERR "Sgiseeq: Cannot register net device, "
                       "aborting.\n");
                err = -ENODEV;
                goto err_out_free_page;
        }

        printk(KERN_INFO "%s: %s ", dev->name, sgiseeqstr);
        for (i = 0; i < 6; i++)
                printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');

        return 0;

err_out_free_page:
        free_page((unsigned long) sp->srings);
err_out_free_dev:
        free_netdev(dev);

err_out:
        return err;
}

static int __exit sgiseeq_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct sgiseeq_private *sp = netdev_priv(dev);

        unregister_netdev(dev);
        free_page((unsigned long) sp->srings);
        free_netdev(dev);
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver sgiseeq_driver = {
        .probe  = sgiseeq_probe,
        .remove = __devexit_p(sgiseeq_remove),
        .driver = {
                .name = "sgiseeq"
        }
};

static int __init sgiseeq_module_init(void)
{
        if (platform_driver_register(&sgiseeq_driver)) {
                printk(KERN_ERR "Driver registration failed\n");
                return -ENODEV;
        }

        return 0;
}

static void __exit sgiseeq_module_exit(void)
{
        platform_driver_unregister(&sgiseeq_driver);
}

module_init(sgiseeq_module_init);
module_exit(sgiseeq_module_exit);

MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
MODULE_LICENSE("GPL");