/*
 * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/route.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgialib.h>

#include "sgiseeq.h"

static char *sgiseeqstr = "SGI Seeq8003";

/*
 * If you want speed, you do something silly, it always has worked for me. So,
 * with that in mind, I've decided to make this driver look completely like a
 * stupid Lance from a driver architecture perspective. Only difference is that
 * here our "ring buffer" looks and acts like a real Lance one does but is
 * laid out like how the HPC DMA and the Seeq want it to. You'd be surprised
 * how a stupid idea like this can pay off in performance, not to mention
 * making this driver 2,000 times easier to write. ;-)
 */

/* Tune these if we tend to run out often etc. */
#define SEEQ_RX_BUFFERS 16
#define SEEQ_TX_BUFFERS 16

#define PKT_BUF_SZ 1584

#define NEXT_RX(i) (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i) (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i) (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i) (((i) - 1) & (SEEQ_TX_BUFFERS - 1))

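/*
 * Number of free TX descriptors.  One slot is always left unused so that
 * tx_old == tx_new unambiguously means "ring empty" rather than "ring full".
 */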
#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
			    sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
			    sp->tx_old - sp->tx_new - 1)

struct sgiseeq_rx_desc {
	volatile struct hpc_dma_desc rdma;
	volatile signed int buf_vaddr;
};

struct sgiseeq_tx_desc {
	volatile struct hpc_dma_desc tdma;
	volatile signed int buf_vaddr;
};

/*
 * Warning: This structure is laid out in a certain way because HPC dma
 *          descriptors must be 8-byte aligned.  So don't touch this without
 *          some care.
 */
struct sgiseeq_init_block { /* Note the name ;-) */
	struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
	struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};

struct sgiseeq_private {
	struct sgiseeq_init_block *srings;

	/* Ptrs to the descriptors in uncached space. */
	struct sgiseeq_rx_desc *rx_desc;
	struct sgiseeq_tx_desc *tx_desc;

	char *name;
	struct hpc3_ethregs *hregs;
	struct sgiseeq_regs *sregs;

	/* Ring entry counters. */
	unsigned int rx_new, tx_new;
	unsigned int rx_old, tx_old;

	int is_edlc;
	unsigned char control;
	unsigned char mode;

	struct net_device_stats stats;

	struct net_device *next_module;
	spinlock_t tx_lock;
};

/* A list of all installed seeq devices, for removing the driver module. */
static struct net_device *root_sgiseeq_dev;

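/*
 * Pulse the HPC3 Ethernet channel reset bit (clearing any pending IRQ at
 * the same time), then release the channel from reset so the Seeq can be
 * reprogrammed.
 */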
static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
	hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
	udelay(20);
	hregs->reset = 0;
}

static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
				       struct sgiseeq_regs *sregs)
{
	hregs->rx_ctrl = hregs->tx_ctrl = 0;
	hpc3_eth_reset(hregs);
}

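/*
 * Program the Seeq receive command register with the current receive mode
 * plus the RSTAT_GO_BITS interrupt sources, then start HPC3 receive DMA.
 */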
#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
		       SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)

static inline void seeq_go(struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs,
			   struct sgiseeq_regs *sregs)
{
	sregs->rstat = sp->mode | RSTAT_GO_BITS;
	hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}

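/*
 * Load dev->dev_addr into the Seeq's station address registers.  Callers
 * either hold tx_lock or run with the chip quiescent.
 */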
static inline void __sgiseeq_set_mac_address(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int i;

	sregs->tstat = SEEQ_TCMD_RB0;
	for (i = 0; i < 6; i++)
		sregs->rw.eth_addr[i] = dev->dev_addr[i];
}

static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	spin_lock_irq(&sp->tx_lock);
	__sgiseeq_set_mac_address(dev);
	spin_unlock_irq(&sp->tx_lock);

	return 0;
}

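/*
 * (Re)initialize both descriptor rings: packet buffers are allocated
 * lazily on first use, every RX slot is handed back to the HPC
 * (HPCDMA_OWN) and the last RX descriptor is tagged with HPCDMA_EOR.
 */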
#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))

static int seeq_init_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	netif_stop_queue(dev);
	sp->rx_new = sp->tx_new = 0;
	sp->rx_old = sp->tx_old = 0;

	__sgiseeq_set_mac_address(dev);

	/* Setup tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		if (!sp->tx_desc[i].tdma.pbuf) {
			unsigned long buffer;

			buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
			if (!buffer)
				return -ENOMEM;
			sp->tx_desc[i].buf_vaddr = CKSEG1ADDR(buffer);
			sp->tx_desc[i].tdma.pbuf = CPHYSADDR(buffer);
		}
		sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (!sp->rx_desc[i].rdma.pbuf) {
			unsigned long buffer;

			buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
			if (!buffer)
				return -ENOMEM;
			sp->rx_desc[i].buf_vaddr = CKSEG1ADDR(buffer);
			sp->rx_desc[i].rdma.pbuf = CPHYSADDR(buffer);
		}
		sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
	}
	sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
	return 0;
}

#ifdef DEBUG
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;

static void sgiseeq_dump_rings(void)
{
	static int once;
	struct sgiseeq_rx_desc *r = gpriv->rx_desc;
	struct sgiseeq_tx_desc *t = gpriv->tx_desc;
	struct hpc3_ethregs *hregs = gpriv->hregs;
	int i;

	if (once)
		return;
	once++;
	printk("RING DUMP:\n");
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
	}
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
	}
	printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
	       gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
	printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
	       hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
	printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
	       hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#endif

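/*
 * Full (re)initialisation path: reset the HPC3 channel and the Seeq,
 * rebuild the descriptor rings, program the interrupt sources, point the
 * HPC at the first RX/TX descriptors and start receiving.
 */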
#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)

static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
		     struct sgiseeq_regs *sregs)
{
	struct hpc3_ethregs *hregs = sp->hregs;
	int err;

	reset_hpc3_and_seeq(hregs, sregs);
	err = seeq_init_ring(dev);
	if (err)
		return err;

	/* Setup to field the proper interrupt types. */
	if (sp->is_edlc) {
		sregs->tstat = TSTAT_INIT_EDLC;
		sregs->rw.wregs.control = sp->control;
		sregs->rw.wregs.frame_gap = 0;
	} else {
		sregs->tstat = TSTAT_INIT_SEEQ;
	}

	hregs->rx_ndptr = CPHYSADDR(sp->rx_desc);
	hregs->tx_ndptr = CPHYSADDR(sp->tx_desc);

	seeq_go(sp, hregs, sregs);
	return 0;
}

static inline void record_rx_errors(struct sgiseeq_private *sp,
				    unsigned char status)
{
	if (status & SEEQ_RSTAT_OVERF ||
	    status & SEEQ_RSTAT_SFRAME)
		sp->stats.rx_over_errors++;
	if (status & SEEQ_RSTAT_CERROR)
		sp->stats.rx_crc_errors++;
	if (status & SEEQ_RSTAT_DERROR)
		sp->stats.rx_frame_errors++;
	if (status & SEEQ_RSTAT_REOF)
		sp->stats.rx_errors++;
}

static inline void rx_maybe_restart(struct sgiseeq_private *sp,
				    struct hpc3_ethregs *hregs,
				    struct sgiseeq_regs *sregs)
{
	if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
		hregs->rx_ndptr = CPHYSADDR(sp->rx_desc + sp->rx_new);
		seeq_go(sp, hregs, sregs);
	}
}

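/*
 * Walk the RX ring from rx_new until we hit a descriptor the HPC still
 * owns.  Each completed buffer carries its status byte at the end of the
 * frame; good frames are copied into a fresh skb, bad ones only bump the
 * error counters.  Afterwards the EOR marker is moved to the new ring
 * tail and receive DMA is restarted if the HPC went idle.
 */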
#define for_each_rx(rd, sp) for ((rd) = &(sp)->rx_desc[(sp)->rx_new]; \
				 !((rd)->rdma.cntinfo & HPCDMA_OWN); \
				 (rd) = &(sp)->rx_desc[(sp)->rx_new])

static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = NULL;
	unsigned char pkt_status;
	unsigned char *pkt_pointer = NULL;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	for_each_rx(rd, sp) {
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		pkt_pointer = (unsigned char *)(long)rd->buf_vaddr;
		pkt_status = pkt_pointer[len + 2];

		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			skb = dev_alloc_skb(len + 2);

			if (skb) {
				skb->dev = dev;
				skb_reserve(skb, 2);
				skb_put(skb, len);

				/* Copy out of kseg1 to avoid silly cache flush. */
				eth_copy_and_sum(skb, pkt_pointer + 2, len, 0);
				skb->protocol = eth_type_trans(skb, dev);

				/* We don't want to receive our own packets */
				if (memcmp(eth_hdr(skb)->h_source, dev->dev_addr, ETH_ALEN)) {
					netif_rx(skb);
					dev->last_rx = jiffies;
					sp->stats.rx_packets++;
					sp->stats.rx_bytes += len;
				} else {
					/* Silently drop my own packets */
					dev_kfree_skb_irq(skb);
				}
			} else {
				printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
				       dev->name);
				sp->stats.rx_dropped++;
			}
		} else {
			record_rx_errors(sp, pkt_status);
		}

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
	}
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	rx_maybe_restart(sp, hregs, sregs);
}

static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
					     struct sgiseeq_regs *sregs)
{
	if (sp->is_edlc) {
		sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
		sregs->rw.wregs.control = sp->control;
	}
}

static inline void kick_tx(struct sgiseeq_tx_desc *td,
			   struct hpc3_ethregs *hregs)
{
	/* If the HPC isn't doing anything, and there are more packets
	 * with ETXD cleared and XIU set, we must make very certain
	 * that we restart the HPC else we risk locking up the
	 * adapter.  The following code is only safe iff the HPCDMA
	 * is not active!
	 */
	while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
	      (HPCDMA_XIU | HPCDMA_ETXD))
		td = (struct sgiseeq_tx_desc *)(long) CKSEG1ADDR(td->tdma.pnext);
	if (td->tdma.cntinfo & HPCDMA_XIU) {
		hregs->tx_ndptr = CPHYSADDR(td);
		hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
	}
}

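/*
 * TX completion: record any error status, then walk the ring from tx_old,
 * retiring descriptors the Seeq has finished with.  If we find an entry
 * that is queued (XIU) but not yet transmitted while the HPC has gone
 * inactive, transmit DMA is restarted from that descriptor.
 */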
static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_tx_desc *td;
	unsigned long status = hregs->tx_ctrl;
	int j;

	tx_maybe_reset_collisions(sp, sregs);

	if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
		/* Oops, HPC detected some sort of error. */
		if (status & SEEQ_TSTAT_R16)
			sp->stats.tx_aborted_errors++;
		if (status & SEEQ_TSTAT_UFLOW)
			sp->stats.tx_fifo_errors++;
		if (status & SEEQ_TSTAT_LCLS)
			sp->stats.collisions++;
	}

	/* Ack 'em... */
	for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
		td = &sp->tx_desc[j];

		if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
			break;
		if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
			if (!(status & HPC3_ETXCTRL_ACTIVE)) {
				hregs->tx_ndptr = CPHYSADDR(td);
				hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
			}
			break;
		}
		sp->stats.tx_packets++;
		sp->tx_old = NEXT_TX(sp->tx_old);
		td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
		td->tdma.cntinfo |= HPCDMA_EOX;
	}
}

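/*
 * One interrupt services both directions: acknowledge the HPC3 IRQ,
 * process received frames, reap completed transmits, and wake the queue
 * once descriptors become available again.
 */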
static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	struct sgiseeq_regs *sregs = sp->sregs;

	spin_lock(&sp->tx_lock);

	/* Ack the IRQ and set software state. */
	hregs->reset = HPC3_ERST_CLRIRQ;

	/* Always check for received packets. */
	sgiseeq_rx(dev, sp, hregs, sregs);

	/* Only check for tx acks if we have something queued. */
	if (sp->tx_old != sp->tx_new)
		sgiseeq_tx(dev, sp, hregs, sregs);

	if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
		netif_wake_queue(dev);
	}
	spin_unlock(&sp->tx_lock);

	return IRQ_HANDLED;
}

static int sgiseeq_open(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;
	int err;

	if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
		printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
		return -EAGAIN;
	}

	err = init_seeq(dev, sp, sregs);
	if (err)
		goto out_free_irq;

	netif_start_queue(dev);

	return 0;

out_free_irq:
	free_irq(irq, dev);

	return err;
}

static int sgiseeq_close(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;

	netif_stop_queue(dev);

	/* Shutdown the Seeq. */
	reset_hpc3_and_seeq(sp->hregs, sregs);
	free_irq(irq, dev);

	return 0;
}

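/*
 * Full chip reinitialisation, used by the transmit watchdog and when the
 * receive mode changes; restarts the queue afterwards.
 */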
static inline int sgiseeq_reset(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int err;

	err = init_seeq(dev, sp, sregs);
	if (err)
		return err;

	dev->trans_start = jiffies;
	netif_wake_queue(dev);

	return 0;
}

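/*
 * Queue one frame for transmission: copy it into the slot's KSEG1 buffer
 * (padding short frames to ETH_ZLEN), publish the descriptor, link it
 * behind the previous tail, and kick the HPC if TX DMA has stopped.
 */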
static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	unsigned long flags;
	struct sgiseeq_tx_desc *td;
	int skblen, len, entry;

	spin_lock_irqsave(&sp->tx_lock, flags);

	/* Setup... */
	skblen = skb->len;
	len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
	sp->stats.tx_bytes += len;
	entry = sp->tx_new;
	td = &sp->tx_desc[entry];

	/* Create entry.  There are so many races with adding a new
	 * descriptor to the chain:
	 * 1) Assume that the HPC is off processing a DMA chain while
	 *    we are changing all of the following.
	 * 2) Do not allow the HPC to look at a new descriptor until
	 *    we have completely set up its state.  This means, do
	 *    not clear HPCDMA_EOX in the current last descriptor
	 *    until the one we are adding looks consistent and could
	 *    be processed right now.
	 * 3) The tx interrupt code must notice when we've added a new
	 *    entry and the HPC got to the end of the chain before we
	 *    added this new entry and restarted it.
	 */
	memcpy((char *)(long)td->buf_vaddr, skb->data, skblen);
	if (len != skblen)
		memset((char *)(long)td->buf_vaddr + skb->len, 0, len - skblen);
	td->tdma.cntinfo = (len & HPCDMA_BCNT) |
	                   HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
	if (sp->tx_old != sp->tx_new) {
		struct sgiseeq_tx_desc *backend;

		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
		backend->tdma.cntinfo &= ~HPCDMA_EOX;
	}
	sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

	/* Maybe kick the HPC back into motion. */
	if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
		kick_tx(&sp->tx_desc[sp->tx_old], hregs);

	dev->trans_start = jiffies;
	dev_kfree_skb(skb);

	if (!TX_BUFFS_AVAIL(sp))
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return 0;
}

static void timeout(struct net_device *dev)
{
	printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
	sgiseeq_reset(dev);

	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

static struct net_device_stats *sgiseeq_get_stats(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);

	return &sp->stats;
}

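/*
 * Pick the receive mode from the interface flags: promiscuous, accept
 * broadcast + multicast, or broadcast only.  Applying a new mode means a
 * full chip reset, so only do it when the mode actually changed.
 */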
static void sgiseeq_set_multicast(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	unsigned char oldmode = sp->mode;

	if (dev->flags & IFF_PROMISC)
		sp->mode = SEEQ_RCMD_RANY;
	else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count)
		sp->mode = SEEQ_RCMD_RBMCAST;
	else
		sp->mode = SEEQ_RCMD_RBCAST;

	/* XXX I know this sucks, but is there a better way to reprogram
	 * XXX the receiver?  At least, this shouldn't happen too often.
	 */

	if (oldmode != sp->mode)
		sgiseeq_reset(dev);
}

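/*
 * Chain each descriptor to the next by physical address and close the
 * ring back to the first entry.  Buffer pointers start out zero (the
 * init block comes from get_zeroed_page()), so seeq_init_ring() will
 * allocate the packet buffers on first use.
 */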
static inline void setup_tx_ring(struct sgiseeq_tx_desc *buf, int nbufs)
{
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].tdma.pnext = CPHYSADDR(buf + i + 1);
		buf[i].tdma.pbuf = 0;
		i++;
	}
	buf[i].tdma.pnext = CPHYSADDR(buf);
}

static inline void setup_rx_ring(struct sgiseeq_rx_desc *buf, int nbufs)
{
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].rdma.pnext = CPHYSADDR(buf + i + 1);
		buf[i].rdma.pbuf = 0;
		i++;
	}
	buf[i].rdma.pbuf = 0;
	buf[i].rdma.pnext = CPHYSADDR(buf);
}

#define ALIGNED(x) ((((unsigned long)(x)) + 0xf) & ~(0xf))

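/*
 * Probe-time setup for the on-board Seeq: allocate the net_device and a
 * zeroed page for the descriptor block, read the station address from the
 * IP22 NVRAM, map the rings through KSEG1 (uncached), program HPC3
 * PIO/DMA timing, and detect whether the chip has the EDLC extensions
 * before registering the device.
 */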
static int sgiseeq_init(struct hpc3_regs *hpcregs, int irq)
{
	struct sgiseeq_init_block *sr;
	struct sgiseeq_private *sp;
	struct net_device *dev;
	int err, i;

	dev = alloc_etherdev(sizeof (struct sgiseeq_private));
	if (!dev) {
		printk(KERN_ERR "Sgiseeq: Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out;
	}
	sp = netdev_priv(dev);

	/* Make private data page aligned */
	sr = (struct sgiseeq_init_block *) get_zeroed_page(GFP_KERNEL);
	if (!sr) {
		printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	sp->srings = sr;

#define EADDR_NVOFS 250
	for (i = 0; i < 3; i++) {
		unsigned short tmp = ip22_nvram_read(EADDR_NVOFS / 2 + i);

		dev->dev_addr[2 * i] = tmp >> 8;
		dev->dev_addr[2 * i + 1] = tmp & 0xff;
	}

#ifdef DEBUG
	gpriv = sp;
	gdev = dev;
#endif
	sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
	sp->hregs = &hpcregs->ethregs;
	sp->name = sgiseeqstr;
	sp->mode = SEEQ_RCMD_RBCAST;

	sp->rx_desc = (struct sgiseeq_rx_desc *)
	              CKSEG1ADDR(ALIGNED(&sp->srings->rxvector[0]));
	dma_cache_wback_inv((unsigned long)&sp->srings->rxvector,
	                    sizeof(sp->srings->rxvector));
	sp->tx_desc = (struct sgiseeq_tx_desc *)
	              CKSEG1ADDR(ALIGNED(&sp->srings->txvector[0]));
	dma_cache_wback_inv((unsigned long)&sp->srings->txvector,
	                    sizeof(sp->srings->txvector));

	/* A couple calculations now, saves many cycles later. */
	setup_rx_ring(sp->rx_desc, SEEQ_RX_BUFFERS);
	setup_tx_ring(sp->tx_desc, SEEQ_TX_BUFFERS);

	/* Setup PIO and DMA transfer timing */
	sp->hregs->pconfig = 0x161;
	sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
			     HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;

	/* Reset the chip. */
	hpc3_eth_reset(sp->hregs);

	sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
	if (sp->is_edlc)
		sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
			      SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
			      SEEQ_CTRL_ENCARR;

	dev->open = sgiseeq_open;
	dev->stop = sgiseeq_close;
	dev->hard_start_xmit = sgiseeq_start_xmit;
	dev->tx_timeout = timeout;
	dev->watchdog_timeo = (200 * HZ) / 1000;
	dev->get_stats = sgiseeq_get_stats;
	dev->set_multicast_list = sgiseeq_set_multicast;
	dev->set_mac_address = sgiseeq_set_mac_address;
	dev->irq = irq;

	if (register_netdev(dev)) {
		printk(KERN_ERR "Sgiseeq: Cannot register net device, "
		       "aborting.\n");
		err = -ENODEV;
		goto err_out_free_page;
	}

	printk(KERN_INFO "%s: %s ", dev->name, sgiseeqstr);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');

	sp->next_module = root_sgiseeq_dev;
	root_sgiseeq_dev = dev;

	return 0;

err_out_free_page:
	free_page((unsigned long) sp->srings);
err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}

static int __init sgiseeq_probe(void)
{
	/* On board adapter on 1st HPC is always present */
	return sgiseeq_init(hpc3c0, SGI_ENET_IRQ);
}

static void __exit sgiseeq_exit(void)
{
	struct net_device *next, *dev;
	struct sgiseeq_private *sp;

	for (dev = root_sgiseeq_dev; dev; dev = next) {
		sp = netdev_priv(dev);
		next = sp->next_module;
		unregister_netdev(dev);
		free_page((unsigned long) sp->srings);
		free_netdev(dev);
	}
}

module_init(sgiseeq_probe);
module_exit(sgiseeq_exit);

MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
MODULE_LICENSE("GPL");