1 /* drivers/atm/zatm.c - ZeitNet ZN122x device driver */
2
3 /* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4
5
6 #include <linux/module.h>
7 #include <linux/kernel.h>
8 #include <linux/mm.h>
9 #include <linux/pci.h>
10 #include <linux/errno.h>
11 #include <linux/atm.h>
12 #include <linux/atmdev.h>
13 #include <linux/sonet.h>
14 #include <linux/skbuff.h>
15 #include <linux/netdevice.h>
16 #include <linux/delay.h>
17 #include <linux/uio.h>
18 #include <linux/init.h>
19 #include <linux/interrupt.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/atm_zatm.h>
22 #include <linux/capability.h>
23 #include <linux/bitops.h>
24 #include <linux/wait.h>
25 #include <linux/slab.h>
26 #include <asm/byteorder.h>
27 #include <asm/string.h>
28 #include <asm/io.h>
29 #include <linux/atomic.h>
30 #include <linux/uaccess.h>
31
32 #include "uPD98401.h"
33 #include "uPD98402.h"
34 #include "zeprom.h"
35 #include "zatm.h"
36
37
38 /*
39 * TODO:
40 *
41 * Minor features
42 * - support 64 kB SDUs (will have to use multibuffer batches then :-( )
43 * - proper use of CDV, credit = max(1,CDVT*PCR)
44 * - AAL0
45 * - better receive timestamps
46 * - OAM
47 */
48
49 #define ZATM_COPPER 1
50
51 #if 0
52 #define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
53 #else
54 #define DPRINTK(format,args...)
55 #endif
56
57 #ifndef CONFIG_ATM_ZATM_DEBUG
58
59
60 #define NULLCHECK(x)
61
62 #define EVENT(s,a,b)
63
64
65 static void event_dump(void)
66 {
67 }
68
69
70 #else
71
72
73 /*
74 * NULL pointer checking
75 */
76
77 #define NULLCHECK(x) \
78 if ((unsigned long) (x) < 0x30) printk(KERN_CRIT #x "==0x%x\n", (int) (x))
79
80 /*
81 * Very extensive activity logging. Greatly improves bug detection speed but
82 * costs a few Mbps if enabled.
83 */
84
85 #define EV 64
86
87 static const char *ev[EV];
88 static unsigned long ev_a[EV],ev_b[EV];
89 static int ec = 0;
90
91
92 static void EVENT(const char *s,unsigned long a,unsigned long b)
93 {
94 ev[ec] = s;
95 ev_a[ec] = a;
96 ev_b[ec] = b;
97 ec = (ec+1) % EV;
98 }
99
100
101 static void event_dump(void)
102 {
103 int n,i;
104
105 printk(KERN_NOTICE "----- event dump follows -----\n");
106 for (n = 0; n < EV; n++) {
107 i = (ec+n) % EV;
108 printk(KERN_NOTICE);
109 printk(ev[i] ? ev[i] : "(null)",ev_a[i],ev_b[i]);
110 }
111 printk(KERN_NOTICE "----- event dump ends here -----\n");
112 }
113
114
115 #endif /* CONFIG_ATM_ZATM_DEBUG */
116
117
118 #define RING_BUSY 1 /* indication from do_tx that PDU has to be
119 backlogged */
120
121 static struct atm_dev *zatm_boards = NULL;
122 static unsigned long dummy[2] = {0,0};
123
124
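/*
 * Register access helpers: zin()/zout() read and write uPD98401 registers
 * by symbolic name, zin_n() takes a raw register index, and zwait spins
 * until the BUSY bit in CMR clears, i.e. until the SAR has accepted the
 * previous command.
 */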
125 #define zin_n(r) inl(zatm_dev->base+r*4)
126 #define zin(r) inl(zatm_dev->base+uPD98401_##r*4)
127 #define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4)
128 #define zwait while (zin(CMR) & uPD98401_BUSY)
129
130 /* RX0, RX1, TX0, TX1 */
131 static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 };
132 static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */
133
134 #define MBX_SIZE(i) (mbx_entries[i]*mbx_esize[i])
135
136
137 /*-------------------------------- utilities --------------------------------*/
138
139
140 static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
141 {
142 zwait;
143 zout(value,CER);
144 zout(uPD98401_IND_ACC | uPD98401_IA_BALL |
145 (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
146 }
147
148
149 static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr)
150 {
151 zwait;
152 zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW |
153 (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
154 zwait;
155 return zin(CER);
156 }
157
158
159 /*------------------------------- free lists --------------------------------*/
160
161
162 /*
163 * Free buffer head structure:
164 * [0] pointer to buffer (for SAR)
165 * [1] buffer descr link pointer (for SAR)
166 * [2] back pointer to skb (for poll_rx)
167 * [3] data
168 * ...
169 */
170
171 struct rx_buffer_head {
172 u32 buffer; /* pointer to buffer (for SAR) */
173 u32 link; /* buffer descriptor link pointer (for SAR) */
174 struct sk_buff *skb; /* back pointer to skb (for poll_rx) */
175 };
176
177
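/*
 * Top up a free-buffer pool: while the pool is below its high-water mark,
 * allocate aligned skbs, chain their rx_buffer_head descriptors together
 * and finally hand the whole batch to the SAR with one ADD_BAT command.
 */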
178 static void refill_pool(struct atm_dev *dev,int pool)
179 {
180 struct zatm_dev *zatm_dev;
181 struct sk_buff *skb;
182 struct rx_buffer_head *first;
183 unsigned long flags;
184 int align,offset,free,count,size;
185
186 EVENT("refill_pool\n",0,0);
187 zatm_dev = ZATM_DEV(dev);
188 size = (64 << (pool <= ZATM_AAL5_POOL_BASE ? 0 :
189 pool-ZATM_AAL5_POOL_BASE))+sizeof(struct rx_buffer_head);
190 if (size < PAGE_SIZE) {
191 align = 32; /* for 32 byte alignment */
192 offset = sizeof(struct rx_buffer_head);
193 }
194 else {
195 align = 4096;
196 offset = zatm_dev->pool_info[pool].offset+
197 sizeof(struct rx_buffer_head);
198 }
199 size += align;
200 spin_lock_irqsave(&zatm_dev->lock, flags);
201 free = zpeekl(zatm_dev,zatm_dev->pool_base+2*pool) &
202 uPD98401_RXFP_REMAIN;
203 spin_unlock_irqrestore(&zatm_dev->lock, flags);
204 if (free >= zatm_dev->pool_info[pool].low_water) return;
205 EVENT("starting ... POOL: 0x%x, 0x%x\n",
206 zpeekl(zatm_dev,zatm_dev->pool_base+2*pool),
207 zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1));
208 EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
209 count = 0;
210 first = NULL;
211 while (free < zatm_dev->pool_info[pool].high_water) {
212 struct rx_buffer_head *head;
213
214 skb = alloc_skb(size,GFP_ATOMIC);
215 if (!skb) {
216 printk(KERN_WARNING DEV_LABEL "(Itf %d): got no new "
217 "skb (%d) with %d free\n",dev->number,size,free);
218 break;
219 }
220 skb_reserve(skb,(unsigned char *) ((((unsigned long) skb->data+
221 align+offset-1) & ~(unsigned long) (align-1))-offset)-
222 skb->data);
223 head = (struct rx_buffer_head *) skb->data;
224 skb_reserve(skb,sizeof(struct rx_buffer_head));
225 if (!first) first = head;
226 count++;
227 head->buffer = virt_to_bus(skb->data);
228 head->link = 0;
229 head->skb = skb;
230 EVENT("enq skb 0x%08lx/0x%08lx\n",(unsigned long) skb,
231 (unsigned long) head);
232 spin_lock_irqsave(&zatm_dev->lock, flags);
233 if (zatm_dev->last_free[pool])
234 ((struct rx_buffer_head *) (zatm_dev->last_free[pool]->
235 data))[-1].link = virt_to_bus(head);
236 zatm_dev->last_free[pool] = skb;
237 skb_queue_tail(&zatm_dev->pool[pool],skb);
238 spin_unlock_irqrestore(&zatm_dev->lock, flags);
239 free++;
240 }
241 if (first) {
242 spin_lock_irqsave(&zatm_dev->lock, flags);
243 zwait;
244 zout(virt_to_bus(first),CER);
245 zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count,
246 CMR);
247 spin_unlock_irqrestore(&zatm_dev->lock, flags);
248 EVENT ("POOL: 0x%x, 0x%x\n",
249 zpeekl(zatm_dev,zatm_dev->pool_base+2*pool),
250 zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1));
251 EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
252 }
253 }
254
255
256 static void drain_free(struct atm_dev *dev,int pool)
257 {
258 skb_queue_purge(&ZATM_DEV(dev)->pool[pool]);
259 }
260
261
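/*
 * Map a maximum PDU size (in bytes, a multiple of the cell payload) to the
 * AAL5 free-buffer pool whose 64 << n buffer size can hold it; PDUs above
 * 64 kB are rejected with -1.
 */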
262 static int pool_index(int max_pdu)
263 {
264 int i;
265
266 if (max_pdu % ATM_CELL_PAYLOAD)
267 printk(KERN_ERR DEV_LABEL ": driver error in pool_index: "
268 "max_pdu is %d\n",max_pdu);
269 if (max_pdu > 65536) return -1;
270 for (i = 0; (64 << i) < max_pdu; i++);
271 return i+ZATM_AAL5_POOL_BASE;
272 }
273
274
275 /* use_pool isn't reentrant */
276
277
278 static void use_pool(struct atm_dev *dev,int pool)
279 {
280 struct zatm_dev *zatm_dev;
281 unsigned long flags;
282 int size;
283
284 zatm_dev = ZATM_DEV(dev);
285 if (!(zatm_dev->pool_info[pool].ref_count++)) {
286 skb_queue_head_init(&zatm_dev->pool[pool]);
287 size = pool-ZATM_AAL5_POOL_BASE;
288 if (size < 0) size = 0; /* 64B... */
289 else if (size > 10) size = 10; /* ... 64kB */
290 spin_lock_irqsave(&zatm_dev->lock, flags);
291 zpokel(zatm_dev,((zatm_dev->pool_info[pool].low_water/4) <<
292 uPD98401_RXFP_ALERT_SHIFT) |
293 (1 << uPD98401_RXFP_BTSZ_SHIFT) |
294 (size << uPD98401_RXFP_BFSZ_SHIFT),
295 zatm_dev->pool_base+pool*2);
296 zpokel(zatm_dev,(unsigned long) dummy,zatm_dev->pool_base+
297 pool*2+1);
298 spin_unlock_irqrestore(&zatm_dev->lock, flags);
299 zatm_dev->last_free[pool] = NULL;
300 refill_pool(dev,pool);
301 }
302 DPRINTK("pool %d: %d\n",pool,zatm_dev->pool_info[pool].ref_count);
303 }
304
305
306 static void unuse_pool(struct atm_dev *dev,int pool)
307 {
308 if (!(--ZATM_DEV(dev)->pool_info[pool].ref_count))
309 drain_free(dev,pool);
310 }
311
312 /*----------------------------------- RX ------------------------------------*/
313
314
315 #if 0
316 static void exception(struct atm_vcc *vcc)
317 {
318 static int count = 0;
319 struct zatm_dev *zatm_dev = ZATM_DEV(vcc->dev);
320 struct zatm_vcc *zatm_vcc = ZATM_VCC(vcc);
321 unsigned long *qrp;
322 int i;
323
324 if (count++ > 2) return;
325 for (i = 0; i < 8; i++)
326 printk("TX%d: 0x%08lx\n",i,
327 zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+i));
328 for (i = 0; i < 5; i++)
329 printk("SH%d: 0x%08lx\n",i,
330 zpeekl(zatm_dev,uPD98401_IM(zatm_vcc->shaper)+16*i));
331 qrp = (unsigned long *) zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
332 uPD98401_TXVC_QRP);
333 printk("qrp=0x%08lx\n",(unsigned long) qrp);
334 for (i = 0; i < 4; i++) printk("QRP[%d]: 0x%08lx",i,qrp[i]);
335 }
336 #endif
337
338
339 static const char *err_txt[] = {
340 "No error",
341 "RX buf underflow",
342 "RX FIFO overrun",
343 "Maximum len violation",
344 "CRC error",
345 "User abort",
346 "Length violation",
347 "T1 error",
348 "Deactivated",
349 "???",
350 "???",
351 "???",
352 "???",
353 "???",
354 "???",
355 "???"
356 };
357
358
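/*
 * Drain one RX mailbox: walk the indications the SAR has written since the
 * last visit, look up the VCC for each completed PDU, sanity-check the
 * AAL5 length against the cell count, and push the skb up the stack (or
 * drop it and bump rx_err on any error).
 */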
359 static void poll_rx(struct atm_dev *dev,int mbx)
360 {
361 struct zatm_dev *zatm_dev;
362 unsigned long pos;
363 u32 x;
364 int error;
365
366 EVENT("poll_rx\n",0,0);
367 zatm_dev = ZATM_DEV(dev);
368 pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx));
369 while (x = zin(MWA(mbx)), (pos & 0xffff) != x) {
370 u32 *here;
371 struct sk_buff *skb;
372 struct atm_vcc *vcc;
373 int cells,size,chan;
374
375 EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x);
376 here = (u32 *) pos;
377 if (((pos += 16) & 0xffff) == zatm_dev->mbx_end[mbx])
378 pos = zatm_dev->mbx_start[mbx];
379 cells = here[0] & uPD98401_AAL5_SIZE;
380 #if 0
381 printk("RX IND: 0x%x, 0x%x, 0x%x, 0x%x\n",here[0],here[1],here[2],here[3]);
382 {
383 unsigned long *x;
384 printk("POOL: 0x%08x, 0x%08x\n",zpeekl(zatm_dev,
385 zatm_dev->pool_base),
386 zpeekl(zatm_dev,zatm_dev->pool_base+1));
387 x = (unsigned long *) here[2];
388 printk("[0..3] = 0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx\n",
389 x[0],x[1],x[2],x[3]);
390 }
391 #endif
392 error = 0;
393 if (here[3] & uPD98401_AAL5_ERR) {
394 error = (here[3] & uPD98401_AAL5_ES) >>
395 uPD98401_AAL5_ES_SHIFT;
396 if (error == uPD98401_AAL5_ES_DEACT ||
397 error == uPD98401_AAL5_ES_FREE) continue;
398 }
399 EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >>
400 uPD98401_AAL5_ES_SHIFT,error);
401 skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb;
402 __net_timestamp(skb);
403 #if 0
404 printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3],
405 ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1],
406 ((unsigned *) skb->data)[0]);
407 #endif
408 EVENT("skb 0x%lx, here 0x%lx\n",(unsigned long) skb,
409 (unsigned long) here);
410 #if 0
411 printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
412 #endif
413 size = error ? 0 : ntohs(((__be16 *) skb->data)[cells*
414 ATM_CELL_PAYLOAD/sizeof(u16)-3]);
415 EVENT("got skb 0x%lx, size %d\n",(unsigned long) skb,size);
416 chan = (here[3] & uPD98401_AAL5_CHAN) >>
417 uPD98401_AAL5_CHAN_SHIFT;
418 if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) {
419 int pos;
420 vcc = zatm_dev->rx_map[chan];
421 pos = ZATM_VCC(vcc)->pool;
422 if (skb == zatm_dev->last_free[pos])
423 zatm_dev->last_free[pos] = NULL;
424 skb_unlink(skb, zatm_dev->pool + pos);
425 }
426 else {
427 printk(KERN_ERR DEV_LABEL "(itf %d): RX indication "
428 "for non-existing channel\n",dev->number);
429 size = 0;
430 vcc = NULL;
431 event_dump();
432 }
433 if (error) {
434 static unsigned long silence = 0;
435 static int last_error = 0;
436
437 if (error != last_error ||
438 time_after(jiffies, silence) || silence == 0){
439 printk(KERN_WARNING DEV_LABEL "(itf %d): "
440 "chan %d error %s\n",dev->number,chan,
441 err_txt[error]);
442 last_error = error;
443 silence = (jiffies+2*HZ)|1;
444 }
445 size = 0;
446 }
447 if (size && (size > cells*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER ||
448 size <= (cells-1)*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER)) {
449 printk(KERN_ERR DEV_LABEL "(itf %d): size %d with %d "
450 "cells\n",dev->number,size,cells);
451 size = 0;
452 event_dump();
453 }
454 if (size > ATM_MAX_AAL5_PDU) {
455 printk(KERN_ERR DEV_LABEL "(itf %d): size too big "
456 "(%d)\n",dev->number,size);
457 size = 0;
458 event_dump();
459 }
460 if (!size) {
461 dev_kfree_skb_irq(skb);
462 if (vcc) atomic_inc(&vcc->stats->rx_err);
463 continue;
464 }
465 if (!atm_charge(vcc,skb->truesize)) {
466 dev_kfree_skb_irq(skb);
467 continue;
468 }
469 skb->len = size;
470 ATM_SKB(skb)->vcc = vcc;
471 vcc->push(vcc,skb);
472 atomic_inc(&vcc->stats->rx);
473 }
474 zout(pos & 0xffff,MTA(mbx));
475 #if 0 /* probably a stupid idea */
476 refill_pool(dev,zatm_vcc->pool);
477 /* maybe this saves us a few interrupts */
478 #endif
479 }
480
481
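/*
 * First phase of RX open: pick a buffer pool matching the maximum SDU,
 * open a SAR channel and set up its VC descriptor. The VCI lookup table
 * entry is only written in open_rx_second(), once the address is known.
 */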
482 static int open_rx_first(struct atm_vcc *vcc)
483 {
484 struct zatm_dev *zatm_dev;
485 struct zatm_vcc *zatm_vcc;
486 unsigned long flags;
487 unsigned short chan;
488 int cells;
489
490 DPRINTK("open_rx_first (0x%x)\n",inb_p(0xc053));
491 zatm_dev = ZATM_DEV(vcc->dev);
492 zatm_vcc = ZATM_VCC(vcc);
493 zatm_vcc->rx_chan = 0;
494 if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
495 if (vcc->qos.aal == ATM_AAL5) {
496 if (vcc->qos.rxtp.max_sdu > 65464)
497 vcc->qos.rxtp.max_sdu = 65464;
498 /* fix this - we may want to receive 64kB SDUs
499 later */
500 cells = DIV_ROUND_UP(vcc->qos.rxtp.max_sdu + ATM_AAL5_TRAILER,
501 ATM_CELL_PAYLOAD);
502 zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD);
503 }
504 else {
505 cells = 1;
506 zatm_vcc->pool = ZATM_AAL0_POOL;
507 }
508 if (zatm_vcc->pool < 0) return -EMSGSIZE;
509 spin_lock_irqsave(&zatm_dev->lock, flags);
510 zwait;
511 zout(uPD98401_OPEN_CHAN,CMR);
512 zwait;
513 DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
514 chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
515 spin_unlock_irqrestore(&zatm_dev->lock, flags);
516 DPRINTK("chan is %d\n",chan);
517 if (!chan) return -EAGAIN;
518 use_pool(vcc->dev,zatm_vcc->pool);
519 DPRINTK("pool %d\n",zatm_vcc->pool);
520 /* set up VC descriptor */
521 spin_lock_irqsave(&zatm_dev->lock, flags);
522 zpokel(zatm_dev,zatm_vcc->pool << uPD98401_RXVC_POOL_SHIFT,
523 chan*VC_SIZE/4);
524 zpokel(zatm_dev,uPD98401_RXVC_OD | (vcc->qos.aal == ATM_AAL5 ?
525 uPD98401_RXVC_AR : 0) | cells,chan*VC_SIZE/4+1);
526 zpokel(zatm_dev,0,chan*VC_SIZE/4+2);
527 zatm_vcc->rx_chan = chan;
528 zatm_dev->rx_map[chan] = vcc;
529 spin_unlock_irqrestore(&zatm_dev->lock, flags);
530 return 0;
531 }
532
533
534 static int open_rx_second(struct atm_vcc *vcc)
535 {
536 struct zatm_dev *zatm_dev;
537 struct zatm_vcc *zatm_vcc;
538 unsigned long flags;
539 int pos,shift;
540
541 DPRINTK("open_rx_second (0x%x)\n",inb_p(0xc053));
542 zatm_dev = ZATM_DEV(vcc->dev);
543 zatm_vcc = ZATM_VCC(vcc);
544 if (!zatm_vcc->rx_chan) return 0;
545 spin_lock_irqsave(&zatm_dev->lock, flags);
546 /* should also handle VPI @@@ */
547 pos = vcc->vci >> 1;
548 shift = (1-(vcc->vci & 1)) << 4;
549 zpokel(zatm_dev,(zpeekl(zatm_dev,pos) & ~(0xffff << shift)) |
550 ((zatm_vcc->rx_chan | uPD98401_RXLT_ENBL) << shift),pos);
551 spin_unlock_irqrestore(&zatm_dev->lock, flags);
552 return 0;
553 }
554
555
556 static void close_rx(struct atm_vcc *vcc)
557 {
558 struct zatm_dev *zatm_dev;
559 struct zatm_vcc *zatm_vcc;
560 unsigned long flags;
561 int pos,shift;
562
563 zatm_vcc = ZATM_VCC(vcc);
564 zatm_dev = ZATM_DEV(vcc->dev);
565 if (!zatm_vcc->rx_chan) return;
566 DPRINTK("close_rx\n");
567 /* disable receiver */
568 if (vcc->vpi != ATM_VPI_UNSPEC && vcc->vci != ATM_VCI_UNSPEC) {
569 spin_lock_irqsave(&zatm_dev->lock, flags);
570 pos = vcc->vci >> 1;
571 shift = (1-(vcc->vci & 1)) << 4;
572 zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos);
573 zwait;
574 zout(uPD98401_NOP,CMR);
575 zwait;
576 zout(uPD98401_NOP,CMR);
577 spin_unlock_irqrestore(&zatm_dev->lock, flags);
578 }
579 spin_lock_irqsave(&zatm_dev->lock, flags);
580 zwait;
581 zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
582 uPD98401_CHAN_ADDR_SHIFT),CMR);
583 zwait;
584 udelay(10); /* why oh why ... ? */
585 zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
586 uPD98401_CHAN_ADDR_SHIFT),CMR);
587 zwait;
588 if (!(zin(CMR) & uPD98401_CHAN_ADDR))
589 printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel "
590 "%d\n",vcc->dev->number,zatm_vcc->rx_chan);
591 spin_unlock_irqrestore(&zatm_dev->lock, flags);
592 zatm_dev->rx_map[zatm_vcc->rx_chan] = NULL;
593 zatm_vcc->rx_chan = 0;
594 unuse_pool(vcc->dev,zatm_vcc->pool);
595 }
596
597
598 static int start_rx(struct atm_dev *dev)
599 {
600 struct zatm_dev *zatm_dev;
601 int i;
602
603 DPRINTK("start_rx\n");
604 zatm_dev = ZATM_DEV(dev);
605 zatm_dev->rx_map = kcalloc(zatm_dev->chans,
606 sizeof(*zatm_dev->rx_map),
607 GFP_KERNEL);
608 if (!zatm_dev->rx_map) return -ENOMEM;
609 /* set VPI/VCI split (use all VCIs and give what's left to VPIs) */
610 zpokel(zatm_dev,(1 << dev->ci_range.vci_bits)-1,uPD98401_VRR);
611 /* prepare free buffer pools */
612 for (i = 0; i <= ZATM_LAST_POOL; i++) {
613 zatm_dev->pool_info[i].ref_count = 0;
614 zatm_dev->pool_info[i].rqa_count = 0;
615 zatm_dev->pool_info[i].rqu_count = 0;
616 zatm_dev->pool_info[i].low_water = LOW_MARK;
617 zatm_dev->pool_info[i].high_water = HIGH_MARK;
618 zatm_dev->pool_info[i].offset = 0;
619 zatm_dev->pool_info[i].next_off = 0;
620 zatm_dev->pool_info[i].next_cnt = 0;
621 zatm_dev->pool_info[i].next_thres = OFF_CNG_THRES;
622 }
623 return 0;
624 }
625
626
627 /*----------------------------------- TX ------------------------------------*/
628
629
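/*
 * Queue one skb on the per-VCC transmit ring. Only linear skbs are
 * handled; the descriptor is filled in and marked valid last (after the
 * memory barrier), then the SAR is kicked with TX_READY. Returns RING_BUSY
 * when the ring is full so the caller can backlog the PDU.
 */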
630 static int do_tx(struct sk_buff *skb)
631 {
632 struct atm_vcc *vcc;
633 struct zatm_dev *zatm_dev;
634 struct zatm_vcc *zatm_vcc;
635 u32 *dsc;
636 unsigned long flags;
637
638 EVENT("do_tx\n",0,0);
639 DPRINTK("sending skb %p\n",skb);
640 vcc = ATM_SKB(skb)->vcc;
641 zatm_dev = ZATM_DEV(vcc->dev);
642 zatm_vcc = ZATM_VCC(vcc);
643 EVENT("iovcnt=%d\n",skb_shinfo(skb)->nr_frags,0);
644 spin_lock_irqsave(&zatm_dev->lock, flags);
645 if (!skb_shinfo(skb)->nr_frags) {
646 if (zatm_vcc->txing == RING_ENTRIES-1) {
647 spin_unlock_irqrestore(&zatm_dev->lock, flags);
648 return RING_BUSY;
649 }
650 zatm_vcc->txing++;
651 dsc = zatm_vcc->ring+zatm_vcc->ring_curr;
652 zatm_vcc->ring_curr = (zatm_vcc->ring_curr+RING_WORDS) &
653 (RING_ENTRIES*RING_WORDS-1);
654 dsc[1] = 0;
655 dsc[2] = skb->len;
656 dsc[3] = virt_to_bus(skb->data);
657 mb();
658 dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | uPD98401_TXPD_SM
659 | (vcc->qos.aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0)
660 | (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ?
661 uPD98401_CLPM_1 : uPD98401_CLPM_0);
662 EVENT("dsc (0x%lx)\n",(unsigned long) dsc,0);
663 }
664 else {
665 printk("NONONONOO!!!!\n");
666 dsc = NULL;
667 #if 0
668 u32 *put;
669 int i;
670
671 dsc = kmalloc(uPD98401_TXPD_SIZE * 2 +
672 uPD98401_TXBD_SIZE * ATM_SKB(skb)->iovcnt, GFP_ATOMIC);
673 if (!dsc) {
674 if (vcc->pop)
675 vcc->pop(vcc, skb);
676 else
677 dev_kfree_skb_irq(skb);
678 return -EAGAIN;
679 }
680 /* @@@ should check alignment */
681 put = dsc+8;
682 dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP |
683 (vcc->aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 |
684 (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ?
685 uPD98401_CLPM_1 : uPD98401_CLPM_0));
686 dsc[1] = 0;
687 dsc[2] = ATM_SKB(skb)->iovcnt * uPD98401_TXBD_SIZE;
688 dsc[3] = virt_to_bus(put);
689 for (i = 0; i < ATM_SKB(skb)->iovcnt; i++) {
690 *put++ = ((struct iovec *) skb->data)[i].iov_len;
691 *put++ = virt_to_bus(((struct iovec *)
692 skb->data)[i].iov_base);
693 }
694 put[-2] |= uPD98401_TXBD_LAST;
695 #endif
696 }
697 ZATM_PRV_DSC(skb) = dsc;
698 skb_queue_tail(&zatm_vcc->tx_queue,skb);
699 DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
700 uPD98401_TXVC_QRP));
701 zwait;
702 zout(uPD98401_TX_READY | (zatm_vcc->tx_chan <<
703 uPD98401_CHAN_ADDR_SHIFT),CMR);
704 spin_unlock_irqrestore(&zatm_dev->lock, flags);
705 EVENT("done\n",0,0);
706 return 0;
707 }
708
709
710 static inline void dequeue_tx(struct atm_vcc *vcc)
711 {
712 struct zatm_vcc *zatm_vcc;
713 struct sk_buff *skb;
714
715 EVENT("dequeue_tx\n",0,0);
716 zatm_vcc = ZATM_VCC(vcc);
717 skb = skb_dequeue(&zatm_vcc->tx_queue);
718 if (!skb) {
719 printk(KERN_CRIT DEV_LABEL "(itf %d): dequeue_tx but not "
720 "txing\n",vcc->dev->number);
721 return;
722 }
723 #if 0 /* @@@ would fail on CLP */
724 if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
725 uPD98401_TXPD_SM | uPD98401_TXPD_AAL5)) printk("@#*$!!!! (%08x)\n",
726 *ZATM_PRV_DSC(skb));
727 #endif
728 *ZATM_PRV_DSC(skb) = 0; /* mark as invalid */
729 zatm_vcc->txing--;
730 if (vcc->pop) vcc->pop(vcc,skb);
731 else dev_kfree_skb_irq(skb);
732 while ((skb = skb_dequeue(&zatm_vcc->backlog)))
733 if (do_tx(skb) == RING_BUSY) {
734 skb_queue_head(&zatm_vcc->backlog,skb);
735 break;
736 }
737 atomic_inc(&vcc->stats->tx);
738 wake_up(&zatm_vcc->tx_wait);
739 }
740
741
742 static void poll_tx(struct atm_dev *dev,int mbx)
743 {
744 struct zatm_dev *zatm_dev;
745 unsigned long pos;
746 u32 x;
747
748 EVENT("poll_tx\n",0,0);
749 zatm_dev = ZATM_DEV(dev);
750 pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx));
751 while (x = zin(MWA(mbx)), (pos & 0xffff) != x) {
752 int chan;
753
754 #if 1
755 u32 data,*addr;
756
757 EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x);
758 addr = (u32 *) pos;
759 data = *addr;
760 chan = (data & uPD98401_TXI_CONN) >> uPD98401_TXI_CONN_SHIFT;
761 EVENT("addr = 0x%lx, data = 0x%08x,",(unsigned long) addr,
762 data);
763 EVENT("chan = %d\n",chan,0);
764 #else
765 NO !
766 chan = (zatm_dev->mbx_start[mbx][pos >> 2] & uPD98401_TXI_CONN)
767 >> uPD98401_TXI_CONN_SHIFT;
768 #endif
769 if (chan < zatm_dev->chans && zatm_dev->tx_map[chan])
770 dequeue_tx(zatm_dev->tx_map[chan]);
771 else {
772 printk(KERN_CRIT DEV_LABEL "(itf %d): TX indication "
773 "for non-existing channel %d\n",dev->number,chan);
774 event_dump();
775 }
776 if (((pos += 4) & 0xffff) == zatm_dev->mbx_end[mbx])
777 pos = zatm_dev->mbx_start[mbx];
778 }
779 zout(pos & 0xffff,MTA(mbx));
780 }
781
782
783 /*
784 * BUG BUG BUG: Doesn't handle "new-style" rate specification yet.
785 */
786
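/*
 * Allocate a free traffic shaper and program its I/M divider so that the
 * resulting PCR is roughly i/m of ATM_OC3_PCR, within the requested
 * minimum/maximum. All UBR VCCs share a single full-rate shaper.
 */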
787 static int alloc_shaper(struct atm_dev *dev,int *pcr,int min,int max,int ubr)
788 {
789 struct zatm_dev *zatm_dev;
790 unsigned long flags;
791 unsigned long i,m,c;
792 int shaper;
793
794 DPRINTK("alloc_shaper (min = %d, max = %d)\n",min,max);
795 zatm_dev = ZATM_DEV(dev);
796 if (!zatm_dev->free_shapers) return -EAGAIN;
797 for (shaper = 0; !((zatm_dev->free_shapers >> shaper) & 1); shaper++);
798 zatm_dev->free_shapers &= ~(1 << shaper);
799 if (ubr) {
800 c = 5;
801 i = m = 1;
802 zatm_dev->ubr_ref_cnt++;
803 zatm_dev->ubr = shaper;
804 *pcr = 0;
805 }
806 else {
807 if (min) {
808 if (min <= 255) {
809 i = min;
810 m = ATM_OC3_PCR;
811 }
812 else {
813 i = 255;
814 m = ATM_OC3_PCR*255/min;
815 }
816 }
817 else {
818 if (max > zatm_dev->tx_bw) max = zatm_dev->tx_bw;
819 if (max <= 255) {
820 i = max;
821 m = ATM_OC3_PCR;
822 }
823 else {
824 i = 255;
825 m = DIV_ROUND_UP(ATM_OC3_PCR*255, max);
826 }
827 }
828 if (i > m) {
829 printk(KERN_CRIT DEV_LABEL "shaper algorithm botched "
830 "[%d,%d] -> i=%ld,m=%ld\n",min,max,i,m);
831 m = i;
832 }
833 *pcr = i*ATM_OC3_PCR/m;
834 c = 20; /* @@@ should use max_cdv ! */
835 if ((min && *pcr < min) || (max && *pcr > max)) return -EINVAL;
836 if (zatm_dev->tx_bw < *pcr) return -EAGAIN;
837 zatm_dev->tx_bw -= *pcr;
838 }
839 spin_lock_irqsave(&zatm_dev->lock, flags);
840 DPRINTK("i = %d, m = %d, PCR = %d\n",i,m,*pcr);
841 zpokel(zatm_dev,(i << uPD98401_IM_I_SHIFT) | m,uPD98401_IM(shaper));
842 zpokel(zatm_dev,c << uPD98401_PC_C_SHIFT,uPD98401_PC(shaper));
843 zpokel(zatm_dev,0,uPD98401_X(shaper));
844 zpokel(zatm_dev,0,uPD98401_Y(shaper));
845 zpokel(zatm_dev,uPD98401_PS_E,uPD98401_PS(shaper));
846 spin_unlock_irqrestore(&zatm_dev->lock, flags);
847 return shaper;
848 }
849
850
851 static void dealloc_shaper(struct atm_dev *dev,int shaper)
852 {
853 struct zatm_dev *zatm_dev;
854 unsigned long flags;
855
856 zatm_dev = ZATM_DEV(dev);
857 if (shaper == zatm_dev->ubr) {
858 if (--zatm_dev->ubr_ref_cnt) return;
859 zatm_dev->ubr = -1;
860 }
861 spin_lock_irqsave(&zatm_dev->lock, flags);
862 zpokel(zatm_dev,zpeekl(zatm_dev,uPD98401_PS(shaper)) & ~uPD98401_PS_E,
863 uPD98401_PS(shaper));
864 spin_unlock_irqrestore(&zatm_dev->lock, flags);
865 zatm_dev->free_shapers |= 1 << shaper;
866 }
867
868
869 static void close_tx(struct atm_vcc *vcc)
870 {
871 struct zatm_dev *zatm_dev;
872 struct zatm_vcc *zatm_vcc;
873 unsigned long flags;
874 int chan;
875
876 zatm_vcc = ZATM_VCC(vcc);
877 zatm_dev = ZATM_DEV(vcc->dev);
878 chan = zatm_vcc->tx_chan;
879 if (!chan) return;
880 DPRINTK("close_tx\n");
881 if (skb_peek(&zatm_vcc->backlog)) {
882 printk("waiting for backlog to drain ...\n");
883 event_dump();
884 wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->backlog));
885 }
886 if (skb_peek(&zatm_vcc->tx_queue)) {
887 printk("waiting for TX queue to drain ...\n");
888 event_dump();
889 wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->tx_queue));
890 }
891 spin_lock_irqsave(&zatm_dev->lock, flags);
892 #if 0
893 zwait;
894 zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
895 #endif
896 zwait;
897 zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
898 zwait;
899 if (!(zin(CMR) & uPD98401_CHAN_ADDR))
900 printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel "
901 "%d\n",vcc->dev->number,chan);
902 spin_unlock_irqrestore(&zatm_dev->lock, flags);
903 zatm_vcc->tx_chan = 0;
904 zatm_dev->tx_map[chan] = NULL;
905 if (zatm_vcc->shaper != zatm_dev->ubr) {
906 zatm_dev->tx_bw += vcc->qos.txtp.min_pcr;
907 dealloc_shaper(vcc->dev,zatm_vcc->shaper);
908 }
909 kfree(zatm_vcc->ring);
910 }
911
912
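/*
 * First phase of TX open: obtain a SAR channel, attach a shaper (the
 * shared UBR shaper when the rate is unlimited) and allocate the transmit
 * descriptor ring, whose last entry loops back to the start.
 */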
913 static int open_tx_first(struct atm_vcc *vcc)
914 {
915 struct zatm_dev *zatm_dev;
916 struct zatm_vcc *zatm_vcc;
917 unsigned long flags;
918 u32 *loop;
919 unsigned short chan;
920 int unlimited;
921
922 DPRINTK("open_tx_first\n");
923 zatm_dev = ZATM_DEV(vcc->dev);
924 zatm_vcc = ZATM_VCC(vcc);
925 zatm_vcc->tx_chan = 0;
926 if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
927 spin_lock_irqsave(&zatm_dev->lock, flags);
928 zwait;
929 zout(uPD98401_OPEN_CHAN,CMR);
930 zwait;
931 DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
932 chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
933 spin_unlock_irqrestore(&zatm_dev->lock, flags);
934 DPRINTK("chan is %d\n",chan);
935 if (!chan) return -EAGAIN;
936 unlimited = vcc->qos.txtp.traffic_class == ATM_UBR &&
937 (!vcc->qos.txtp.max_pcr || vcc->qos.txtp.max_pcr == ATM_MAX_PCR ||
938 vcc->qos.txtp.max_pcr >= ATM_OC3_PCR);
939 if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr;
940 else {
941 int uninitialized_var(pcr);
942
943 if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU;
944 if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr,
945 vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited))
946 < 0) {
947 close_tx(vcc);
948 return zatm_vcc->shaper;
949 }
950 if (pcr > ATM_OC3_PCR) pcr = ATM_OC3_PCR;
951 vcc->qos.txtp.min_pcr = vcc->qos.txtp.max_pcr = pcr;
952 }
953 zatm_vcc->tx_chan = chan;
954 skb_queue_head_init(&zatm_vcc->tx_queue);
955 init_waitqueue_head(&zatm_vcc->tx_wait);
956 /* initialize ring */
957 zatm_vcc->ring = kzalloc(RING_SIZE,GFP_KERNEL);
958 if (!zatm_vcc->ring) return -ENOMEM;
959 loop = zatm_vcc->ring+RING_ENTRIES*RING_WORDS;
960 loop[0] = uPD98401_TXPD_V;
961 loop[1] = loop[2] = 0;
962 loop[3] = virt_to_bus(zatm_vcc->ring);
963 zatm_vcc->ring_curr = 0;
964 zatm_vcc->txing = 0;
965 skb_queue_head_init(&zatm_vcc->backlog);
966 zpokel(zatm_dev,virt_to_bus(zatm_vcc->ring),
967 chan*VC_SIZE/4+uPD98401_TXVC_QRP);
968 return 0;
969 }
970
971
972 static int open_tx_second(struct atm_vcc *vcc)
973 {
974 struct zatm_dev *zatm_dev;
975 struct zatm_vcc *zatm_vcc;
976 unsigned long flags;
977
978 DPRINTK("open_tx_second\n");
979 zatm_dev = ZATM_DEV(vcc->dev);
980 zatm_vcc = ZATM_VCC(vcc);
981 if (!zatm_vcc->tx_chan) return 0;
982 /* set up VC descriptor */
983 spin_lock_irqsave(&zatm_dev->lock, flags);
984 zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4);
985 zpokel(zatm_dev,uPD98401_TXVC_L | (zatm_vcc->shaper <<
986 uPD98401_TXVC_SHP_SHIFT) | (vcc->vpi << uPD98401_TXVC_VPI_SHIFT) |
987 vcc->vci,zatm_vcc->tx_chan*VC_SIZE/4+1);
988 zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4+2);
989 spin_unlock_irqrestore(&zatm_dev->lock, flags);
990 zatm_dev->tx_map[zatm_vcc->tx_chan] = vcc;
991 return 0;
992 }
993
994
995 static int start_tx(struct atm_dev *dev)
996 {
997 struct zatm_dev *zatm_dev;
998 int i;
999
1000 DPRINTK("start_tx\n");
1001 zatm_dev = ZATM_DEV(dev);
1002 zatm_dev->tx_map = kmalloc_array(zatm_dev->chans,
1003 sizeof(*zatm_dev->tx_map),
1004 GFP_KERNEL);
1005 if (!zatm_dev->tx_map) return -ENOMEM;
1006 zatm_dev->tx_bw = ATM_OC3_PCR;
1007 zatm_dev->free_shapers = (1 << NR_SHAPERS)-1;
1008 zatm_dev->ubr = -1;
1009 zatm_dev->ubr_ref_cnt = 0;
1010 /* initialize shapers */
1011 for (i = 0; i < NR_SHAPERS; i++) zpokel(zatm_dev,0,uPD98401_PS(i));
1012 return 0;
1013 }
1014
1015
1016 /*------------------------------- interrupts --------------------------------*/
1017
1018
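/*
 * Interrupt handler: loop on GSR until no cause bits remain, refill the
 * free-buffer pools signalled by RQA/RQU, report bus and parity errors,
 * and drain whichever RX/TX mailboxes the MM field flags.
 */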
1019 static irqreturn_t zatm_int(int irq,void *dev_id)
1020 {
1021 struct atm_dev *dev;
1022 struct zatm_dev *zatm_dev;
1023 u32 reason;
1024 int handled = 0;
1025
1026 dev = dev_id;
1027 zatm_dev = ZATM_DEV(dev);
1028 while ((reason = zin(GSR))) {
1029 handled = 1;
1030 EVENT("reason 0x%x\n",reason,0);
1031 if (reason & uPD98401_INT_PI) {
1032 EVENT("PHY int\n",0,0);
1033 dev->phy->interrupt(dev);
1034 }
1035 if (reason & uPD98401_INT_RQA) {
1036 unsigned long pools;
1037 int i;
1038
1039 pools = zin(RQA);
1040 EVENT("RQA (0x%08x)\n",pools,0);
1041 for (i = 0; pools; i++) {
1042 if (pools & 1) {
1043 refill_pool(dev,i);
1044 zatm_dev->pool_info[i].rqa_count++;
1045 }
1046 pools >>= 1;
1047 }
1048 }
1049 if (reason & uPD98401_INT_RQU) {
1050 unsigned long pools;
1051 int i;
1052 pools = zin(RQU);
1053 printk(KERN_WARNING DEV_LABEL "(itf %d): RQU 0x%08lx\n",
1054 dev->number,pools);
1055 event_dump();
1056 for (i = 0; pools; i++) {
1057 if (pools & 1) {
1058 refill_pool(dev,i);
1059 zatm_dev->pool_info[i].rqu_count++;
1060 }
1061 pools >>= 1;
1062 }
1063 }
1064 /* don't handle RD */
1065 if (reason & uPD98401_INT_SPE)
1066 printk(KERN_ALERT DEV_LABEL "(itf %d): system parity "
1067 "error at 0x%08x\n",dev->number,zin(ADDR));
1068 if (reason & uPD98401_INT_CPE)
1069 printk(KERN_ALERT DEV_LABEL "(itf %d): control memory "
1070 "parity error at 0x%08x\n",dev->number,zin(ADDR));
1071 if (reason & uPD98401_INT_SBE) {
1072 printk(KERN_ALERT DEV_LABEL "(itf %d): system bus "
1073 "error at 0x%08x\n",dev->number,zin(ADDR));
1074 event_dump();
1075 }
1076 /* don't handle IND */
1077 if (reason & uPD98401_INT_MF) {
1078 printk(KERN_CRIT DEV_LABEL "(itf %d): mailbox full "
1079 "(0x%x)\n",dev->number,(reason & uPD98401_INT_MF)
1080 >> uPD98401_INT_MF_SHIFT);
1081 event_dump();
1082 /* @@@ should try to recover */
1083 }
1084 if (reason & uPD98401_INT_MM) {
1085 if (reason & 1) poll_rx(dev,0);
1086 if (reason & 2) poll_rx(dev,1);
1087 if (reason & 4) poll_tx(dev,2);
1088 if (reason & 8) poll_tx(dev,3);
1089 }
1090 /* @@@ handle RCRn */
1091 }
1092 return IRQ_RETVAL(handled);
1093 }
1094
1095
1096 /*----------------------------- (E)EPROM access -----------------------------*/
1097
1098
1099 static void eprom_set(struct zatm_dev *zatm_dev, unsigned long value,
1100 unsigned short cmd)
1101 {
1102 int error;
1103
1104 if ((error = pci_write_config_dword(zatm_dev->pci_dev,cmd,value)))
1105 printk(KERN_ERR DEV_LABEL ": PCI write failed (0x%02x)\n",
1106 error);
1107 }
1108
1109
1110 static unsigned long eprom_get(struct zatm_dev *zatm_dev, unsigned short cmd)
1111 {
1112 unsigned int value;
1113 int error;
1114
1115 if ((error = pci_read_config_dword(zatm_dev->pci_dev,cmd,&value)))
1116 printk(KERN_ERR DEV_LABEL ": PCI read failed (0x%02x)\n",
1117 error);
1118 return value;
1119 }
1120
1121
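/*
 * Bit-bang a value, MSB first, into the serial EEPROM via the board's PCI
 * configuration register: for each bit, drive DI with CS asserted and
 * pulse SK.
 */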
1122 static void eprom_put_bits(struct zatm_dev *zatm_dev, unsigned long data,
1123 int bits, unsigned short cmd)
1124 {
1125 unsigned long value;
1126 int i;
1127
1128 for (i = bits-1; i >= 0; i--) {
1129 value = ZEPROM_CS | (((data >> i) & 1) ? ZEPROM_DI : 0);
1130 eprom_set(zatm_dev,value,cmd);
1131 eprom_set(zatm_dev,value | ZEPROM_SK,cmd);
1132 eprom_set(zatm_dev,value,cmd);
1133 }
1134 }
1135
1136
1137 static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
1138 unsigned short cmd)
1139 {
1140 int i;
1141
1142 *byte = 0;
1143 for (i = 8; i; i--) {
1144 eprom_set(zatm_dev,ZEPROM_CS,cmd);
1145 eprom_set(zatm_dev,ZEPROM_CS | ZEPROM_SK,cmd);
1146 *byte <<= 1;
1147 if (eprom_get(zatm_dev,cmd) & ZEPROM_DO) *byte |= 1;
1148 eprom_set(zatm_dev,ZEPROM_CS,cmd);
1149 }
1150 }
1151
1152
1153 static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd,
1154 int offset, int swap)
1155 {
1156 unsigned char buf[ZEPROM_SIZE];
1157 struct zatm_dev *zatm_dev;
1158 int i;
1159
1160 zatm_dev = ZATM_DEV(dev);
1161 for (i = 0; i < ZEPROM_SIZE; i += 2) {
1162 eprom_set(zatm_dev,ZEPROM_CS,cmd); /* select EPROM */
1163 eprom_put_bits(zatm_dev,ZEPROM_CMD_READ,ZEPROM_CMD_LEN,cmd);
1164 eprom_put_bits(zatm_dev,i >> 1,ZEPROM_ADDR_LEN,cmd);
1165 eprom_get_byte(zatm_dev,buf+i+swap,cmd);
1166 eprom_get_byte(zatm_dev,buf+i+1-swap,cmd);
1167 eprom_set(zatm_dev,0,cmd); /* deselect EPROM */
1168 }
1169 memcpy(dev->esi,buf+offset,ESI_LEN);
1170 return memcmp(dev->esi,"\0\0\0\0\0",ESI_LEN); /* assumes ESI_LEN == 6 */
1171 }
1172
1173
1174 static void eprom_get_esi(struct atm_dev *dev)
1175 {
1176 if (eprom_try_esi(dev,ZEPROM_V1_REG,ZEPROM_V1_ESI_OFF,1)) return;
1177 (void) eprom_try_esi(dev,ZEPROM_V2_REG,ZEPROM_V2_ESI_OFF,0);
1178 }
1179
1180
1181 /*--------------------------------- entries ---------------------------------*/
1182
1183
1184 static int zatm_init(struct atm_dev *dev)
1185 {
1186 struct zatm_dev *zatm_dev;
1187 struct pci_dev *pci_dev;
1188 unsigned short command;
1189 int error,i,last;
1190 unsigned long t0,t1,t2;
1191
1192 DPRINTK(">zatm_init\n");
1193 zatm_dev = ZATM_DEV(dev);
1194 spin_lock_init(&zatm_dev->lock);
1195 pci_dev = zatm_dev->pci_dev;
1196 zatm_dev->base = pci_resource_start(pci_dev, 0);
1197 zatm_dev->irq = pci_dev->irq;
1198 if ((error = pci_read_config_word(pci_dev,PCI_COMMAND,&command))) {
1199 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%02x\n",
1200 dev->number,error);
1201 return -EINVAL;
1202 }
1203 if ((error = pci_write_config_word(pci_dev,PCI_COMMAND,
1204 command | PCI_COMMAND_IO | PCI_COMMAND_MASTER))) {
1205 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable IO (0x%02x)"
1206 "\n",dev->number,error);
1207 return -EIO;
1208 }
1209 eprom_get_esi(dev);
1210 printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%x,irq=%d,",
1211 dev->number,pci_dev->revision,zatm_dev->base,zatm_dev->irq);
1212 /* reset uPD98401 */
1213 zout(0,SWR);
1214 while (!(zin(GSR) & uPD98401_INT_IND));
1215 zout(uPD98401_GMR_ONE /*uPD98401_BURST4*/,GMR);
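/*
 * Size the control memory: walk down from MAX_CRAM_SIZE writing test
 * patterns, then scan upward for the first word that no longer reads back
 * its own address; zatm_dev->mem is that word count converted to bytes.
 */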
1216 last = MAX_CRAM_SIZE;
1217 for (i = last-RAM_INCREMENT; i >= 0; i -= RAM_INCREMENT) {
1218 zpokel(zatm_dev,0x55555555,i);
1219 if (zpeekl(zatm_dev,i) != 0x55555555) last = i;
1220 else {
1221 zpokel(zatm_dev,0xAAAAAAAA,i);
1222 if (zpeekl(zatm_dev,i) != 0xAAAAAAAA) last = i;
1223 else zpokel(zatm_dev,i,i);
1224 }
1225 }
1226 for (i = 0; i < last; i += RAM_INCREMENT)
1227 if (zpeekl(zatm_dev,i) != i) break;
1228 zatm_dev->mem = i << 2;
1229 while (i) zpokel(zatm_dev,0,--i);
1230 /* reset again to rebuild memory pointers */
1231 zout(0,SWR);
1232 while (!(zin(GSR) & uPD98401_INT_IND));
1233 zout(uPD98401_GMR_ONE | uPD98401_BURST8 | uPD98401_BURST4 |
1234 uPD98401_BURST2 | uPD98401_GMR_PM | uPD98401_GMR_DR,GMR);
1235 /* TODO: should shrink allocation now */
1236 printk("mem=%dkB,%s (",zatm_dev->mem >> 10,zatm_dev->copper ? "UTP" :
1237 "MMF");
1238 for (i = 0; i < ESI_LEN; i++)
1239 printk("%02X%s",dev->esi[i],i == ESI_LEN-1 ? ")\n" : "-");
1240 do {
1241 unsigned long flags;
1242
1243 spin_lock_irqsave(&zatm_dev->lock, flags);
1244 t0 = zpeekl(zatm_dev,uPD98401_TSR);
1245 udelay(10);
1246 t1 = zpeekl(zatm_dev,uPD98401_TSR);
1247 udelay(1010);
1248 t2 = zpeekl(zatm_dev,uPD98401_TSR);
1249 spin_unlock_irqrestore(&zatm_dev->lock, flags);
1250 }
1251 while (t0 > t1 || t1 > t2); /* loop if wrapping ... */
1252 zatm_dev->khz = t2-2*t1+t0;
1253 printk(KERN_NOTICE DEV_LABEL "(itf %d): uPD98401 %d.%d at %d.%03d "
1254 "MHz\n",dev->number,
1255 (zin(VER) & uPD98401_MAJOR) >> uPD98401_MAJOR_SHIFT,
1256 zin(VER) & uPD98401_MINOR,zatm_dev->khz/1000,zatm_dev->khz % 1000);
1257 return uPD98402_init(dev);
1258 }
1259
1260
1261 static int zatm_start(struct atm_dev *dev)
1262 {
1263 struct zatm_dev *zatm_dev = ZATM_DEV(dev);
1264 struct pci_dev *pdev = zatm_dev->pci_dev;
1265 unsigned long curr;
1266 int pools,vccs,rx;
1267 int error, i, ld;
1268
1269 DPRINTK("zatm_start\n");
1270 zatm_dev->rx_map = zatm_dev->tx_map = NULL;
1271 for (i = 0; i < NR_MBX; i++)
1272 zatm_dev->mbx_start[i] = 0;
1273 error = request_irq(zatm_dev->irq, zatm_int, IRQF_SHARED, DEV_LABEL, dev);
1274 if (error < 0) {
1275 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
1276 dev->number,zatm_dev->irq);
1277 goto done;
1278 }
1279 /* define memory regions */
1280 pools = NR_POOLS;
1281 if (NR_SHAPERS*SHAPER_SIZE > pools*POOL_SIZE)
1282 pools = NR_SHAPERS*SHAPER_SIZE/POOL_SIZE;
1283 vccs = (zatm_dev->mem-NR_SHAPERS*SHAPER_SIZE-pools*POOL_SIZE)/
1284 (2*VC_SIZE+RX_SIZE);
1285 ld = -1;
1286 for (rx = 1; rx < vccs; rx <<= 1) ld++;
1287 dev->ci_range.vpi_bits = 0; /* @@@ no VPI for now */
1288 dev->ci_range.vci_bits = ld;
1289 dev->link_rate = ATM_OC3_PCR;
1290 zatm_dev->chans = vccs; /* ??? */
1291 curr = rx*RX_SIZE/4;
1292 DPRINTK("RX pool 0x%08lx\n",curr);
1293 zpokel(zatm_dev,curr,uPD98401_PMA); /* receive pool */
1294 zatm_dev->pool_base = curr;
1295 curr += pools*POOL_SIZE/4;
1296 DPRINTK("Shapers 0x%08lx\n",curr);
1297 zpokel(zatm_dev,curr,uPD98401_SMA); /* shapers */
1298 curr += NR_SHAPERS*SHAPER_SIZE/4;
1299 DPRINTK("Free 0x%08lx\n",curr);
1300 zpokel(zatm_dev,curr,uPD98401_TOS); /* free pool */
1301 printk(KERN_INFO DEV_LABEL "(itf %d): %d shapers, %d pools, %d RX, "
1302 "%ld VCs\n",dev->number,NR_SHAPERS,pools,rx,
1303 (zatm_dev->mem-curr*4)/VC_SIZE);
1304 /* create mailboxes */
1305 for (i = 0; i < NR_MBX; i++) {
1306 void *mbx;
1307 dma_addr_t mbx_dma;
1308
1309 if (!mbx_entries[i])
1310 continue;
1311 mbx = dma_alloc_coherent(&pdev->dev,
1312 2 * MBX_SIZE(i), &mbx_dma, GFP_KERNEL);
1313 if (!mbx) {
1314 error = -ENOMEM;
1315 goto out;
1316 }
1317 /*
1318 * Alignment provided by dma_alloc_coherent() isn't enough
1319 * for this device.
1320 */
1321 if (((unsigned long)mbx ^ mbx_dma) & 0xffff) {
1322 printk(KERN_ERR DEV_LABEL "(itf %d): system "
1323 "bus incompatible with driver\n", dev->number);
1324 dma_free_coherent(&pdev->dev, 2*MBX_SIZE(i), mbx, mbx_dma);
1325 error = -ENODEV;
1326 goto out;
1327 }
1328 DPRINTK("mbx@0x%08lx-0x%08lx\n", mbx, mbx + MBX_SIZE(i));
1329 zatm_dev->mbx_start[i] = (unsigned long)mbx;
1330 zatm_dev->mbx_dma[i] = mbx_dma;
1331 zatm_dev->mbx_end[i] = (zatm_dev->mbx_start[i] + MBX_SIZE(i)) &
1332 0xffff;
1333 zout(mbx_dma >> 16, MSH(i));
1334 zout(mbx_dma, MSL(i));
1335 zout(zatm_dev->mbx_end[i], MBA(i));
1336 zout((unsigned long)mbx & 0xffff, MTA(i));
1337 zout((unsigned long)mbx & 0xffff, MWA(i));
1338 }
1339 error = start_tx(dev);
1340 if (error)
1341 goto out;
1342 error = start_rx(dev);
1343 if (error)
1344 goto out_tx;
1345 error = dev->phy->start(dev);
1346 if (error)
1347 goto out_rx;
1348 zout(0xffffffff,IMR); /* enable interrupts */
1349 /* enable TX & RX */
1350 zout(zin(GMR) | uPD98401_GMR_SE | uPD98401_GMR_RE,GMR);
1351 done:
1352 return error;
1353
1354 out_rx:
1355 kfree(zatm_dev->rx_map);
1356 out_tx:
1357 kfree(zatm_dev->tx_map);
1358 out:
1359 while (i-- > 0) {
1360 dma_free_coherent(&pdev->dev, 2 * MBX_SIZE(i),
1361 (void *)zatm_dev->mbx_start[i],
1362 zatm_dev->mbx_dma[i]);
1363 }
1364 free_irq(zatm_dev->irq, dev);
1365 goto done;
1366 }
1367
1368
1369 static void zatm_close(struct atm_vcc *vcc)
1370 {
1371 DPRINTK(">zatm_close\n");
1372 if (!ZATM_VCC(vcc)) return;
1373 clear_bit(ATM_VF_READY,&vcc->flags);
1374 close_rx(vcc);
1375 EVENT("close_tx\n",0,0);
1376 close_tx(vcc);
1377 DPRINTK("zatm_close: done waiting\n");
1378 /* deallocate memory */
1379 kfree(ZATM_VCC(vcc));
1380 vcc->dev_data = NULL;
1381 clear_bit(ATM_VF_ADDR,&vcc->flags);
1382 }
1383
1384
1385 static int zatm_open(struct atm_vcc *vcc)
1386 {
1387 struct zatm_dev *zatm_dev;
1388 struct zatm_vcc *zatm_vcc;
1389 short vpi = vcc->vpi;
1390 int vci = vcc->vci;
1391 int error;
1392
1393 DPRINTK(">zatm_open\n");
1394 zatm_dev = ZATM_DEV(vcc->dev);
1395 if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
1396 vcc->dev_data = NULL;
1397 if (vci != ATM_VCI_UNSPEC && vpi != ATM_VPI_UNSPEC)
1398 set_bit(ATM_VF_ADDR,&vcc->flags);
1399 if (vcc->qos.aal != ATM_AAL5) return -EINVAL; /* @@@ AAL0 */
1400 DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n",vcc->dev->number,vcc->vpi,
1401 vcc->vci);
1402 if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) {
1403 zatm_vcc = kmalloc(sizeof(*zatm_vcc), GFP_KERNEL);
1404 if (!zatm_vcc) {
1405 clear_bit(ATM_VF_ADDR,&vcc->flags);
1406 return -ENOMEM;
1407 }
1408 vcc->dev_data = zatm_vcc;
1409 ZATM_VCC(vcc)->tx_chan = 0; /* for zatm_close after open_rx */
1410 if ((error = open_rx_first(vcc))) {
1411 zatm_close(vcc);
1412 return error;
1413 }
1414 if ((error = open_tx_first(vcc))) {
1415 zatm_close(vcc);
1416 return error;
1417 }
1418 }
1419 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC) return 0;
1420 if ((error = open_rx_second(vcc))) {
1421 zatm_close(vcc);
1422 return error;
1423 }
1424 if ((error = open_tx_second(vcc))) {
1425 zatm_close(vcc);
1426 return error;
1427 }
1428 set_bit(ATM_VF_READY,&vcc->flags);
1429 return 0;
1430 }
1431
1432
1433 static int zatm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flags)
1434 {
1435 printk("Not yet implemented\n");
1436 return -ENOSYS;
1437 /* @@@ */
1438 }
1439
1440
1441 static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
1442 {
1443 struct zatm_dev *zatm_dev;
1444 unsigned long flags;
1445
1446 zatm_dev = ZATM_DEV(dev);
1447 switch (cmd) {
1448 case ZATM_GETPOOLZ:
1449 if (!capable(CAP_NET_ADMIN)) return -EPERM;
1450 /* fall through */
1451 case ZATM_GETPOOL:
1452 {
1453 struct zatm_pool_info info;
1454 int pool;
1455
1456 if (get_user(pool,
1457 &((struct zatm_pool_req __user *) arg)->pool_num))
1458 return -EFAULT;
1459 if (pool < 0 || pool > ZATM_LAST_POOL)
1460 return -EINVAL;
1461 spin_lock_irqsave(&zatm_dev->lock, flags);
1462 info = zatm_dev->pool_info[pool];
1463 if (cmd == ZATM_GETPOOLZ) {
1464 zatm_dev->pool_info[pool].rqa_count = 0;
1465 zatm_dev->pool_info[pool].rqu_count = 0;
1466 }
1467 spin_unlock_irqrestore(&zatm_dev->lock, flags);
1468 return copy_to_user(
1469 &((struct zatm_pool_req __user *) arg)->info,
1470 &info,sizeof(info)) ? -EFAULT : 0;
1471 }
1472 case ZATM_SETPOOL:
1473 {
1474 struct zatm_pool_info info;
1475 int pool;
1476
1477 if (!capable(CAP_NET_ADMIN)) return -EPERM;
1478 if (get_user(pool,
1479 &((struct zatm_pool_req __user *) arg)->pool_num))
1480 return -EFAULT;
1481 if (pool < 0 || pool > ZATM_LAST_POOL)
1482 return -EINVAL;
1483 if (copy_from_user(&info,
1484 &((struct zatm_pool_req __user *) arg)->info,
1485 sizeof(info))) return -EFAULT;
1486 if (!info.low_water)
1487 info.low_water = zatm_dev->
1488 pool_info[pool].low_water;
1489 if (!info.high_water)
1490 info.high_water = zatm_dev->
1491 pool_info[pool].high_water;
1492 if (!info.next_thres)
1493 info.next_thres = zatm_dev->
1494 pool_info[pool].next_thres;
1495 if (info.low_water >= info.high_water ||
1496 info.low_water < 0)
1497 return -EINVAL;
1498 spin_lock_irqsave(&zatm_dev->lock, flags);
1499 zatm_dev->pool_info[pool].low_water =
1500 info.low_water;
1501 zatm_dev->pool_info[pool].high_water =
1502 info.high_water;
1503 zatm_dev->pool_info[pool].next_thres =
1504 info.next_thres;
1505 spin_unlock_irqrestore(&zatm_dev->lock, flags);
1506 return 0;
1507 }
1508 default:
1509 if (!dev->phy->ioctl) return -ENOIOCTLCMD;
1510 return dev->phy->ioctl(dev,cmd,arg);
1511 }
1512 }
1513
1514
1515 static int zatm_getsockopt(struct atm_vcc *vcc,int level,int optname,
1516 void __user *optval,int optlen)
1517 {
1518 return -EINVAL;
1519 }
1520
1521
1522 static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname,
1523 void __user *optval,unsigned int optlen)
1524 {
1525 return -EINVAL;
1526 }
1527
1528 static int zatm_send(struct atm_vcc *vcc,struct sk_buff *skb)
1529 {
1530 int error;
1531
1532 EVENT(">zatm_send 0x%lx\n",(unsigned long) skb,0);
1533 if (!ZATM_VCC(vcc)->tx_chan || !test_bit(ATM_VF_READY,&vcc->flags)) {
1534 if (vcc->pop) vcc->pop(vcc,skb);
1535 else dev_kfree_skb(skb);
1536 return -EINVAL;
1537 }
1538 if (!skb) {
1539 printk(KERN_CRIT "!skb in zatm_send ?\n");
1540 if (vcc->pop) vcc->pop(vcc,skb);
1541 return -EINVAL;
1542 }
1543 ATM_SKB(skb)->vcc = vcc;
1544 error = do_tx(skb);
1545 if (error != RING_BUSY) return error;
1546 skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb);
1547 return 0;
1548 }
1549
1550
1551 static void zatm_phy_put(struct atm_dev *dev,unsigned char value,
1552 unsigned long addr)
1553 {
1554 struct zatm_dev *zatm_dev;
1555
1556 zatm_dev = ZATM_DEV(dev);
1557 zwait;
1558 zout(value,CER);
1559 zout(uPD98401_IND_ACC | uPD98401_IA_B0 |
1560 (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
1561 }
1562
1563
1564 static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr)
1565 {
1566 struct zatm_dev *zatm_dev;
1567
1568 zatm_dev = ZATM_DEV(dev);
1569 zwait;
1570 zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW |
1571 (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
1572 zwait;
1573 return zin(CER) & 0xff;
1574 }
1575
1576
1577 static const struct atmdev_ops ops = {
1578 .open = zatm_open,
1579 .close = zatm_close,
1580 .ioctl = zatm_ioctl,
1581 .getsockopt = zatm_getsockopt,
1582 .setsockopt = zatm_setsockopt,
1583 .send = zatm_send,
1584 .phy_put = zatm_phy_put,
1585 .phy_get = zatm_phy_get,
1586 .change_qos = zatm_change_qos,
1587 };
1588
1589 static int zatm_init_one(struct pci_dev *pci_dev,
1590 const struct pci_device_id *ent)
1591 {
1592 struct atm_dev *dev;
1593 struct zatm_dev *zatm_dev;
1594 int ret = -ENOMEM;
1595
1596 zatm_dev = kmalloc(sizeof(*zatm_dev), GFP_KERNEL);
1597 if (!zatm_dev) {
1598 printk(KERN_EMERG "%s: memory shortage\n", DEV_LABEL);
1599 goto out;
1600 }
1601
1602 dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL);
1603 if (!dev)
1604 goto out_free;
1605
1606 ret = pci_enable_device(pci_dev);
1607 if (ret < 0)
1608 goto out_deregister;
1609
1610 ret = pci_request_regions(pci_dev, DEV_LABEL);
1611 if (ret < 0)
1612 goto out_disable;
1613
1614 ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
1615 if (ret < 0)
1616 goto out_release;
1617
1618 zatm_dev->pci_dev = pci_dev;
1619 dev->dev_data = zatm_dev;
1620 zatm_dev->copper = (int)ent->driver_data;
1621 if ((ret = zatm_init(dev)) || (ret = zatm_start(dev)))
1622 goto out_release;
1623
1624 pci_set_drvdata(pci_dev, dev);
1625 zatm_dev->more = zatm_boards;
1626 zatm_boards = dev;
1627 ret = 0;
1628 out:
1629 return ret;
1630
1631 out_release:
1632 pci_release_regions(pci_dev);
1633 out_disable:
1634 pci_disable_device(pci_dev);
1635 out_deregister:
1636 atm_dev_deregister(dev);
1637 out_free:
1638 kfree(zatm_dev);
1639 goto out;
1640 }
1641
1642
1643 MODULE_LICENSE("GPL");
1644
1645 static struct pci_device_id zatm_pci_tbl[] = {
1646 { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER },
1647 { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 },
1648 { 0, }
1649 };
1650 MODULE_DEVICE_TABLE(pci, zatm_pci_tbl);
1651
1652 static struct pci_driver zatm_driver = {
1653 .name = DEV_LABEL,
1654 .id_table = zatm_pci_tbl,
1655 .probe = zatm_init_one,
1656 };
1657
1658 static int __init zatm_init_module(void)
1659 {
1660 return pci_register_driver(&zatm_driver);
1661 }
1662
1663 module_init(zatm_init_module);
1664 /* module_exit not defined so not unloadable */