1 /*
2
3 he.c
4
5 ForeRunnerHE ATM Adapter driver for ATM on Linux
6 Copyright (C) 1999-2001 Naval Research Laboratory
7
8 This library is free software; you can redistribute it and/or
9 modify it under the terms of the GNU Lesser General Public
10 License as published by the Free Software Foundation; either
11 version 2.1 of the License, or (at your option) any later version.
12
13 This library is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
17
18 You should have received a copy of the GNU Lesser General Public
19 License along with this library; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21
22 */
23
24 /*
25
26 he.c
27
28 ForeRunnerHE ATM Adapter driver for ATM on Linux
29 Copyright (C) 1999-2001 Naval Research Laboratory
30
31 Permission to use, copy, modify and distribute this software and its
32 documentation is hereby granted, provided that both the copyright
33 notice and this permission notice appear in all copies of the software,
34 derivative works or modified versions, and any portions thereof, and
35 that both notices appear in supporting documentation.
36
37 NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
38 DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
39 RESULTING FROM THE USE OF THIS SOFTWARE.
40
41 This driver was written using the "Programmer's Reference Manual for
42 ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
43
44 AUTHORS:
45 chas williams <chas@cmf.nrl.navy.mil>
46 eric kinzie <ekinzie@cmf.nrl.navy.mil>
47
48 NOTES:
49 4096 supported 'connections'
50 group 0 is used for all traffic
51 interrupt queue 0 is used for all interrupts
52 aal0 support (based on work from ulrich.u.muller@nokia.com)
53
54 */
55
56 #include <linux/module.h>
57 #include <linux/kernel.h>
58 #include <linux/skbuff.h>
59 #include <linux/pci.h>
60 #include <linux/errno.h>
61 #include <linux/types.h>
62 #include <linux/string.h>
63 #include <linux/delay.h>
64 #include <linux/init.h>
65 #include <linux/mm.h>
66 #include <linux/sched.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
69 #include <linux/dma-mapping.h>
70 #include <asm/io.h>
71 #include <asm/byteorder.h>
72 #include <asm/uaccess.h>
73
74 #include <linux/atmdev.h>
75 #include <linux/atm.h>
76 #include <linux/sonet.h>
77
78 #define USE_TASKLET
79 #undef USE_SCATTERGATHER
80 #undef USE_CHECKSUM_HW /* still confused about this */
81 #define USE_RBPS
82 #undef USE_RBPS_POOL /* if memory is tight try this */
83 #undef USE_RBPL_POOL /* if memory is tight try this */
84 #define USE_TPD_POOL
85 /* #undef CONFIG_ATM_HE_USE_SUNI */
86 /* #undef HE_DEBUG */
87
88 #include "he.h"
89 #include "suni.h"
90 #include <linux/atm_he.h>
91
92 #define hprintk(fmt,args...) printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
93
94 #ifdef HE_DEBUG
95 #define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
96 #else /* !HE_DEBUG */
97 #define HPRINTK(fmt,args...) do { } while (0)
98 #endif /* HE_DEBUG */
99
100 /* declarations */
101
102 static int he_open(struct atm_vcc *vcc);
103 static void he_close(struct atm_vcc *vcc);
104 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
105 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
106 static irqreturn_t he_irq_handler(int irq, void *dev_id);
107 static void he_tasklet(unsigned long data);
108 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
109 static int he_start(struct atm_dev *dev);
110 static void he_stop(struct he_dev *dev);
111 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
112 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
113
114 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
115
116 /* globals */
117
118 static struct he_dev *he_devs;
119 static int disable64;
120 static short nvpibits = -1;
121 static short nvcibits = -1;
122 static short rx_skb_reserve = 16;
123 static int irq_coalesce = 1;
124 static int sdh = 0;
125
126 /* Read from EEPROM = 0000 0011b */
127 static unsigned int readtab[] = {
128 CS_HIGH | CLK_HIGH,
129 CS_LOW | CLK_LOW,
130 CLK_HIGH, /* 0 */
131 CLK_LOW,
132 CLK_HIGH, /* 0 */
133 CLK_LOW,
134 CLK_HIGH, /* 0 */
135 CLK_LOW,
136 CLK_HIGH, /* 0 */
137 CLK_LOW,
138 CLK_HIGH, /* 0 */
139 CLK_LOW,
140 CLK_HIGH, /* 0 */
141 CLK_LOW | SI_HIGH,
142 CLK_HIGH | SI_HIGH, /* 1 */
143 CLK_LOW | SI_HIGH,
144 CLK_HIGH | SI_HIGH /* 1 */
145 };
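/*
 * the table above toggles chip select and then clocks the 8-bit READ
 * opcode (0000 0011b, msb first) out on SI, matching the comment above;
 * clocktab below supplies one full clock cycle per bit for the address
 * and data phases of the eeprom access
 */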
146
147 /* Clock to read from/write to the EEPROM */
148 static unsigned int clocktab[] = {
149 CLK_LOW,
150 CLK_HIGH,
151 CLK_LOW,
152 CLK_HIGH,
153 CLK_LOW,
154 CLK_HIGH,
155 CLK_LOW,
156 CLK_HIGH,
157 CLK_LOW,
158 CLK_HIGH,
159 CLK_LOW,
160 CLK_HIGH,
161 CLK_LOW,
162 CLK_HIGH,
163 CLK_LOW,
164 CLK_HIGH,
165 CLK_LOW
166 };
167
168 static struct atmdev_ops he_ops =
169 {
170 .open = he_open,
171 .close = he_close,
172 .ioctl = he_ioctl,
173 .send = he_send,
174 .phy_put = he_phy_put,
175 .phy_get = he_phy_get,
176 .proc_read = he_proc_read,
177 .owner = THIS_MODULE
178 };
179
180 #define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
181 #define he_readl(dev, reg) readl((dev)->membase + (reg))
182
183 /* section 2.12 connection memory access */
184
185 static __inline__ void
186 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
187 unsigned flags)
188 {
189 he_writel(he_dev, val, CON_DAT);
190 (void) he_readl(he_dev, CON_DAT); /* flush posted writes */
191 he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
192 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
193 }
194
195 #define he_writel_rcm(dev, val, reg) \
196 he_writel_internal(dev, val, reg, CON_CTL_RCM)
197
198 #define he_writel_tcm(dev, val, reg) \
199 he_writel_internal(dev, val, reg, CON_CTL_TCM)
200
201 #define he_writel_mbox(dev, val, reg) \
202 he_writel_internal(dev, val, reg, CON_CTL_MBOX)
203
204 static unsigned
205 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
206 {
207 he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
208 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
209 return he_readl(he_dev, CON_DAT);
210 }
211
212 #define he_readl_rcm(dev, reg) \
213 he_readl_internal(dev, reg, CON_CTL_RCM)
214
215 #define he_readl_tcm(dev, reg) \
216 he_readl_internal(dev, reg, CON_CTL_TCM)
217
218 #define he_readl_mbox(dev, reg) \
219 he_readl_internal(dev, reg, CON_CTL_MBOX)
220
221
222 /* figure 2.2 connection id */
223
224 #define he_mkcid(dev, vpi, vci) (((vpi << (dev)->vcibits) | vci) & 0x1fff)
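/* e.g. with (dev)->vcibits == 10: he_mkcid(dev, 1, 32) == ((1 << 10) | 32) & 0x1fff == 0x420 */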
225
226 /* 2.5.1 per connection transmit state registers */
227
228 #define he_writel_tsr0(dev, val, cid) \
229 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
230 #define he_readl_tsr0(dev, cid) \
231 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
232
233 #define he_writel_tsr1(dev, val, cid) \
234 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
235
236 #define he_writel_tsr2(dev, val, cid) \
237 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
238
239 #define he_writel_tsr3(dev, val, cid) \
240 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
241
242 #define he_writel_tsr4(dev, val, cid) \
243 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
244
245 /* from page 2-20
246 *
247 * NOTE While the transmit connection is active, bits 23 through 0
248 * of this register must not be written by the host. Byte
249 * enables should be used during normal operation when writing
250 * the most significant byte.
251 */
252
253 #define he_writel_tsr4_upper(dev, val, cid) \
254 he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
255 CON_CTL_TCM \
256 | CON_BYTE_DISABLE_2 \
257 | CON_BYTE_DISABLE_1 \
258 | CON_BYTE_DISABLE_0)
259
260 #define he_readl_tsr4(dev, cid) \
261 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
262
263 #define he_writel_tsr5(dev, val, cid) \
264 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
265
266 #define he_writel_tsr6(dev, val, cid) \
267 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
268
269 #define he_writel_tsr7(dev, val, cid) \
270 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
271
272
273 #define he_writel_tsr8(dev, val, cid) \
274 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
275
276 #define he_writel_tsr9(dev, val, cid) \
277 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
278
279 #define he_writel_tsr10(dev, val, cid) \
280 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
281
282 #define he_writel_tsr11(dev, val, cid) \
283 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
284
285
286 #define he_writel_tsr12(dev, val, cid) \
287 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
288
289 #define he_writel_tsr13(dev, val, cid) \
290 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
291
292
293 #define he_writel_tsr14(dev, val, cid) \
294 he_writel_tcm(dev, val, CONFIG_TSRD | cid)
295
296 #define he_writel_tsr14_upper(dev, val, cid) \
297 he_writel_internal(dev, val, CONFIG_TSRD | cid, \
298 CON_CTL_TCM \
299 | CON_BYTE_DISABLE_2 \
300 | CON_BYTE_DISABLE_1 \
301 | CON_BYTE_DISABLE_0)
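/*
 * summary of the transmit state register banks above: each connection
 * has eight words in TSRa (cid << 3), four in TSRb (cid << 2), two in
 * TSRc (cid << 1) and one in TSRd (one word per cid)
 */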
302
303 /* 2.7.1 per connection receive state registers */
304
305 #define he_writel_rsr0(dev, val, cid) \
306 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
307 #define he_readl_rsr0(dev, cid) \
308 he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
309
310 #define he_writel_rsr1(dev, val, cid) \
311 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
312
313 #define he_writel_rsr2(dev, val, cid) \
314 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
315
316 #define he_writel_rsr3(dev, val, cid) \
317 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
318
319 #define he_writel_rsr4(dev, val, cid) \
320 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
321
322 #define he_writel_rsr5(dev, val, cid) \
323 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
324
325 #define he_writel_rsr6(dev, val, cid) \
326 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
327
328 #define he_writel_rsr7(dev, val, cid) \
329 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
330
331 static __inline__ struct atm_vcc*
332 __find_vcc(struct he_dev *he_dev, unsigned cid)
333 {
334 struct hlist_head *head;
335 struct atm_vcc *vcc;
336 struct hlist_node *node;
337 struct sock *s;
338 short vpi;
339 int vci;
340
341 vpi = cid >> he_dev->vcibits;
342 vci = cid & ((1 << he_dev->vcibits) - 1);
343 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
344
345 sk_for_each(s, node, head) {
346 vcc = atm_sk(s);
347 if (vcc->dev == he_dev->atm_dev &&
348 vcc->vci == vci && vcc->vpi == vpi &&
349 vcc->qos.rxtp.traffic_class != ATM_NONE) {
350 return vcc;
351 }
352 }
353 return NULL;
354 }
355
356 static int __devinit
357 he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
358 {
359 struct atm_dev *atm_dev = NULL;
360 struct he_dev *he_dev = NULL;
361 int err = 0;
362
363 printk(KERN_INFO "ATM he driver\n");
364
365 if (pci_enable_device(pci_dev))
366 return -EIO;
367 if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
368 printk(KERN_WARNING "he: no suitable dma available\n");
369 err = -EIO;
370 goto init_one_failure;
371 }
372
373 atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
374 if (!atm_dev) {
375 err = -ENODEV;
376 goto init_one_failure;
377 }
378 pci_set_drvdata(pci_dev, atm_dev);
379
380 he_dev = kzalloc(sizeof(struct he_dev),
381 GFP_KERNEL);
382 if (!he_dev) {
383 err = -ENOMEM;
384 goto init_one_failure;
385 }
386 he_dev->pci_dev = pci_dev;
387 he_dev->atm_dev = atm_dev;
388 he_dev->atm_dev->dev_data = he_dev;
389 atm_dev->dev_data = he_dev;
390 he_dev->number = atm_dev->number;
391 #ifdef USE_TASKLET
392 tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
393 #endif
394 spin_lock_init(&he_dev->global_lock);
395
396 if (he_start(atm_dev)) {
397 he_stop(he_dev);
398 err = -ENODEV;
399 goto init_one_failure;
400 }
401 he_dev->next = NULL;
402 if (he_devs)
403 he_dev->next = he_devs;
404 he_devs = he_dev;
405 return 0;
406
407 init_one_failure:
408 if (atm_dev)
409 atm_dev_deregister(atm_dev);
410 kfree(he_dev);
411 pci_disable_device(pci_dev);
412 return err;
413 }
414
415 static void __devexit
416 he_remove_one (struct pci_dev *pci_dev)
417 {
418 struct atm_dev *atm_dev;
419 struct he_dev *he_dev;
420
421 atm_dev = pci_get_drvdata(pci_dev);
422 he_dev = HE_DEV(atm_dev);
423
424 /* need to remove from he_devs */
425
426 he_stop(he_dev);
427 atm_dev_deregister(atm_dev);
428 kfree(he_dev);
429
430 pci_set_drvdata(pci_dev, NULL);
431 pci_disable_device(pci_dev);
432 }
433
434
435 static unsigned
436 rate_to_atmf(unsigned rate) /* cps to atm forum format */
437 {
438 #define NONZERO (1 << 14)
439
440 unsigned exp = 0;
441
442 if (rate == 0)
443 return 0;
444
445 rate <<= 9;
446 while (rate > 0x3ff) {
447 ++exp;
448 rate >>= 1;
449 }
450
451 return (NONZERO | (exp << 9) | (rate & 0x1ff));
452 }
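/*
 * worked example, taking ATM_OC3_PCR == 353207 cps (the value in
 * linux/atm.h):
 *
 *	353207 << 9 == 180841984, which takes 18 right shifts to fit
 *	in 10 bits, leaving 689 == 0x2b1, so
 *
 *	rate_to_atmf(353207) == NONZERO | (18 << 9) | (689 & 0x1ff) == 0x64b1
 *
 * which decodes as 2^18 * (1 + 177/512) == 352768 cps (the error comes
 * from the truncating shifts); note that 0x64b1 is also the value the
 * 155 tables in he_init_cs_block load into CS_ERCTL2/CS_WCRMAX
 */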
453
454 static void __devinit
455 he_init_rx_lbfp0(struct he_dev *he_dev)
456 {
457 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
458 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
459 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
460 unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
461
462 lbufd_index = 0;
463 lbm_offset = he_readl(he_dev, RCMLBM_BA);
464
465 he_writel(he_dev, lbufd_index, RLBF0_H);
466
467 for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
468 lbufd_index += 2;
469 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
470
471 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
472 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
473
474 if (++lbuf_count == lbufs_per_row) {
475 lbuf_count = 0;
476 row_offset += he_dev->bytes_per_row;
477 }
478 lbm_offset += 4;
479 }
480
481 he_writel(he_dev, lbufd_index - 2, RLBF0_T);
482 he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
483 }
484
485 static void __devinit
486 he_init_rx_lbfp1(struct he_dev *he_dev)
487 {
488 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
489 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
490 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
491 unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
492
493 lbufd_index = 1;
494 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
495
496 he_writel(he_dev, lbufd_index, RLBF1_H);
497
498 for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
499 lbufd_index += 2;
500 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
501
502 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
503 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
504
505 if (++lbuf_count == lbufs_per_row) {
506 lbuf_count = 0;
507 row_offset += he_dev->bytes_per_row;
508 }
509 lbm_offset += 4;
510 }
511
512 he_writel(he_dev, lbufd_index - 2, RLBF1_T);
513 he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
514 }
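/*
 * each lbuf descriptor is two words in rcm (buffer address, index of
 * the next descriptor) at RCMLBM_BA + 2 * lbufd_index; rx0 takes the
 * even indices and rx1 the odd ones, which is why lbm_offset advances
 * by four words per pool here while the tx list below advances by two
 */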
515
516 static void __devinit
517 he_init_tx_lbfp(struct he_dev *he_dev)
518 {
519 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
520 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
521 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
522 unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
523
524 lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
525 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
526
527 he_writel(he_dev, lbufd_index, TLBF_H);
528
529 for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
530 lbufd_index += 1;
531 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
532
533 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
534 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
535
536 if (++lbuf_count == lbufs_per_row) {
537 lbuf_count = 0;
538 row_offset += he_dev->bytes_per_row;
539 }
540 lbm_offset += 2;
541 }
542
543 he_writel(he_dev, lbufd_index - 1, TLBF_T);
544 }
545
546 static int __devinit
547 he_init_tpdrq(struct he_dev *he_dev)
548 {
549 he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
550 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
551 if (he_dev->tpdrq_base == NULL) {
552 hprintk("failed to alloc tpdrq\n");
553 return -ENOMEM;
554 }
555 memset(he_dev->tpdrq_base, 0,
556 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));
557
558 he_dev->tpdrq_tail = he_dev->tpdrq_base;
559 he_dev->tpdrq_head = he_dev->tpdrq_base;
560
561 he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
562 he_writel(he_dev, 0, TPDRQ_T);
563 he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
564
565 return 0;
566 }
567
568 static void __devinit
569 he_init_cs_block(struct he_dev *he_dev)
570 {
571 unsigned clock, rate, delta;
572 int reg;
573
574 /* 5.1.7 cs block initialization */
575
576 for (reg = 0; reg < 0x20; ++reg)
577 he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
578
579 /* rate grid timer reload values */
580
581 clock = he_is622(he_dev) ? 66667000 : 50000000;
582 rate = he_dev->atm_dev->link_rate;
583 delta = rate / 16 / 2;
584
585 for (reg = 0; reg < 0x10; ++reg) {
586 /* 2.4 internal transmit function
587 *
588 * we initialize the first row in the rate grid.
589 			 * values are the timer period in clock cycles
590 */
591 unsigned period = clock / rate;
592
593 he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
594 rate -= delta;
595 }
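	/*
	 * e.g. on a 155 card: clock == 50000000 and rate starts at
	 * ATM_OC3_PCR == 353207 cps, so the first reload value is
	 * 50000000 / 353207 == 141 clock cycles; the last (rate reduced
	 * by 15 * delta to 187652 cps) is 266 cycles
	 */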
596
597 if (he_is622(he_dev)) {
598 /* table 5.2 (4 cells per lbuf) */
599 he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
600 he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
601 he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
602 he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
603 he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
604
605 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
606 he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
607 he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
608 he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
609 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
610 he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
611 he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
612
613 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
614
615 /* table 5.8 */
616 he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
617 he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
618 he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
619 he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
620 he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
621 he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
622
623 /* table 5.9 */
624 he_writel_mbox(he_dev, 0x5, CS_OTPPER);
625 he_writel_mbox(he_dev, 0x14, CS_OTWPER);
626 } else {
627 /* table 5.1 (4 cells per lbuf) */
628 he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
629 he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
630 he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
631 he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
632 he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
633
634 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
635 he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
636 he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
637 he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
638 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
639 he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
640 he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
641
642 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
643
644 /* table 5.8 */
645 he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
646 he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
647 he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
648 he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
649 he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
650 he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
651
652 /* table 5.9 */
653 he_writel_mbox(he_dev, 0x6, CS_OTPPER);
654 he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
655 }
656
657 he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
658
659 for (reg = 0; reg < 0x8; ++reg)
660 he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
661
662 }
663
664 static int __devinit
665 he_init_cs_block_rcm(struct he_dev *he_dev)
666 {
667 unsigned (*rategrid)[16][16];
668 unsigned rate, delta;
669 int i, j, reg;
670
671 unsigned rate_atmf, exp, man;
672 unsigned long long rate_cps;
673 int mult, buf, buf_limit = 4;
674
675 	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
676 if (!rategrid)
677 return -ENOMEM;
678
679 /* initialize rate grid group table */
680
681 for (reg = 0x0; reg < 0xff; ++reg)
682 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
683
684 /* initialize rate controller groups */
685
686 for (reg = 0x100; reg < 0x1ff; ++reg)
687 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
688
689 /* initialize tNrm lookup table */
690
691 /* the manual makes reference to a routine in a sample driver
692 for proper configuration; fortunately, we only need this
693 	   in order to support abr connections */
694
695 /* initialize rate to group table */
696
697 rate = he_dev->atm_dev->link_rate;
698 delta = rate / 32;
699
700 /*
701 * 2.4 transmit internal functions
702 *
703 * we construct a copy of the rate grid used by the scheduler
704 * in order to construct the rate to group table below
705 */
706
707 for (j = 0; j < 16; j++) {
708 (*rategrid)[0][j] = rate;
709 rate -= delta;
710 }
711
712 for (i = 1; i < 16; i++)
713 for (j = 0; j < 16; j++)
714 if (i > 14)
715 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
716 else
717 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
718
719 /*
720 * 2.4 transmit internal function
721 *
722 * this table maps the upper 5 bits of exponent and mantissa
723 * of the atm forum representation of the rate into an index
724 	 * on the rate grid
725 */
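	/*
	 * e.g. for the 155 link rate encoded as 0x64b1 (see rate_to_atmf):
	 * exp == 18 and the mantissa's top five bits are 0x0b, so
	 * rate_atmf == (18 << 5) | 0x0b == 0x24b, and below
	 * rate_cps == (1 << 18) * (0x0b0 + 512) >> 9 == 352256 cps
	 */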
726
727 rate_atmf = 0;
728 while (rate_atmf < 0x400) {
729 man = (rate_atmf & 0x1f) << 4;
730 exp = rate_atmf >> 5;
731
732 /*
733 instead of '/ 512', use '>> 9' to prevent a call
734 to divdu3 on x86 platforms
735 */
736 rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
737
738 if (rate_cps < 10)
739 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
740
741 for (i = 255; i > 0; i--)
742 if ((*rategrid)[i/16][i%16] >= rate_cps)
743 break; /* pick nearest rate instead? */
744
745 /*
746 		 * each table entry is 16 bits: a rate grid index (8 bits)
747 		 * and a buffer limit (8 bits);
748 * there are two table entries in each 32-bit register
749 */
750
751 #ifdef notdef
752 buf = rate_cps * he_dev->tx_numbuffs /
753 (he_dev->atm_dev->link_rate * 2);
754 #else
755 		/* this is not pretty, but avoids _divdu3 and is mostly correct */
756 mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
757 if (rate_cps > (272 * mult))
758 buf = 4;
759 else if (rate_cps > (204 * mult))
760 buf = 3;
761 else if (rate_cps > (136 * mult))
762 buf = 2;
763 else if (rate_cps > (68 * mult))
764 buf = 1;
765 else
766 buf = 0;
767 #endif
768 if (buf > buf_limit)
769 buf = buf_limit;
770 reg = (reg << 16) | ((i << 8) | buf);
771
772 #define RTGTBL_OFFSET 0x400
773
774 if (rate_atmf & 0x1)
775 he_writel_rcm(he_dev, reg,
776 CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
777
778 ++rate_atmf;
779 }
780
781 kfree(rategrid);
782 return 0;
783 }
784
785 static int __devinit
786 he_init_group(struct he_dev *he_dev, int group)
787 {
788 int i;
789
790 #ifdef USE_RBPS
791 /* small buffer pool */
792 #ifdef USE_RBPS_POOL
793 he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
794 CONFIG_RBPS_BUFSIZE, 8, 0);
795 if (he_dev->rbps_pool == NULL) {
796 hprintk("unable to create rbps pages\n");
797 return -ENOMEM;
798 }
799 #else /* !USE_RBPS_POOL */
800 he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
801 CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
802 if (he_dev->rbps_pages == NULL) {
803 hprintk("unable to create rbps page pool\n");
804 return -ENOMEM;
805 }
806 #endif /* USE_RBPS_POOL */
807
808 he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
809 CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
810 if (he_dev->rbps_base == NULL) {
811 hprintk("failed to alloc rbps\n");
812 return -ENOMEM;
813 }
814 memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
815 	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbps_virt == NULL) {
		hprintk("failed to alloc rbps_virt\n");
		return -ENOMEM;
	}
816
817 for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
818 dma_addr_t dma_handle;
819 void *cpuaddr;
820
821 #ifdef USE_RBPS_POOL
822 cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
823 if (cpuaddr == NULL)
824 return -ENOMEM;
825 #else
826 cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
827 dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
828 #endif
829
830 he_dev->rbps_virt[i].virt = cpuaddr;
831 he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
832 he_dev->rbps_base[i].phys = dma_handle;
833
834 }
835 he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];
836
837 he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
838 he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
839 G0_RBPS_T + (group * 32));
840 he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
841 G0_RBPS_BS + (group * 32));
842 he_writel(he_dev,
843 RBP_THRESH(CONFIG_RBPS_THRESH) |
844 RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
845 RBP_INT_ENB,
846 G0_RBPS_QI + (group * 32));
847 #else /* !USE_RBPS */
848 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
849 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
850 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
851 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
852 G0_RBPS_BS + (group * 32));
853 #endif /* USE_RBPS */
854
855 /* large buffer pool */
856 #ifdef USE_RBPL_POOL
857 he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
858 CONFIG_RBPL_BUFSIZE, 8, 0);
859 if (he_dev->rbpl_pool == NULL) {
860 hprintk("unable to create rbpl pool\n");
861 return -ENOMEM;
862 }
863 #else /* !USE_RBPL_POOL */
864 he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
865 CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
866 if (he_dev->rbpl_pages == NULL) {
867 hprintk("unable to create rbpl pages\n");
868 return -ENOMEM;
869 }
870 #endif /* USE_RBPL_POOL */
871
872 he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
873 CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
874 if (he_dev->rbpl_base == NULL) {
875 hprintk("failed to alloc rbpl\n");
876 return -ENOMEM;
877 }
878 memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
879 	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbpl_virt == NULL) {
		hprintk("failed to alloc rbpl_virt\n");
		return -ENOMEM;
	}
880
881 for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
882 dma_addr_t dma_handle;
883 void *cpuaddr;
884
885 #ifdef USE_RBPL_POOL
886 cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
887 if (cpuaddr == NULL)
888 return -ENOMEM;
889 #else
890 cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
891 dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
892 #endif
893
894 he_dev->rbpl_virt[i].virt = cpuaddr;
895 he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
896 he_dev->rbpl_base[i].phys = dma_handle;
897 }
898 he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
899
900 he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
901 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
902 G0_RBPL_T + (group * 32));
903 he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
904 G0_RBPL_BS + (group * 32));
905 he_writel(he_dev,
906 RBP_THRESH(CONFIG_RBPL_THRESH) |
907 RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
908 RBP_INT_ENB,
909 G0_RBPL_QI + (group * 32));
910
911 /* rx buffer ready queue */
912
913 he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
914 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
915 if (he_dev->rbrq_base == NULL) {
916 hprintk("failed to allocate rbrq\n");
917 return -ENOMEM;
918 }
919 memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
920
921 he_dev->rbrq_head = he_dev->rbrq_base;
922 he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
923 he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
924 he_writel(he_dev,
925 RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
926 G0_RBRQ_Q + (group * 16));
927 if (irq_coalesce) {
928 hprintk("coalescing interrupts\n");
929 he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
930 G0_RBRQ_I + (group * 16));
931 } else
932 he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
933 G0_RBRQ_I + (group * 16));
934
935 /* tx buffer ready queue */
936
937 he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
938 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
939 if (he_dev->tbrq_base == NULL) {
940 hprintk("failed to allocate tbrq\n");
941 return -ENOMEM;
942 }
943 memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
944
945 he_dev->tbrq_head = he_dev->tbrq_base;
946
947 he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
948 he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
949 he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
950 he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
951
952 return 0;
953 }
954
955 static int __devinit
956 he_init_irq(struct he_dev *he_dev)
957 {
958 int i;
959
960 /* 2.9.3.5 tail offset for each interrupt queue is located after the
961 end of the interrupt queue */
962
963 he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
964 (CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
965 if (he_dev->irq_base == NULL) {
966 hprintk("failed to allocate irq\n");
967 return -ENOMEM;
968 }
969 he_dev->irq_tailoffset = (unsigned *)
970 &he_dev->irq_base[CONFIG_IRQ_SIZE];
971 *he_dev->irq_tailoffset = 0;
972 he_dev->irq_head = he_dev->irq_base;
973 he_dev->irq_tail = he_dev->irq_base;
974
975 for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
976 he_dev->irq_base[i].isw = ITYPE_INVALID;
977
978 he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
979 he_writel(he_dev,
980 IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
981 IRQ0_HEAD);
982 he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
983 he_writel(he_dev, 0x0, IRQ0_DATA);
984
985 he_writel(he_dev, 0x0, IRQ1_BASE);
986 he_writel(he_dev, 0x0, IRQ1_HEAD);
987 he_writel(he_dev, 0x0, IRQ1_CNTL);
988 he_writel(he_dev, 0x0, IRQ1_DATA);
989
990 he_writel(he_dev, 0x0, IRQ2_BASE);
991 he_writel(he_dev, 0x0, IRQ2_HEAD);
992 he_writel(he_dev, 0x0, IRQ2_CNTL);
993 he_writel(he_dev, 0x0, IRQ2_DATA);
994
995 he_writel(he_dev, 0x0, IRQ3_BASE);
996 he_writel(he_dev, 0x0, IRQ3_HEAD);
997 he_writel(he_dev, 0x0, IRQ3_CNTL);
998 he_writel(he_dev, 0x0, IRQ3_DATA);
999
1000 /* 2.9.3.2 interrupt queue mapping registers */
1001
1002 he_writel(he_dev, 0x0, GRP_10_MAP);
1003 he_writel(he_dev, 0x0, GRP_32_MAP);
1004 he_writel(he_dev, 0x0, GRP_54_MAP);
1005 he_writel(he_dev, 0x0, GRP_76_MAP);
1006
1007 if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
1008 hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
1009 return -EINVAL;
1010 }
1011
1012 he_dev->irq = he_dev->pci_dev->irq;
1013
1014 return 0;
1015 }
1016
1017 static int __devinit
1018 he_start(struct atm_dev *dev)
1019 {
1020 struct he_dev *he_dev;
1021 struct pci_dev *pci_dev;
1022 unsigned long membase;
1023
1024 u16 command;
1025 u32 gen_cntl_0, host_cntl, lb_swap;
1026 u8 cache_size, timer;
1027
1028 unsigned err;
1029 unsigned int status, reg;
1030 int i, group;
1031
1032 he_dev = HE_DEV(dev);
1033 pci_dev = he_dev->pci_dev;
1034
1035 membase = pci_resource_start(pci_dev, 0);
1036 HPRINTK("membase = 0x%lx irq = %d.\n", membase, pci_dev->irq);
1037
1038 /*
1039 * pci bus controller initialization
1040 */
1041
1042 /* 4.3 pci bus controller-specific initialization */
1043 if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
1044 hprintk("can't read GEN_CNTL_0\n");
1045 return -EINVAL;
1046 }
1047 gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1048 if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1049 hprintk("can't write GEN_CNTL_0.\n");
1050 return -EINVAL;
1051 }
1052
1053 if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1054 hprintk("can't read PCI_COMMAND.\n");
1055 return -EINVAL;
1056 }
1057
1058 command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1059 if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1060 hprintk("can't enable memory.\n");
1061 return -EINVAL;
1062 }
1063
1064 if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1065 hprintk("can't read cache line size?\n");
1066 return -EINVAL;
1067 }
1068
1069 if (cache_size < 16) {
1070 cache_size = 16;
1071 if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1072 hprintk("can't set cache line size to %d\n", cache_size);
1073 }
1074
1075 if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1076 hprintk("can't read latency timer?\n");
1077 return -EINVAL;
1078 }
1079
1080 /* from table 3.9
1081 *
1082 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1083 *
1084 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1085 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1086 *
1087 */
1088 #define LAT_TIMER 209
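/* i.e. 1 + 16 + 1536/8 == 209 clocks for the 622 read burst on a 64-bit bus */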
1089 if (timer < LAT_TIMER) {
1090 HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1091 timer = LAT_TIMER;
1092 if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1093 hprintk("can't set latency timer to %d\n", timer);
1094 }
1095
1096 if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1097 hprintk("can't set up page mapping\n");
1098 return -EINVAL;
1099 }
1100
1101 /* 4.4 card reset */
1102 he_writel(he_dev, 0x0, RESET_CNTL);
1103 he_writel(he_dev, 0xff, RESET_CNTL);
1104
1105 udelay(16*1000); /* 16 ms */
1106 status = he_readl(he_dev, RESET_CNTL);
1107 if ((status & BOARD_RST_STATUS) == 0) {
1108 hprintk("reset failed\n");
1109 return -EINVAL;
1110 }
1111
1112 /* 4.5 set bus width */
1113 host_cntl = he_readl(he_dev, HOST_CNTL);
1114 if (host_cntl & PCI_BUS_SIZE64)
1115 gen_cntl_0 |= ENBL_64;
1116 else
1117 gen_cntl_0 &= ~ENBL_64;
1118
1119 if (disable64 == 1) {
1120 hprintk("disabling 64-bit pci bus transfers\n");
1121 gen_cntl_0 &= ~ENBL_64;
1122 }
1123
1124 if (gen_cntl_0 & ENBL_64)
1125 hprintk("64-bit transfers enabled\n");
1126
1127 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1128
1129 /* 4.7 read prom contents */
1130 for (i = 0; i < PROD_ID_LEN; ++i)
1131 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1132
1133 he_dev->media = read_prom_byte(he_dev, MEDIA);
1134
1135 for (i = 0; i < 6; ++i)
1136 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1137
1138 hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
1139 he_dev->prod_id,
1140 he_dev->media & 0x40 ? "SM" : "MM",
1141 dev->esi[0],
1142 dev->esi[1],
1143 dev->esi[2],
1144 dev->esi[3],
1145 dev->esi[4],
1146 dev->esi[5]);
1147 he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1148 ATM_OC12_PCR : ATM_OC3_PCR;
1149
1150 	/* 4.6 set host endianness */
1151 lb_swap = he_readl(he_dev, LB_SWAP);
1152 if (he_is622(he_dev))
1153 lb_swap &= ~XFER_SIZE; /* 4 cells */
1154 else
1155 lb_swap |= XFER_SIZE; /* 8 cells */
1156 #ifdef __BIG_ENDIAN
1157 lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1158 #else
1159 lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1160 DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1161 #endif /* __BIG_ENDIAN */
1162 he_writel(he_dev, lb_swap, LB_SWAP);
1163
1164 /* 4.8 sdram controller initialization */
1165 he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1166
1167 /* 4.9 initialize rnum value */
1168 lb_swap |= SWAP_RNUM_MAX(0xf);
1169 he_writel(he_dev, lb_swap, LB_SWAP);
1170
1171 /* 4.10 initialize the interrupt queues */
1172 if ((err = he_init_irq(he_dev)) != 0)
1173 return err;
1174
1175 /* 4.11 enable pci bus controller state machines */
1176 host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1177 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1178 he_writel(he_dev, host_cntl, HOST_CNTL);
1179
1180 gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1181 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1182
1183 /*
1184 * atm network controller initialization
1185 */
1186
1187 /* 5.1.1 generic configuration state */
1188
1189 /*
1190 * local (cell) buffer memory map
1191 *
1192 * HE155 HE622
1193 *
1194 * 0 ____________1023 bytes 0 _______________________2047 bytes
1195 * | | | | |
1196 * | utility | | rx0 | |
1197 * 5|____________| 255|___________________| u |
1198 * 6| | 256| | t |
1199 * | | | | i |
1200 * | rx0 | row | tx | l |
1201 * | | | | i |
1202 * | | 767|___________________| t |
1203 * 517|____________| 768| | y |
1204 * row 518| | | rx1 | |
1205 * | | 1023|___________________|___|
1206 * | |
1207 * | tx |
1208 * | |
1209 * | |
1210 * 1535|____________|
1211 * 1536| |
1212 * | rx1 |
1213 * 2047|____________|
1214 *
1215 */
1216
1217 /* total 4096 connections */
1218 he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1219 he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1220
1221 if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1222 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1223 return -ENODEV;
1224 }
1225
1226 if (nvpibits != -1) {
1227 he_dev->vpibits = nvpibits;
1228 he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1229 }
1230
1231 if (nvcibits != -1) {
1232 he_dev->vcibits = nvcibits;
1233 he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1234 }
1235
1236
1237 if (he_is622(he_dev)) {
1238 he_dev->cells_per_row = 40;
1239 he_dev->bytes_per_row = 2048;
1240 he_dev->r0_numrows = 256;
1241 he_dev->tx_numrows = 512;
1242 he_dev->r1_numrows = 256;
1243 he_dev->r0_startrow = 0;
1244 he_dev->tx_startrow = 256;
1245 he_dev->r1_startrow = 768;
1246 } else {
1247 he_dev->cells_per_row = 20;
1248 he_dev->bytes_per_row = 1024;
1249 he_dev->r0_numrows = 512;
1250 he_dev->tx_numrows = 1018;
1251 he_dev->r1_numrows = 512;
1252 he_dev->r0_startrow = 6;
1253 he_dev->tx_startrow = 518;
1254 he_dev->r1_startrow = 1536;
1255 }
1256
1257 he_dev->cells_per_lbuf = 4;
1258 he_dev->buffer_limit = 4;
1259 he_dev->r0_numbuffs = he_dev->r0_numrows *
1260 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1261 if (he_dev->r0_numbuffs > 2560)
1262 he_dev->r0_numbuffs = 2560;
1263
1264 he_dev->r1_numbuffs = he_dev->r1_numrows *
1265 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1266 if (he_dev->r1_numbuffs > 2560)
1267 he_dev->r1_numbuffs = 2560;
1268
1269 he_dev->tx_numbuffs = he_dev->tx_numrows *
1270 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1271 if (he_dev->tx_numbuffs > 5120)
1272 he_dev->tx_numbuffs = 5120;
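	/*
	 * e.g. on a 155 card: r0/r1 each get 512 * 20 / 4 == 2560 lbufs
	 * and tx gets 1018 * 20 / 4 == 5090; on a 622 card: r0/r1 each
	 * get 256 * 40 / 4 == 2560 and tx gets 512 * 40 / 4 == 5120,
	 * so the clamps above never actually bite
	 */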
1273
1274 /* 5.1.2 configure hardware dependent registers */
1275
1276 he_writel(he_dev,
1277 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1278 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1279 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1280 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1281 LBARB);
1282
1283 he_writel(he_dev, BANK_ON |
1284 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1285 SDRAMCON);
1286
1287 he_writel(he_dev,
1288 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1289 RM_RW_WAIT(1), RCMCONFIG);
1290 he_writel(he_dev,
1291 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1292 TM_RW_WAIT(1), TCMCONFIG);
1293
1294 he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1295
1296 he_writel(he_dev,
1297 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1298 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1299 RX_VALVP(he_dev->vpibits) |
1300 RX_VALVC(he_dev->vcibits), RC_CONFIG);
1301
1302 he_writel(he_dev, DRF_THRESH(0x20) |
1303 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1304 TX_VCI_MASK(he_dev->vcibits) |
1305 LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);
1306
1307 he_writel(he_dev, 0x0, TXAAL5_PROTO);
1308
1309 he_writel(he_dev, PHY_INT_ENB |
1310 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1311 RH_CONFIG);
1312
1313 /* 5.1.3 initialize connection memory */
1314
1315 for (i = 0; i < TCM_MEM_SIZE; ++i)
1316 he_writel_tcm(he_dev, 0, i);
1317
1318 for (i = 0; i < RCM_MEM_SIZE; ++i)
1319 he_writel_rcm(he_dev, 0, i);
1320
1321 /*
1322 * transmit connection memory map
1323 *
1324 * tx memory
1325 * 0x0 ___________________
1326 * | |
1327 * | |
1328 * | TSRa |
1329 * | |
1330 * | |
1331 * 0x8000|___________________|
1332 * | |
1333 * | TSRb |
1334 * 0xc000|___________________|
1335 * | |
1336 * | TSRc |
1337 * 0xe000|___________________|
1338 * | TSRd |
1339 * 0xf000|___________________|
1340 * | tmABR |
1341 * 0x10000|___________________|
1342 * | |
1343 * | tmTPD |
1344 * |___________________|
1345 * | |
1346 * ....
1347 * 0x1ffff|___________________|
1348 *
1349 *
1350 */
1351
1352 he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1353 he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1354 he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1355 he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1356 he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1357
1358
1359 /*
1360 * receive connection memory map
1361 *
1362 * 0x0 ___________________
1363 * | |
1364 * | |
1365 * | RSRa |
1366 * | |
1367 * | |
1368 * 0x8000|___________________|
1369 * | |
1370 * | rx0/1 |
1371 	 *            |   LBM   |  linked lists of local
1372 * | tx | buffer memory
1373 * | |
1374 * 0xd000|___________________|
1375 * | |
1376 * | rmABR |
1377 * 0xe000|___________________|
1378 * | |
1379 * | RSRb |
1380 * |___________________|
1381 * | |
1382 * ....
1383 * 0xffff|___________________|
1384 */
1385
1386 he_writel(he_dev, 0x08000, RCMLBM_BA);
1387 he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1388 he_writel(he_dev, 0x0d800, RCMABR_BA);
1389
1390 /* 5.1.4 initialize local buffer free pools linked lists */
1391
1392 he_init_rx_lbfp0(he_dev);
1393 he_init_rx_lbfp1(he_dev);
1394
1395 he_writel(he_dev, 0x0, RLBC_H);
1396 he_writel(he_dev, 0x0, RLBC_T);
1397 he_writel(he_dev, 0x0, RLBC_H2);
1398
1399 he_writel(he_dev, 512, RXTHRSH); /* 10% of r0+r1 buffers */
1400 he_writel(he_dev, 256, LITHRSH); /* 5% of r0+r1 buffers */
1401
1402 he_init_tx_lbfp(he_dev);
1403
1404 he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1405
1406 /* 5.1.5 initialize intermediate receive queues */
1407
1408 if (he_is622(he_dev)) {
1409 he_writel(he_dev, 0x000f, G0_INMQ_S);
1410 he_writel(he_dev, 0x200f, G0_INMQ_L);
1411
1412 he_writel(he_dev, 0x001f, G1_INMQ_S);
1413 he_writel(he_dev, 0x201f, G1_INMQ_L);
1414
1415 he_writel(he_dev, 0x002f, G2_INMQ_S);
1416 he_writel(he_dev, 0x202f, G2_INMQ_L);
1417
1418 he_writel(he_dev, 0x003f, G3_INMQ_S);
1419 he_writel(he_dev, 0x203f, G3_INMQ_L);
1420
1421 he_writel(he_dev, 0x004f, G4_INMQ_S);
1422 he_writel(he_dev, 0x204f, G4_INMQ_L);
1423
1424 he_writel(he_dev, 0x005f, G5_INMQ_S);
1425 he_writel(he_dev, 0x205f, G5_INMQ_L);
1426
1427 he_writel(he_dev, 0x006f, G6_INMQ_S);
1428 he_writel(he_dev, 0x206f, G6_INMQ_L);
1429
1430 he_writel(he_dev, 0x007f, G7_INMQ_S);
1431 he_writel(he_dev, 0x207f, G7_INMQ_L);
1432 } else {
1433 he_writel(he_dev, 0x0000, G0_INMQ_S);
1434 he_writel(he_dev, 0x0008, G0_INMQ_L);
1435
1436 he_writel(he_dev, 0x0001, G1_INMQ_S);
1437 he_writel(he_dev, 0x0009, G1_INMQ_L);
1438
1439 he_writel(he_dev, 0x0002, G2_INMQ_S);
1440 he_writel(he_dev, 0x000a, G2_INMQ_L);
1441
1442 he_writel(he_dev, 0x0003, G3_INMQ_S);
1443 he_writel(he_dev, 0x000b, G3_INMQ_L);
1444
1445 he_writel(he_dev, 0x0004, G4_INMQ_S);
1446 he_writel(he_dev, 0x000c, G4_INMQ_L);
1447
1448 he_writel(he_dev, 0x0005, G5_INMQ_S);
1449 he_writel(he_dev, 0x000d, G5_INMQ_L);
1450
1451 he_writel(he_dev, 0x0006, G6_INMQ_S);
1452 he_writel(he_dev, 0x000e, G6_INMQ_L);
1453
1454 he_writel(he_dev, 0x0007, G7_INMQ_S);
1455 he_writel(he_dev, 0x000f, G7_INMQ_L);
1456 }
1457
1458 /* 5.1.6 application tunable parameters */
1459
1460 he_writel(he_dev, 0x0, MCC);
1461 he_writel(he_dev, 0x0, OEC);
1462 he_writel(he_dev, 0x0, DCC);
1463 he_writel(he_dev, 0x0, CEC);
1464
1465 /* 5.1.7 cs block initialization */
1466
1467 he_init_cs_block(he_dev);
1468
1469 /* 5.1.8 cs block connection memory initialization */
1470
1471 if (he_init_cs_block_rcm(he_dev) < 0)
1472 return -ENOMEM;
1473
1474 /* 5.1.10 initialize host structures */
1475
1476 he_init_tpdrq(he_dev);
1477
1478 #ifdef USE_TPD_POOL
1479 he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1480 sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1481 if (he_dev->tpd_pool == NULL) {
1482 hprintk("unable to create tpd pci_pool\n");
1483 return -ENOMEM;
1484 }
1485
1486 INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1487 #else
1488 he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
1489 CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
1490 if (!he_dev->tpd_base)
1491 return -ENOMEM;
1492
1493 for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1494 he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
1495 he_dev->tpd_base[i].inuse = 0;
1496 }
1497
1498 he_dev->tpd_head = he_dev->tpd_base;
1499 he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
1500 #endif
1501
1502 if (he_init_group(he_dev, 0) != 0)
1503 return -ENOMEM;
1504
1505 for (group = 1; group < HE_NUM_GROUPS; ++group) {
1506 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1507 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1508 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1509 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1510 G0_RBPS_BS + (group * 32));
1511
1512 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1513 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1514 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1515 G0_RBPL_QI + (group * 32));
1516 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1517
1518 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1519 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1520 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1521 G0_RBRQ_Q + (group * 16));
1522 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1523
1524 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1525 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1526 he_writel(he_dev, TBRQ_THRESH(0x1),
1527 G0_TBRQ_THRESH + (group * 16));
1528 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1529 }
1530
1531 /* host status page */
1532
1533 he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
1534 sizeof(struct he_hsp), &he_dev->hsp_phys);
1535 if (he_dev->hsp == NULL) {
1536 hprintk("failed to allocate host status page\n");
1537 return -ENOMEM;
1538 }
1539 memset(he_dev->hsp, 0, sizeof(struct he_hsp));
1540 he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1541
1542 /* initialize framer */
1543
1544 #ifdef CONFIG_ATM_HE_USE_SUNI
1545 if (he_isMM(he_dev))
1546 suni_init(he_dev->atm_dev);
1547 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1548 he_dev->atm_dev->phy->start(he_dev->atm_dev);
1549 #endif /* CONFIG_ATM_HE_USE_SUNI */
1550
1551 if (sdh) {
1552 /* this really should be in suni.c but for now... */
1553 int val;
1554
1555 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1556 val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1557 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1558 he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1559 }
1560
1561 /* 5.1.12 enable transmit and receive */
1562
1563 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1564 reg |= TX_ENABLE|ER_ENABLE;
1565 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1566
1567 reg = he_readl(he_dev, RC_CONFIG);
1568 reg |= RX_ENABLE;
1569 he_writel(he_dev, reg, RC_CONFIG);
1570
1571 for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1572 he_dev->cs_stper[i].inuse = 0;
1573 he_dev->cs_stper[i].pcr = -1;
1574 }
1575 he_dev->total_bw = 0;
1576
1577
1578 /* atm linux initialization */
1579
1580 he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1581 he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1582
1583 he_dev->irq_peak = 0;
1584 he_dev->rbrq_peak = 0;
1585 he_dev->rbpl_peak = 0;
1586 he_dev->tbrq_peak = 0;
1587
1588 HPRINTK("hell bent for leather!\n");
1589
1590 return 0;
1591 }
1592
1593 static void
1594 he_stop(struct he_dev *he_dev)
1595 {
1596 u16 command;
1597 u32 gen_cntl_0, reg;
1598 struct pci_dev *pci_dev;
1599
1600 pci_dev = he_dev->pci_dev;
1601
1602 /* disable interrupts */
1603
1604 if (he_dev->membase) {
1605 pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1606 gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1607 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1608
1609 #ifdef USE_TASKLET
1610 tasklet_disable(&he_dev->tasklet);
1611 #endif
1612
1613 /* disable recv and transmit */
1614
1615 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1616 reg &= ~(TX_ENABLE|ER_ENABLE);
1617 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1618
1619 reg = he_readl(he_dev, RC_CONFIG);
1620 reg &= ~(RX_ENABLE);
1621 he_writel(he_dev, reg, RC_CONFIG);
1622 }
1623
1624 #ifdef CONFIG_ATM_HE_USE_SUNI
1625 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1626 he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1627 #endif /* CONFIG_ATM_HE_USE_SUNI */
1628
1629 if (he_dev->irq)
1630 free_irq(he_dev->irq, he_dev);
1631
1632 if (he_dev->irq_base)
1633 pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
1634 * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1635
1636 if (he_dev->hsp)
1637 pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
1638 he_dev->hsp, he_dev->hsp_phys);
1639
1640 if (he_dev->rbpl_base) {
1641 #ifdef USE_RBPL_POOL
1642 int i;
1643
1644 for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
1645 void *cpuaddr = he_dev->rbpl_virt[i].virt;
1646 dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;
1647
1648 pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
1649 }
1650 #else
1651 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1652 * CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
1653 #endif
1654 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1655 * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1656 }
1657
1658 #ifdef USE_RBPL_POOL
1659 if (he_dev->rbpl_pool)
1660 pci_pool_destroy(he_dev->rbpl_pool);
1661 #endif
1662
1663 #ifdef USE_RBPS
1664 if (he_dev->rbps_base) {
1665 #ifdef USE_RBPS_POOL
1666 int i;
1667
1668 for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
1669 void *cpuaddr = he_dev->rbps_virt[i].virt;
1670 dma_addr_t dma_handle = he_dev->rbps_base[i].phys;
1671
1672 pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
1673 }
1674 #else
1675 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1676 * CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
1677 #endif
1678 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1679 * sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
1680 }
1681
1682 #ifdef USE_RBPS_POOL
1683 if (he_dev->rbps_pool)
1684 pci_pool_destroy(he_dev->rbps_pool);
1685 #endif
1686
1687 #endif /* USE_RBPS */
1688
1689 if (he_dev->rbrq_base)
1690 pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1691 he_dev->rbrq_base, he_dev->rbrq_phys);
1692
1693 if (he_dev->tbrq_base)
1694 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1695 he_dev->tbrq_base, he_dev->tbrq_phys);
1696
1697 if (he_dev->tpdrq_base)
1698 		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
1699 he_dev->tpdrq_base, he_dev->tpdrq_phys);
1700
1701 #ifdef USE_TPD_POOL
1702 if (he_dev->tpd_pool)
1703 pci_pool_destroy(he_dev->tpd_pool);
1704 #else
1705 if (he_dev->tpd_base)
1706 pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
1707 he_dev->tpd_base, he_dev->tpd_base_phys);
1708 #endif
1709
1710 if (he_dev->pci_dev) {
1711 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1712 command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1713 pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1714 }
1715
1716 if (he_dev->membase)
1717 iounmap(he_dev->membase);
1718 }
1719
1720 static struct he_tpd *
1721 __alloc_tpd(struct he_dev *he_dev)
1722 {
1723 #ifdef USE_TPD_POOL
1724 struct he_tpd *tpd;
1725 dma_addr_t dma_handle;
1726
1727 tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
1728 if (tpd == NULL)
1729 return NULL;
1730
1731 tpd->status = TPD_ADDR(dma_handle);
1732 tpd->reserved = 0;
1733 tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1734 tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1735 tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1736
1737 return tpd;
1738 #else
1739 int i;
1740
1741 for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1742 ++he_dev->tpd_head;
1743 if (he_dev->tpd_head > he_dev->tpd_end) {
1744 he_dev->tpd_head = he_dev->tpd_base;
1745 }
1746
1747 if (!he_dev->tpd_head->inuse) {
1748 he_dev->tpd_head->inuse = 1;
1749 he_dev->tpd_head->status &= TPD_MASK;
1750 he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
1751 he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
1752 he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
1753 return he_dev->tpd_head;
1754 }
1755 }
1756 hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
1757 return NULL;
1758 #endif
1759 }
1760
1761 #define AAL5_LEN(buf,len) \
1762 ((((unsigned char *)(buf))[(len)-6] << 8) | \
1763 (((unsigned char *)(buf))[(len)-5]))
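/*
 * the aal5 cpcs trailer occupies the last eight bytes of the pdu:
 * uu (1), cpi (1), length (2), crc-32 (4) -- so bytes len-6 and
 * len-5 hold the 16-bit length field read here
 */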
1764
1765 /* 2.10.1.2 receive
1766 *
1767 * aal5 packets can optionally return the tcp checksum in the lower
1768 * 16 bits of the crc (RSR0_TCP_CKSUM)
1769 */
1770
1771 #define TCP_CKSUM(buf,len) \
1772 ((((unsigned char *)(buf))[(len)-2] << 8) | \
1773 (((unsigned char *)(buf))[(len-1)]))
1774
1775 static int
1776 he_service_rbrq(struct he_dev *he_dev, int group)
1777 {
1778 struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1779 ((unsigned long)he_dev->rbrq_base |
1780 he_dev->hsp->group[group].rbrq_tail);
1781 struct he_rbp *rbp = NULL;
1782 unsigned cid, lastcid = -1;
1783 unsigned buf_len = 0;
1784 struct sk_buff *skb;
1785 struct atm_vcc *vcc = NULL;
1786 struct he_vcc *he_vcc;
1787 struct he_iovec *iov;
1788 int pdus_assembled = 0;
1789 int updated = 0;
1790
1791 read_lock(&vcc_sklist_lock);
1792 while (he_dev->rbrq_head != rbrq_tail) {
1793 ++updated;
1794
1795 HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1796 he_dev->rbrq_head, group,
1797 RBRQ_ADDR(he_dev->rbrq_head),
1798 RBRQ_BUFLEN(he_dev->rbrq_head),
1799 RBRQ_CID(he_dev->rbrq_head),
1800 RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1801 RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1802 RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1803 RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1804 RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1805 RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1806
1807 #ifdef USE_RBPS
1808 if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
1809 rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1810 else
1811 #endif
1812 rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1813
1814 buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1815 cid = RBRQ_CID(he_dev->rbrq_head);
1816
1817 if (cid != lastcid)
1818 vcc = __find_vcc(he_dev, cid);
1819 lastcid = cid;
1820
1821 if (vcc == NULL) {
1822 hprintk("vcc == NULL (cid 0x%x)\n", cid);
1823 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1824 rbp->status &= ~RBP_LOANED;
1825
1826 goto next_rbrq_entry;
1827 }
1828
1829 he_vcc = HE_VCC(vcc);
1830 if (he_vcc == NULL) {
1831 hprintk("he_vcc == NULL (cid 0x%x)\n", cid);
1832 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1833 rbp->status &= ~RBP_LOANED;
1834 goto next_rbrq_entry;
1835 }
1836
1837 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1838 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
1839 atomic_inc(&vcc->stats->rx_drop);
1840 goto return_host_buffers;
1841 }
1842
1843 he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
1844 he_vcc->iov_tail->iov_len = buf_len;
1845 he_vcc->pdu_len += buf_len;
1846 ++he_vcc->iov_tail;
1847
1848 if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1849 lastcid = -1;
1850 HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid);
1851 wake_up(&he_vcc->rx_waitq);
1852 goto return_host_buffers;
1853 }
1854
1855 #ifdef notdef
1856 if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
1857 hprintk("iovec full! cid 0x%x\n", cid);
1858 goto return_host_buffers;
1859 }
1860 #endif
1861 if (!RBRQ_END_PDU(he_dev->rbrq_head))
1862 goto next_rbrq_entry;
1863
1864 if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1865 || RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1866 HPRINTK("%s%s (%d.%d)\n",
1867 RBRQ_CRC_ERR(he_dev->rbrq_head)
1868 ? "CRC_ERR " : "",
1869 RBRQ_LEN_ERR(he_dev->rbrq_head)
1870 ? "LEN_ERR" : "",
1871 vcc->vpi, vcc->vci);
1872 atomic_inc(&vcc->stats->rx_err);
1873 goto return_host_buffers;
1874 }
1875
1876 skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1877 GFP_ATOMIC);
1878 if (!skb) {
1879 HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1880 goto return_host_buffers;
1881 }
1882
1883 if (rx_skb_reserve > 0)
1884 skb_reserve(skb, rx_skb_reserve);
1885
1886 __net_timestamp(skb);
1887
1888 for (iov = he_vcc->iov_head;
1889 iov < he_vcc->iov_tail; ++iov) {
1890 #ifdef USE_RBPS
1891 if (iov->iov_base & RBP_SMALLBUF)
1892 memcpy(skb_put(skb, iov->iov_len),
1893 he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1894 else
1895 #endif
1896 memcpy(skb_put(skb, iov->iov_len),
1897 he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1898 }
1899
1900 switch (vcc->qos.aal) {
1901 case ATM_AAL0:
1902 /* 2.10.1.5 raw cell receive */
1903 skb->len = ATM_AAL0_SDU;
1904 skb_set_tail_pointer(skb, skb->len);
1905 break;
1906 case ATM_AAL5:
1907 /* 2.10.1.2 aal5 receive */
1908
1909 skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1910 skb_set_tail_pointer(skb, skb->len);
1911 #ifdef USE_CHECKSUM_HW
1912 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1913 skb->ip_summed = CHECKSUM_COMPLETE;
1914 skb->csum = TCP_CKSUM(skb->data,
1915 he_vcc->pdu_len);
1916 }
1917 #endif
1918 break;
1919 }
1920
1921 #ifdef should_never_happen
1922 if (skb->len > vcc->qos.rxtp.max_sdu)
1923 hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1924 #endif
1925
1926 #ifdef notdef
1927 ATM_SKB(skb)->vcc = vcc;
1928 #endif
1929 spin_unlock(&he_dev->global_lock);
1930 vcc->push(vcc, skb);
1931 spin_lock(&he_dev->global_lock);
1932
1933 atomic_inc(&vcc->stats->rx);
1934
1935 return_host_buffers:
1936 ++pdus_assembled;
1937
1938 for (iov = he_vcc->iov_head;
1939 iov < he_vcc->iov_tail; ++iov) {
1940 #ifdef USE_RBPS
1941 if (iov->iov_base & RBP_SMALLBUF)
1942 rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
1943 else
1944 #endif
1945 rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
1946
1947 rbp->status &= ~RBP_LOANED;
1948 }
1949
1950 he_vcc->iov_tail = he_vcc->iov_head;
1951 he_vcc->pdu_len = 0;
1952
1953 next_rbrq_entry:
1954 he_dev->rbrq_head = (struct he_rbrq *)
1955 ((unsigned long) he_dev->rbrq_base |
1956 RBRQ_MASK(++he_dev->rbrq_head));
1957
1958 }
1959 read_unlock(&vcc_sklist_lock);
1960
1961 if (updated) {
1962 if (updated > he_dev->rbrq_peak)
1963 he_dev->rbrq_peak = updated;
1964
1965 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1966 G0_RBRQ_H + (group * 16));
1967 }
1968
1969 return pdus_assembled;
1970 }
1971
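/*
 * drain the tbrq for this group: for each completed tpd, unmap its dma
 * buffers and pop (or free) the attached skb; an EOS entry instead
 * wakes the closer sleeping in he_close().  the tpd is then returned
 * to the pool (or marked not inuse).
 */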
1972 static void
1973 he_service_tbrq(struct he_dev *he_dev, int group)
1974 {
1975 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1976 ((unsigned long)he_dev->tbrq_base |
1977 he_dev->hsp->group[group].tbrq_tail);
1978 struct he_tpd *tpd;
1979 int slot, updated = 0;
1980 #ifdef USE_TPD_POOL
1981 struct he_tpd *__tpd;
1982 #endif
1983
1984 /* 2.1.6 transmit buffer return queue */
1985
1986 while (he_dev->tbrq_head != tbrq_tail) {
1987 ++updated;
1988
1989 HPRINTK("tbrq%d 0x%x%s%s\n",
1990 group,
1991 TBRQ_TPD(he_dev->tbrq_head),
1992 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1993 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1994 #ifdef USE_TPD_POOL
1995 tpd = NULL;
1996 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1997 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1998 tpd = __tpd;
1999 list_del(&__tpd->entry);
2000 break;
2001 }
2002 }
2003
2004 if (tpd == NULL) {
2005 hprintk("unable to locate tpd for dma buffer %x\n",
2006 TBRQ_TPD(he_dev->tbrq_head));
2007 goto next_tbrq_entry;
2008 }
2009 #else
2010 tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
2011 #endif
2012
2013 if (TBRQ_EOS(he_dev->tbrq_head)) {
2014 if (tpd->vcc) {
2015 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
2016 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
2017 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
2018 }
2019 goto next_tbrq_entry;
2020 }
2021
2022 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2023 if (tpd->iovec[slot].addr)
2024 pci_unmap_single(he_dev->pci_dev,
2025 tpd->iovec[slot].addr,
2026 tpd->iovec[slot].len & TPD_LEN_MASK,
2027 PCI_DMA_TODEVICE);
2028 if (tpd->iovec[slot].len & TPD_LST)
2029 break;
2031 }
2032
2033 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
2034 if (tpd->vcc && tpd->vcc->pop)
2035 tpd->vcc->pop(tpd->vcc, tpd->skb);
2036 else
2037 dev_kfree_skb_any(tpd->skb);
2038 }
2039
2040 next_tbrq_entry:
2041 #ifdef USE_TPD_POOL
2042 if (tpd)
2043 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2044 #else
2045 tpd->inuse = 0;
2046 #endif
2047 he_dev->tbrq_head = (struct he_tbrq *)
2048 ((unsigned long) he_dev->tbrq_base |
2049 TBRQ_MASK(++he_dev->tbrq_head));
2050 }
2051
2052 if (updated) {
2053 if (updated > he_dev->tbrq_peak)
2054 he_dev->tbrq_peak = updated;
2055
2056 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
2057 G0_TBRQ_H + (group * 16));
2058 }
2059 }
2060
2061
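/*
 * replenish the group's large buffer pool: mark each free entry
 * RBP_LOANED and advance rbpl_tail, stopping short of the head,
 * then tell the adapter about the new tail.
 */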
2062 static void
2063 he_service_rbpl(struct he_dev *he_dev, int group)
2064 {
2065 struct he_rbp *newtail;
2066 struct he_rbp *rbpl_head;
2067 int moved = 0;
2068
2069 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2070 RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
2071
2072 for (;;) {
2073 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2074 RBPL_MASK(he_dev->rbpl_tail+1));
2075
2076 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
2077 if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2078 break;
2079
2080 newtail->status |= RBP_LOANED;
2081 he_dev->rbpl_tail = newtail;
2082 ++moved;
2083 }
2084
2085 if (moved)
2086 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2087 }
2088
2089 #ifdef USE_RBPS
2090 static void
2091 he_service_rbps(struct he_dev *he_dev, int group)
2092 {
2093 struct he_rbp *newtail;
2094 struct he_rbp *rbps_head;
2095 int moved = 0;
2096
2097 rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2098 RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
2099
2100 for (;;) {
2101 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2102 RBPS_MASK(he_dev->rbps_tail+1));
2103
2104 /* table 3.42 -- rbps_tail should never be set to rbps_head */
2105 if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
2106 break;
2107
2108 newtail->status |= RBP_LOANED;
2109 he_dev->rbps_tail = newtail;
2110 ++moved;
2111 }
2112
2113 if (moved)
2114 he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
2115 }
2116 #endif /* USE_RBPS */
2117
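/*
 * bottom half: walk the interrupt event queue the adapter writes to
 * host memory, dispatching each entry to the matching rbrq/tbrq/rbp
 * service routine, then write back the new queue position.
 */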
2118 static void
2119 he_tasklet(unsigned long data)
2120 {
2121 unsigned long flags;
2122 struct he_dev *he_dev = (struct he_dev *) data;
2123 int group, type;
2124 int updated = 0;
2125
2126 HPRINTK("tasklet (0x%lx)\n", data);
2127 #ifdef USE_TASKLET
2128 spin_lock_irqsave(&he_dev->global_lock, flags);
2129 #endif
2130
2131 while (he_dev->irq_head != he_dev->irq_tail) {
2132 ++updated;
2133
2134 type = ITYPE_TYPE(he_dev->irq_head->isw);
2135 group = ITYPE_GROUP(he_dev->irq_head->isw);
2136
2137 switch (type) {
2138 case ITYPE_RBRQ_THRESH:
2139 HPRINTK("rbrq%d threshold\n", group);
2140 /* fall through */
2141 case ITYPE_RBRQ_TIMER:
2142 if (he_service_rbrq(he_dev, group)) {
2143 he_service_rbpl(he_dev, group);
2144 #ifdef USE_RBPS
2145 he_service_rbps(he_dev, group);
2146 #endif /* USE_RBPS */
2147 }
2148 break;
2149 case ITYPE_TBRQ_THRESH:
2150 HPRINTK("tbrq%d threshold\n", group);
2151 /* fall through */
2152 case ITYPE_TPD_COMPLETE:
2153 he_service_tbrq(he_dev, group);
2154 break;
2155 case ITYPE_RBPL_THRESH:
2156 he_service_rbpl(he_dev, group);
2157 break;
2158 case ITYPE_RBPS_THRESH:
2159 #ifdef USE_RBPS
2160 he_service_rbps(he_dev, group);
2161 #endif /* USE_RBPS */
2162 break;
2163 case ITYPE_PHY:
2164 HPRINTK("phy interrupt\n");
2165 #ifdef CONFIG_ATM_HE_USE_SUNI
2166 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2167 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
2168 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
2169 spin_lock_irqsave(&he_dev->global_lock, flags);
2170 #endif
2171 break;
2172 case ITYPE_OTHER:
2173 switch (type|group) {
2174 case ITYPE_PARITY:
2175 hprintk("parity error\n");
2176 break;
2177 case ITYPE_ABORT:
2178 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
2179 break;
2180 }
2181 break;
2182 case ITYPE_TYPE(ITYPE_INVALID):
2183 /* see 8.1.1 -- check all queues */
2184
2185 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2186
2187 he_service_rbrq(he_dev, 0);
2188 he_service_rbpl(he_dev, 0);
2189 #ifdef USE_RBPS
2190 he_service_rbps(he_dev, 0);
2191 #endif /* USE_RBPS */
2192 he_service_tbrq(he_dev, 0);
2193 break;
2194 default:
2195 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2196 }
2197
2198 he_dev->irq_head->isw = ITYPE_INVALID;
2199
2200 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2201 }
2202
2203 if (updated) {
2204 if (updated > he_dev->irq_peak)
2205 he_dev->irq_peak = updated;
2206
2207 he_writel(he_dev,
2208 IRQ_SIZE(CONFIG_IRQ_SIZE) |
2209 IRQ_THRESH(CONFIG_IRQ_THRESH) |
2210 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2211 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2212 }
2213 #ifdef USE_TASKLET
2214 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2215 #endif
2216 }
2217
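/*
 * hard interrupt handler: pick up the adapter's new irq tail; if the
 * queue advanced, schedule the tasklet (or run it inline) and clear
 * the interrupt fifo.
 */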
2218 static irqreturn_t
2219 he_irq_handler(int irq, void *dev_id)
2220 {
2221 unsigned long flags;
2222 struct he_dev *he_dev = (struct he_dev * )dev_id;
2223 int handled = 0;
2224
2225 if (he_dev == NULL)
2226 return IRQ_NONE;
2227
2228 spin_lock_irqsave(&he_dev->global_lock, flags);
2229
2230 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2231 (*he_dev->irq_tailoffset << 2));
2232
2233 if (he_dev->irq_tail == he_dev->irq_head) {
2234 HPRINTK("tailoffset not updated?\n");
2235 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2236 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2237 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
2238 }
2239
2240 #ifdef DEBUG
2241 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2242 hprintk("spurious (or shared) interrupt?\n");
2243 #endif
2244
2245 if (he_dev->irq_head != he_dev->irq_tail) {
2246 handled = 1;
2247 #ifdef USE_TASKLET
2248 tasklet_schedule(&he_dev->tasklet);
2249 #else
2250 he_tasklet((unsigned long) he_dev);
2251 #endif
2252 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */
2253 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */
2254 }
2255 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2256 return IRQ_RETVAL(handled);
2258 }
2259
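/*
 * append a tpd to the transmit packet descriptor ready queue for the
 * given cid.  caller holds he_dev->global_lock.  if the ring turns
 * out to be full the pdu is dropped (see the FIXME below).
 */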
2260 static __inline__ void
2261 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2262 {
2263 struct he_tpdrq *new_tail;
2264
2265 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2266 tpd, cid, he_dev->tpdrq_tail);
2267
2268 /* new_tail = he_dev->tpdrq_tail; */
2269 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2270 TPDRQ_MASK(he_dev->tpdrq_tail+1));
2271
2272 /*
2273 * check to see if we are about to set the tail == head
2274 * if true, update the head pointer from the adapter
2275 * to see if this is really the case (reading the queue
2276 * head for every enqueue would be unnecessarily slow)
2277 */
2278
2279 if (new_tail == he_dev->tpdrq_head) {
2280 he_dev->tpdrq_head = (struct he_tpdrq *)
2281 (((unsigned long)he_dev->tpdrq_base) |
2282 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2283
2284 if (new_tail == he_dev->tpdrq_head) {
2285 int slot;
2286
2287 hprintk("tpdrq full (cid 0x%x)\n", cid);
2288 /*
2289 * FIXME
2290 * push tpd onto a transmit backlog queue
2291 * after service_tbrq, service the backlog
2292 * for now, we just drop the pdu
2293 */
2294 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2295 if (tpd->iovec[slot].addr)
2296 pci_unmap_single(he_dev->pci_dev,
2297 tpd->iovec[slot].addr,
2298 tpd->iovec[slot].len & TPD_LEN_MASK,
2299 PCI_DMA_TODEVICE);
2300 }
2301 if (tpd->skb) {
2302 if (tpd->vcc->pop)
2303 tpd->vcc->pop(tpd->vcc, tpd->skb);
2304 else
2305 dev_kfree_skb_any(tpd->skb);
2306 atomic_inc(&tpd->vcc->stats->tx_err);
2307 }
2308 #ifdef USE_TPD_POOL
2309 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2310 #else
2311 tpd->inuse = 0;
2312 #endif
2313 return;
2314 }
2315 }
2316
2317 /* 2.1.5 transmit packet descriptor ready queue */
2318 #ifdef USE_TPD_POOL
2319 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2320 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2321 #else
2322 he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
2323 (TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
2324 #endif
2325 he_dev->tpdrq_tail->cid = cid;
2326 wmb();
2327
2328 he_dev->tpdrq_tail = new_tail;
2329
2330 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2331 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
2332 }
2333
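/*
 * open a connection: allocate per-vcc state, then program the transmit
 * connection state ram (tsr0..tsr14) for ubr or cbr service and/or the
 * receive state ram (rsr0/rsr1/rsr4) for the requested aal.
 */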
2334 static int
2335 he_open(struct atm_vcc *vcc)
2336 {
2337 unsigned long flags;
2338 struct he_dev *he_dev = HE_DEV(vcc->dev);
2339 struct he_vcc *he_vcc;
2340 int err = 0;
2341 unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2342 short vpi = vcc->vpi;
2343 int vci = vcc->vci;
2344
2345 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2346 return 0;
2347
2348 HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2349
2350 set_bit(ATM_VF_ADDR, &vcc->flags);
2351
2352 cid = he_mkcid(he_dev, vpi, vci);
2353
2354 he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2355 if (he_vcc == NULL) {
2356 hprintk("unable to allocate he_vcc during open\n");
2357 return -ENOMEM;
2358 }
2359
2360 he_vcc->iov_tail = he_vcc->iov_head;
2361 he_vcc->pdu_len = 0;
2362 he_vcc->rc_index = -1;
2363
2364 init_waitqueue_head(&he_vcc->rx_waitq);
2365 init_waitqueue_head(&he_vcc->tx_waitq);
2366
2367 vcc->dev_data = he_vcc;
2368
2369 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2370 int pcr_goal;
2371
2372 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2373 if (pcr_goal == 0)
2374 pcr_goal = he_dev->atm_dev->link_rate;
2375 if (pcr_goal < 0) /* means round down, technically */
2376 pcr_goal = -pcr_goal;
2377
2378 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2379
2380 switch (vcc->qos.aal) {
2381 case ATM_AAL5:
2382 tsr0_aal = TSR0_AAL5;
2383 tsr4 = TSR4_AAL5;
2384 break;
2385 case ATM_AAL0:
2386 tsr0_aal = TSR0_AAL0_SDU;
2387 tsr4 = TSR4_AAL0_SDU;
2388 break;
2389 default:
2390 err = -EINVAL;
2391 goto open_failed;
2392 }
2393
2394 spin_lock_irqsave(&he_dev->global_lock, flags);
2395 tsr0 = he_readl_tsr0(he_dev, cid);
2396 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2397
2398 if (TSR0_CONN_STATE(tsr0) != 0) {
2399 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2400 err = -EBUSY;
2401 goto open_failed;
2402 }
2403
2404 switch (vcc->qos.txtp.traffic_class) {
2405 case ATM_UBR:
2406 /* 2.3.3.1 open connection ubr */
2407
2408 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2409 TSR0_USE_WMIN | TSR0_UPDATE_GER;
2410 break;
2411
2412 case ATM_CBR:
2413 /* 2.3.3.2 open connection cbr */
2414
2415 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2416 if ((he_dev->total_bw + pcr_goal)
2417 > (he_dev->atm_dev->link_rate * 9 / 10))
2418 {
2419 err = -EBUSY;
2420 goto open_failed;
2421 }
2422
2423 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */
2424
2425 /* find an unused cs_stper register */
2426 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2427 if (he_dev->cs_stper[reg].inuse == 0 ||
2428 he_dev->cs_stper[reg].pcr == pcr_goal)
2429 break;
2430
2431 if (reg == HE_NUM_CS_STPER) {
2432 err = -EBUSY;
2433 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2434 goto open_failed;
2435 }
2436
2437 he_dev->total_bw += pcr_goal;
2438
2439 he_vcc->rc_index = reg;
2440 ++he_dev->cs_stper[reg].inuse;
2441 he_dev->cs_stper[reg].pcr = pcr_goal;
2442
2443 clock = he_is622(he_dev) ? 66667000 : 50000000;
2444 period = clock / pcr_goal;
2445
2446 HPRINTK("rc_index = %d period = %d\n",
2447 reg, period);
2448
2449 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2450 CS_STPER0 + reg);
2451 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2452
2453 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2454 TSR0_RC_INDEX(reg);
2455
2456 break;
2457 default:
2458 err = -EINVAL;
2459 goto open_failed;
2460 }
2461
2462 spin_lock_irqsave(&he_dev->global_lock, flags);
2463
2464 he_writel_tsr0(he_dev, tsr0, cid);
2465 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2466 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2467 TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2468 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2469 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2470
2471 he_writel_tsr3(he_dev, 0x0, cid);
2472 he_writel_tsr5(he_dev, 0x0, cid);
2473 he_writel_tsr6(he_dev, 0x0, cid);
2474 he_writel_tsr7(he_dev, 0x0, cid);
2475 he_writel_tsr8(he_dev, 0x0, cid);
2476 he_writel_tsr10(he_dev, 0x0, cid);
2477 he_writel_tsr11(he_dev, 0x0, cid);
2478 he_writel_tsr12(he_dev, 0x0, cid);
2479 he_writel_tsr13(he_dev, 0x0, cid);
2480 he_writel_tsr14(he_dev, 0x0, cid);
2481 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */
2482 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2483 }
2484
2485 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2486 unsigned aal;
2487
2488 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2489 &HE_VCC(vcc)->rx_waitq);
2490
2491 switch (vcc->qos.aal) {
2492 case ATM_AAL5:
2493 aal = RSR0_AAL5;
2494 break;
2495 case ATM_AAL0:
2496 aal = RSR0_RAWCELL;
2497 break;
2498 default:
2499 err = -EINVAL;
2500 goto open_failed;
2501 }
2502
2503 spin_lock_irqsave(&he_dev->global_lock, flags);
2504
2505 rsr0 = he_readl_rsr0(he_dev, cid);
2506 if (rsr0 & RSR0_OPEN_CONN) {
2507 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2508
2509 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2510 err = -EBUSY;
2511 goto open_failed;
2512 }
2513
2514 #ifdef USE_RBPS
2515 rsr1 = RSR1_GROUP(0);
2516 rsr4 = RSR4_GROUP(0);
2517 #else /* !USE_RBPS */
2518 rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
2519 rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
2520 #endif /* USE_RBPS */
2521 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2522 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2523
2524 #ifdef USE_CHECKSUM_HW
2525 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2526 rsr0 |= RSR0_TCP_CKSUM;
2527 #endif
2528
2529 he_writel_rsr4(he_dev, rsr4, cid);
2530 he_writel_rsr1(he_dev, rsr1, cid);
2531 /* 5.1.11 -- the open/closed indication in rsr0
2532 must be the last parameter initialized */
2533 he_writel_rsr0(he_dev,
2534 rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2535 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2536
2537 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2538 }
2539
2540 open_failed:
2541
2542 if (err) {
2543 kfree(he_vcc);
2544 clear_bit(ATM_VF_ADDR, &vcc->flags);
2545 } else
2546 set_bit(ATM_VF_READY, &vcc->flags);
2548
2549 return err;
2550 }
2551
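/*
 * close a connection: issue the receive close through the mailbox and
 * wait on rx_waitq, then drain pending transmit, send an end-of-stream
 * tpd and wait on tx_waitq, finally releasing any cbr rate-controller
 * slot the connection held.
 */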
2552 static void
2553 he_close(struct atm_vcc *vcc)
2554 {
2555 unsigned long flags;
2556 DECLARE_WAITQUEUE(wait, current);
2557 struct he_dev *he_dev = HE_DEV(vcc->dev);
2558 struct he_tpd *tpd;
2559 unsigned cid;
2560 struct he_vcc *he_vcc = HE_VCC(vcc);
2561 #define MAX_RETRY 30
2562 int retry = 0, sleep = 1, tx_inuse;
2563
2564 HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2565
2566 clear_bit(ATM_VF_READY, &vcc->flags);
2567 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2568
2569 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2570 int timeout;
2571
2572 HPRINTK("close rx cid 0x%x\n", cid);
2573
2574 /* 2.7.2.2 close receive operation */
2575
2576 /* wait for previous close (if any) to finish */
2577
2578 spin_lock_irqsave(&he_dev->global_lock, flags);
2579 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2580 HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2581 udelay(250);
2582 }
2583
2584 set_current_state(TASK_UNINTERRUPTIBLE);
2585 add_wait_queue(&he_vcc->rx_waitq, &wait);
2586
2587 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2588 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2589 he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2590 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2591
2592 timeout = schedule_timeout(30*HZ);
2593
2594 remove_wait_queue(&he_vcc->rx_waitq, &wait);
2595 set_current_state(TASK_RUNNING);
2596
2597 if (timeout == 0)
2598 hprintk("close rx timeout cid 0x%x\n", cid);
2599
2600 HPRINTK("close rx cid 0x%x complete\n", cid);
2601
2602 }
2603
2604 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2605 volatile unsigned tsr4, tsr0;
2606 int timeout;
2607
2608 HPRINTK("close tx cid 0x%x\n", cid);
2609
2610 /* 2.1.2
2611 *
2612 * ... the host must first stop queueing packets to the TPDRQ
2613 * on the connection to be closed, then wait for all outstanding
2614 * packets to be transmitted and their buffers returned to the
2615 * TBRQ. When the last packet on the connection arrives in the
2616 * TBRQ, the host issues the close command to the adapter.
2617 */
2618
2619 while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
2620 (retry < MAX_RETRY)) {
2621 msleep(sleep);
2622 if (sleep < 250)
2623 sleep = sleep * 2;
2624
2625 ++retry;
2626 }
2627
2628 if (tx_inuse)
2629 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2630
2631 /* 2.3.1.1 generic close operations with flush */
2632
2633 spin_lock_irqsave(&he_dev->global_lock, flags);
2634 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2635 /* also clears TSR4_SESSION_ENDED */
2636
2637 switch (vcc->qos.txtp.traffic_class) {
2638 case ATM_UBR:
2639 he_writel_tsr1(he_dev,
2640 TSR1_MCR(rate_to_atmf(200000))
2641 | TSR1_PCR(0), cid);
2642 break;
2643 case ATM_CBR:
2644 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2645 break;
2646 }
2647 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */
2648
2649 tpd = __alloc_tpd(he_dev);
2650 if (tpd == NULL) {
2651 hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2652 goto close_tx_incomplete;
2653 }
2654 tpd->status |= TPD_EOS | TPD_INT;
2655 tpd->skb = NULL;
2656 tpd->vcc = vcc;
2657 wmb();
2658
2659 set_current_state(TASK_UNINTERRUPTIBLE);
2660 add_wait_queue(&he_vcc->tx_waitq, &wait);
2661 __enqueue_tpd(he_dev, tpd, cid);
2662 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2663
2664 timeout = schedule_timeout(30*HZ);
2665
2666 remove_wait_queue(&he_vcc->tx_waitq, &wait);
2667 set_current_state(TASK_RUNNING);
2668
2669 spin_lock_irqsave(&he_dev->global_lock, flags);
2670
2671 if (timeout == 0) {
2672 hprintk("close tx timeout cid 0x%x\n", cid);
2673 goto close_tx_incomplete;
2674 }
2675
2676 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2677 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2678 udelay(250);
2679 }
2680
2681 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2682 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2683 udelay(250);
2684 }
2685
2686 close_tx_incomplete:
2687
2688 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2689 int reg = he_vcc->rc_index;
2690
2691 HPRINTK("cs_stper reg = %d\n", reg);
2692
2693 if (he_dev->cs_stper[reg].inuse == 0)
2694 hprintk("cs_stper[%d].inuse = 0!\n", reg);
2695 else
2696 --he_dev->cs_stper[reg].inuse;
2697
2698 he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2699 }
2700 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2701
2702 HPRINTK("close tx cid 0x%x complete\n", cid);
2703 }
2704
2705 kfree(he_vcc);
2706
2707 clear_bit(ATM_VF_ADDR, &vcc->flags);
2708 }
2709
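/*
 * transmit one pdu: build a tpd (a single mapped buffer, or one iovec
 * slot per fragment under USE_SCATTERGATHER) and enqueue it on the
 * tpdrq; completion comes back through the tbrq.
 */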
2710 static int
2711 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2712 {
2713 unsigned long flags;
2714 struct he_dev *he_dev = HE_DEV(vcc->dev);
2715 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2716 struct he_tpd *tpd;
2717 #ifdef USE_SCATTERGATHER
2718 int i, slot = 0;
2719 #endif
2720
2721 #define HE_TPD_BUFSIZE 0xffff
2722
2723 HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2724
2725 if ((skb->len > HE_TPD_BUFSIZE) ||
2726 ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2727 hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2728 if (vcc->pop)
2729 vcc->pop(vcc, skb);
2730 else
2731 dev_kfree_skb_any(skb);
2732 atomic_inc(&vcc->stats->tx_err);
2733 return -EINVAL;
2734 }
2735
2736 #ifndef USE_SCATTERGATHER
2737 if (skb_shinfo(skb)->nr_frags) {
2738 hprintk("no scatter/gather support\n");
2739 if (vcc->pop)
2740 vcc->pop(vcc, skb);
2741 else
2742 dev_kfree_skb_any(skb);
2743 atomic_inc(&vcc->stats->tx_err);
2744 return -EINVAL;
2745 }
2746 #endif
2747 spin_lock_irqsave(&he_dev->global_lock, flags);
2748
2749 tpd = __alloc_tpd(he_dev);
2750 if (tpd == NULL) {
2751 if (vcc->pop)
2752 vcc->pop(vcc, skb);
2753 else
2754 dev_kfree_skb_any(skb);
2755 atomic_inc(&vcc->stats->tx_err);
2756 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2757 return -ENOMEM;
2758 }
2759
2760 if (vcc->qos.aal == ATM_AAL5)
2761 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2762 else {
2763 char *pti_clp = (void *) (skb->data + 3);
2764 int clp, pti;
2765
2766 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2767 clp = (*pti_clp & ATM_HDR_CLP);
2768 tpd->status |= TPD_CELLTYPE(pti);
2769 if (clp)
2770 tpd->status |= TPD_CLP;
2771
2772 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2773 }
2774
2775 #ifdef USE_SCATTERGATHER
2776 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2777 skb->len - skb->data_len, PCI_DMA_TODEVICE);
2778 tpd->iovec[slot].len = skb->len - skb->data_len;
2779 ++slot;
2780
2781 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2782 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2783
2784 if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
2785 tpd->vcc = vcc;
2786 tpd->skb = NULL; /* not the last fragment
2787 so don't ->push() yet */
2788 wmb();
2789
2790 __enqueue_tpd(he_dev, tpd, cid);
2791 tpd = __alloc_tpd(he_dev);
2792 if (tpd == NULL) {
2793 if (vcc->pop)
2794 vcc->pop(vcc, skb);
2795 else
2796 dev_kfree_skb_any(skb);
2797 atomic_inc(&vcc->stats->tx_err);
2798 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2799 return -ENOMEM;
2800 }
2801 tpd->status |= TPD_USERCELL;
2802 slot = 0;
2803 }
2804
2805 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2806 (void *) page_address(frag->page) + frag->page_offset,
2807 frag->size, PCI_DMA_TODEVICE);
2808 tpd->iovec[slot].len = frag->size;
2809 ++slot;
2810
2811 }
2812
2813 tpd->iovec[slot - 1].len |= TPD_LST;
2814 #else
2815 tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2816 tpd->length0 = skb->len | TPD_LST;
2817 #endif
2818 tpd->status |= TPD_INT;
2819
2820 tpd->vcc = vcc;
2821 tpd->skb = skb;
2822 wmb();
2823 ATM_SKB(skb)->vcc = vcc;
2824
2825 __enqueue_tpd(he_dev, tpd, cid);
2826 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2827
2828 atomic_inc(&vcc->stats->tx);
2829
2830 return 0;
2831 }
2832
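/*
 * HE_GET_REG lets a privileged user read adapter registers.  a
 * hypothetical debug tool would fill in struct he_ioctl_reg with
 * .type (pci/rcm/tcm/mbox) and .addr, issue the ioctl on a socket
 * bound to this device, and read the result back from .val.
 */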
2833 static int
2834 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2835 {
2836 unsigned long flags;
2837 struct he_dev *he_dev = HE_DEV(atm_dev);
2838 struct he_ioctl_reg reg;
2839 int err = 0;
2840
2841 switch (cmd) {
2842 case HE_GET_REG:
2843 if (!capable(CAP_NET_ADMIN))
2844 return -EPERM;
2845
2846 if (copy_from_user(&reg, arg,
2847 sizeof(struct he_ioctl_reg)))
2848 return -EFAULT;
2849
2850 spin_lock_irqsave(&he_dev->global_lock, flags);
2851 switch (reg.type) {
2852 case HE_REGTYPE_PCI:
2853 if (reg.addr >= HE_REGMAP_SIZE) { /* addr is unsigned */
2854 err = -EINVAL;
2855 break;
2856 }
2857
2858 reg.val = he_readl(he_dev, reg.addr);
2859 break;
2860 case HE_REGTYPE_RCM:
2861 reg.val =
2862 he_readl_rcm(he_dev, reg.addr);
2863 break;
2864 case HE_REGTYPE_TCM:
2865 reg.val =
2866 he_readl_tcm(he_dev, reg.addr);
2867 break;
2868 case HE_REGTYPE_MBOX:
2869 reg.val =
2870 he_readl_mbox(he_dev, reg.addr);
2871 break;
2872 default:
2873 err = -EINVAL;
2874 break;
2875 }
2876 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2877 if (err == 0)
2878 if (copy_to_user(arg, &reg,
2879 sizeof(struct he_ioctl_reg)))
2880 return -EFAULT;
2881 break;
2882 default:
2883 #ifdef CONFIG_ATM_HE_USE_SUNI
2884 if (atm_dev->phy && atm_dev->phy->ioctl)
2885 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2886 #else /* CONFIG_ATM_HE_USE_SUNI */
2887 err = -EINVAL;
2888 #endif /* CONFIG_ATM_HE_USE_SUNI */
2889 break;
2890 }
2891
2892 return err;
2893 }
2894
2895 static void
2896 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2897 {
2898 unsigned long flags;
2899 struct he_dev *he_dev = HE_DEV(atm_dev);
2900
2901 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2902
2903 spin_lock_irqsave(&he_dev->global_lock, flags);
2904 he_writel(he_dev, val, FRAMER + (addr*4));
2905 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2906 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2907 }
2908
2909
2910 static unsigned char
2911 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2912 {
2913 unsigned long flags;
2914 struct he_dev *he_dev = HE_DEV(atm_dev);
2915 unsigned reg;
2916
2917 spin_lock_irqsave(&he_dev->global_lock, flags);
2918 reg = he_readl(he_dev, FRAMER + (addr*4));
2919 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2920
2921 HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2922 return reg;
2923 }
2924
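/*
 * /proc/atm read handler: emits one line per call, selected by *pos;
 * board id, accumulated cell-drop counters, queue peak statistics and
 * the cbr rate-controller table.
 */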
2925 static int
2926 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2927 {
2928 unsigned long flags;
2929 struct he_dev *he_dev = HE_DEV(dev);
2930 int left, i;
2931 #ifdef notdef
2932 struct he_rbrq *rbrq_tail;
2933 struct he_tpdrq *tpdrq_head;
2934 int rbpl_head, rbpl_tail, inuse;
2935 #endif
2936 static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2937
2938
2939 left = *pos;
2940 if (!left--)
2941 return sprintf(page, "ATM he driver\n");
2942
2943 if (!left--)
2944 return sprintf(page, "%s%s\n\n",
2945 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2946
2947 if (!left--)
2948 return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
2949
2950 spin_lock_irqsave(&he_dev->global_lock, flags);
2951 mcc += he_readl(he_dev, MCC);
2952 oec += he_readl(he_dev, OEC);
2953 dcc += he_readl(he_dev, DCC);
2954 cec += he_readl(he_dev, CEC);
2955 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2956
2957 if (!left--)
2958 return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
2959 mcc, oec, dcc, cec);
2960
2961 if (!left--)
2962 return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
2963 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2964
2965 if (!left--)
2966 return sprintf(page, "tpdrq_size = %d inuse = ?\n",
2967 CONFIG_TPDRQ_SIZE);
2968
2969 if (!left--)
2970 return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
2971 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2972
2973 if (!left--)
2974 return sprintf(page, "tbrq_size = %d peak = %d\n",
2975 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2976
2977
2978 #ifdef notdef
2979 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2980 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2981
2982 inuse = rbpl_head - rbpl_tail;
2983 if (inuse < 0)
2984 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2985 inuse /= sizeof(struct he_rbp);
2986
2987 if (!left--)
2988 return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
2989 CONFIG_RBPL_SIZE, inuse);
2990 #endif
2991
2992 if (!left--)
2993 return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");
2994
2995 for (i = 0; i < HE_NUM_CS_STPER; ++i)
2996 if (!left--)
2997 return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
2998 he_dev->cs_stper[i].pcr,
2999 he_dev->cs_stper[i].inuse);
3000
3001 if (!left--)
3002 return sprintf(page, "total bw (cbr): %d (limit %d)\n",
3003 he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
3004
3005 return 0;
3006 }
3007
3008 /* eeprom routines -- see 4.7 */
3009
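/*
 * bit-bang one byte out of the serial eeprom: clock out the READ
 * opcode and the 8-bit address using readtab[]/clocktab[], then clock
 * the 8 data bits back in through the ID_DOUT line of HOST_CNTL.
 */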
3010 static u8 read_prom_byte(struct he_dev *he_dev, int addr)
3011 {
3012 u32 val = 0, tmp_read = 0;
3013 int i, j = 0;
3014 u8 byte_read = 0;
3015
3016 val = readl(he_dev->membase + HOST_CNTL);
3017 val &= 0xFFFFE0FF;
3018
3019 /* Turn on write enable */
3020 val |= 0x800;
3021 he_writel(he_dev, val, HOST_CNTL);
3022
3023 /* Send READ instruction */
3024 for (i = 0; i < ARRAY_SIZE(readtab); i++) {
3025 he_writel(he_dev, val | readtab[i], HOST_CNTL);
3026 udelay(EEPROM_DELAY);
3027 }
3028
3029 /* Next, we need to send the byte address to read from */
3030 for (i = 7; i >= 0; i--) {
3031 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3032 udelay(EEPROM_DELAY);
3033 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3034 udelay(EEPROM_DELAY);
3035 }
3036
3037 j = 0;
3038
3039 val &= 0xFFFFF7FF; /* Turn off write enable */
3040 he_writel(he_dev, val, HOST_CNTL);
3041
3042 /* Now, we can read data from the EEPROM by clocking it in */
3043 for (i = 7; i >= 0; i--) {
3044 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3045 udelay(EEPROM_DELAY);
3046 tmp_read = he_readl(he_dev, HOST_CNTL);
3047 byte_read |= (unsigned char)
3048 ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
3049 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3050 udelay(EEPROM_DELAY);
3051 }
3052
3053 he_writel(he_dev, val | ID_CS, HOST_CNTL);
3054 udelay(EEPROM_DELAY);
3055
3056 return byte_read;
3057 }
3058
3059 MODULE_LICENSE("GPL");
3060 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
3061 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
3062 module_param(disable64, bool, 0);
3063 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
3064 module_param(nvpibits, short, 0);
3065 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
3066 module_param(nvcibits, short, 0);
3067 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
3068 module_param(rx_skb_reserve, short, 0);
3069 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
3070 module_param(irq_coalesce, bool, 0);
3071 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
3072 module_param(sdh, bool, 0);
3073 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
3074
3075 static struct pci_device_id he_pci_tbl[] = {
3076 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
3077 0, 0, 0 },
3078 { 0, }
3079 };
3080
3081 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
3082
3083 static struct pci_driver he_driver = {
3084 .name = "he",
3085 .probe = he_init_one,
3086 .remove = __devexit_p(he_remove_one),
3087 .id_table = he_pci_tbl,
3088 };
3089
3090 static int __init he_init(void)
3091 {
3092 return pci_register_driver(&he_driver);
3093 }
3094
3095 static void __exit he_cleanup(void)
3096 {
3097 pci_unregister_driver(&he_driver);
3098 }
3099
3100 module_init(he_init);
3101 module_exit(he_cleanup);