/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};
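
/*
 * In readtab[] above each CLK_LOW/CLK_HIGH pair clocks one bit of the
 * 8-bit EEPROM read opcode (0000 0011b) out on the serial input line,
 * most significant bit first: entries OR'd with SI_HIGH drive a 1, all
 * other entries drive a 0, giving the six 0s and two 1s marked in the
 * per-entry comments.
 */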

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);	/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}

#define he_writel_rcm(dev, val, reg)				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg)				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg)				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
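
/*
 * For example, with (dev)->vcibits == 10, vpi 2 / vci 100 becomes
 * cid = ((2 << 10) | 100) & 0x1fff = 0x864; __find_vcc() below undoes
 * this split with cid >> vcibits and cid & ((1 << vcibits) - 1).
 */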

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}

static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "ATM he driver\n");

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
	spin_lock_init(&he_dev->global_lock);

	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void __devexit
he_remove_one(struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}


static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
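
/*
 * The encoding above is rate = 2^exp * (1 + mantissa/512).  For example,
 * rate_to_atmf(10) shifts 10 << 9 = 5120 down to 640 with exp = 3, so the
 * stored mantissa is 640 & 0x1ff = 128, and 2^3 * (1 + 128/512) = 10 cps.
 */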

static void __devinit
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}

static int __devinit
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connections */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
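
	/*
	 * Row 0 thus runs from link_rate down to (17/32) * link_rate in
	 * steps of link_rate/32; each later row repeats the row above at
	 * half the rate (a quarter for the last row), so the 256 entries
	 * span from the full link rate down to about link_rate / 2^16.
	 */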

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
			instead of '/ 512', use '>> 9' to prevent a call
			to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */
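
		/*
		 * Worked example: rate_atmf = 0xff gives man = 0x1f << 4 = 496
		 * and exp = 7, so rate_cps = (1 << 7) * (496 + 512) >> 9 = 252
		 * cells per second.
		 */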

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	/* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int __devinit
he_init_group(struct he_dev *he_dev, int group)
{
	int i;

	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	/* large buffer pool */
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
					    CONFIG_RBPL_BUFSIZE, 8, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		return -ENOMEM;
	}

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbpl_virt == NULL) {
		hprintk("failed to alloc rbpl_virt\n");
		goto out_free_rbpl_base;
	}

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			goto out_free_rbpl_virt;

		he_dev->rbpl_virt[i].virt = cpuaddr;
		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
		he_dev->rbpl_base[i].phys = dma_handle;
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
				RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
				RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl_virt;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

out_free_rbpq_base:
	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
			sizeof(struct he_rbrq), he_dev->rbrq_base,
			he_dev->rbrq_phys);
	i = CONFIG_RBPL_SIZE;
out_free_rbpl_virt:
	while (i--)
		pci_pool_free(he_dev->rbpl_pool, he_dev->rbpl_virt[i].virt,
				he_dev->rbpl_base[i].phys);
	kfree(he_dev->rbpl_virt);

out_free_rbpl_base:
	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
			sizeof(struct he_rbp), he_dev->rbpl_base,
			he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	pci_pool_destroy(he_dev->rbpl_pool);

	return -ENOMEM;
}

static int __devinit
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int __devinit
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
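	/* i.e. 1 + 16 + 192 = 209, using the worst-case values quoted above */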
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
					he_dev->media & 0x40 ? "SM" : "MM",
						dev->esi[0],
						dev->esi[1],
						dev->esi[2],
						dev->esi[3],
						dev->esi[4],
						dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023 bytes  0 _______________________2047 bytes
	 *         |            |            |                   |   |
	 *         |  utility   |            |        rx0        |   |
	 *        5|____________|         255|___________________| u |
	 *        6|            |         256|                   | t |
	 *         |            |            |                   | i |
	 *         |    rx0     |    row     |        tx         | l |
	 *         |            |            |                   | i |
	 *         |            |         767|___________________| t |
	 *      517|____________|         768|                   | y |
	 * row  518|            |            |        rx1        |   |
	 *         |            |        1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}
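
	/*
	 * Either way the split satisfies vpibits + vcibits == HE_MAXCIDBITS,
	 * which is what limits the device to the 4096 connection ids noted
	 * at the top of this file and keeps he_mkcid()'s shift and mask
	 * consistent with the lookup in __find_vcc().
	 */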


	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits), RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |      rx0/1        |
	 *             |       LBM         |   link lists of local
	 *             |       tx          |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_isMM(he_dev))
		suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		int i;

		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
	struct he_tpd *tpd;
	dma_addr_t dma_handle;

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(dma_handle);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
}

#define AAL5_LEN(buf,len)						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len)						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len-1)]))
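
/*
 * Both macros read big-endian 16-bit fields out of the aal5 trailer at the
 * end of the reassembled pdu: bytes len-6/len-5 are the trailer's length
 * field, and len-2/len-1 are the low half of the crc-32 word, which holds
 * the tcp checksum when RSR0_TCP_CKSUM is in effect (see above).
 */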

static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	struct he_rbp *rbp = NULL;
	unsigned cid, lastcid = -1;
	unsigned buf_len = 0;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_iovec *iov;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];

		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		cid = RBRQ_CID(he_dev->rbrq_head);

		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL) {
			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;

			goto next_rbrq_entry;
		}

		he_vcc = HE_VCC(vcc);
		if (he_vcc == NULL) {
			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;
			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
		he_vcc->iov_tail->iov_len = buf_len;
		he_vcc->pdu_len += buf_len;
		++he_vcc->iov_tail;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

#ifdef notdef
		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
			hprintk("iovec full!  cid 0x%x\n", cid);
			goto return_host_buffers;
		}
#endif
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		for (iov = he_vcc->iov_head; iov < he_vcc->iov_tail; ++iov)
			memcpy(skb_put(skb, iov->iov_len),
			       he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb_set_tail_pointer(skb, skb->len);
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
								he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		for (iov = he_vcc->iov_head; iov < he_vcc->iov_tail; ++iov) {
			rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
			rbp->status &= ~RBP_LOANED;
		}

		he_vcc->iov_tail = he_vcc->iov_head;
		he_vcc->pdu_len = 0;

next_rbrq_entry:
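		/*
		 * The ready queue is a size-aligned ring: advance the head by
		 * incrementing the pointer, masking the offset bits and OR-ing
		 * them back onto the ring's base address, so the walk wraps
		 * around without an explicit bounds check.
		 */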
1800 he_dev->rbrq_head = (struct he_rbrq *)
1801 ((unsigned long) he_dev->rbrq_base |
1802 RBRQ_MASK(++he_dev->rbrq_head));
1803
1804 }
1805 read_unlock(&vcc_sklist_lock);
1806
1807 if (updated) {
1808 if (updated > he_dev->rbrq_peak)
1809 he_dev->rbrq_peak = updated;
1810
1811 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1812 G0_RBRQ_H + (group * 16));
1813 }
1814
1815 return pdus_assembled;
1816}
1817
1818static void
1819he_service_tbrq(struct he_dev *he_dev, int group)
1820{
1821 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1822 ((unsigned long)he_dev->tbrq_base |
1823 he_dev->hsp->group[group].tbrq_tail);
1824 struct he_tpd *tpd;
1825 int slot, updated = 0;
1da177e4 1826 struct he_tpd *__tpd;
1da177e4
LT
1827
1828 /* 2.1.6 transmit buffer return queue */
1829
1830 while (he_dev->tbrq_head != tbrq_tail) {
1831 ++updated;
1832
1833 HPRINTK("tbrq%d 0x%x%s%s\n",
1834 group,
1835 TBRQ_TPD(he_dev->tbrq_head),
1836 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1837 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1da177e4
LT
1838 tpd = NULL;
1839 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1840 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1841 tpd = __tpd;
1842 list_del(&__tpd->entry);
1843 break;
1844 }
1845 }
1846
1847 if (tpd == NULL) {
1848 hprintk("unable to locate tpd for dma buffer %x\n",
1849 TBRQ_TPD(he_dev->tbrq_head));
1850 goto next_tbrq_entry;
1851 }
1da177e4
LT
1852
1853 if (TBRQ_EOS(he_dev->tbrq_head)) {
1854 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1855 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1856 if (tpd->vcc)
1857 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1858
1859 goto next_tbrq_entry;
1860 }

		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
			if (tpd->iovec[slot].addr)
				pci_unmap_single(he_dev->pci_dev,
					tpd->iovec[slot].addr,
					tpd->iovec[slot].len & TPD_LEN_MASK,
							PCI_DMA_TODEVICE);
			if (tpd->iovec[slot].len & TPD_LST)
				break;
		}

		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
			if (tpd->vcc && tpd->vcc->pop)
				tpd->vcc->pop(tpd->vcc, tpd->skb);
			else
				dev_kfree_skb_any(tpd->skb);
		}

next_tbrq_entry:
		if (tpd)
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
		he_dev->tbrq_head = (struct he_tbrq *)
				((unsigned long) he_dev->tbrq_base |
					TBRQ_MASK(++he_dev->tbrq_head));
	}

	if (updated) {
		if (updated > he_dev->tbrq_peak)
			he_dev->tbrq_peak = updated;

		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
						G0_TBRQ_H + (group * 16));
	}
}
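
/*
 * Replenish the group 0 large receive buffer pool (RBPL): advance our
 * tail toward the adapter's head, marking each reclaimed buffer
 * RBP_LOANED again, then publish the new tail so the hardware can use
 * the buffers.  Per table 3.42 the tail must never catch the head.
 */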

static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
	struct he_rbp *newtail;
	struct he_rbp *rbpl_head;
	int moved = 0;

	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	for (;;) {
		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
						RBPL_MASK(he_dev->rbpl_tail+1));

		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
		if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
			break;

		newtail->status |= RBP_LOANED;
		he_dev->rbpl_tail = newtail;
		++moved;
	}

	if (moved)
		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
}

static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
	spin_lock_irqsave(&he_dev->global_lock, flags);

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_RBRQ_TIMER:
				if (he_service_rbrq(he_dev, group))
					he_service_rbpl(he_dev, group);
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
				/* shouldn't happen unless small buffers enabled */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}
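
/*
 * Hard interrupt handler: it only snapshots the adapter's interrupt
 * queue tail, acknowledges the interrupt and schedules he_tasklet();
 * all of the actual queue servicing happens in the tasklet.
 */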

static irqreturn_t
he_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *)dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

#ifdef DEBUG
	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");
#endif

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
		tasklet_schedule(&he_dev->tasklet);
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);
}
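
/*
 * Queue a TPD on the transmit packet descriptor ready queue (TPDRQ).
 * Callers in this file hold he_dev->global_lock around the call; if
 * the ring is still full after re-reading the hardware head pointer,
 * the PDU is dropped (see the FIXME below).
 */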

static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			int slot;

			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					pci_unmap_single(he_dev->pci_dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
								PCI_DMA_TODEVICE);
			}
			if (tpd->skb) {
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
	he_dev->tpdrq_tail->cid = cid;
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
}
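
/*
 * atmdev ->open: validates the requested QoS, then programs the
 * per-connection transmit state registers (TSR0..TSR14) and/or
 * receive state registers (RSR0/1/4) for the new cid.
 */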

static int
he_open(struct atm_vcc *vcc)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
		return 0;

	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

	set_bit(ATM_VF_ADDR, &vcc->flags);

	cid = he_mkcid(he_dev, vpi, vci);

	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}

	he_vcc->iov_tail = he_vcc->iov_head;
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;

	init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);

	vcc->dev_data = he_vcc;

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		int pcr_goal;

		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;

		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				tsr0_aal = TSR0_AAL5;
				tsr4 = TSR4_AAL5;
				break;
			case ATM_AAL0:
				tsr0_aal = TSR0_AAL0_SDU;
				tsr4 = TSR4_AAL0_SDU;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);
		tsr0 = he_readl_tsr0(he_dev, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		if (TSR0_CONN_STATE(tsr0) != 0) {
			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
			err = -EBUSY;
			goto open_failed;
		}

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				/* 2.3.3.1 open connection ubr */

				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_USE_WMIN | TSR0_UPDATE_GER;
				break;

			case ATM_CBR:
				/* 2.3.3.2 open connection cbr */

				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
				if ((he_dev->total_bw + pcr_goal)
					> (he_dev->atm_dev->link_rate * 9 / 10))
				{
					err = -EBUSY;
					goto open_failed;
				}

				spin_lock_irqsave(&he_dev->global_lock, flags);	/* also protects he_dev->cs_stper[] */

				/* find an unused cs_stper register */
				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
					if (he_dev->cs_stper[reg].inuse == 0 ||
					    he_dev->cs_stper[reg].pcr == pcr_goal)
						break;

				if (reg == HE_NUM_CS_STPER) {
					err = -EBUSY;
					spin_unlock_irqrestore(&he_dev->global_lock, flags);
					goto open_failed;
				}

				he_dev->total_bw += pcr_goal;

				he_vcc->rc_index = reg;
				++he_dev->cs_stper[reg].inuse;
				he_dev->cs_stper[reg].pcr = pcr_goal;

				clock = he_is622(he_dev) ? 66667000 : 50000000;
				period = clock / pcr_goal;
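				/*
				 * Worked example, using the two constants
				 * above: on a 155 Mb/s card the scheduler
				 * clock is 50 MHz, so a CBR connection asking
				 * for pcr_goal = 100,000 cells/s gets
				 * period = 50,000,000 / 100,000 = 500 ticks,
				 * and the rate controller is loaded with
				 * rate_to_atmf(period/2) = rate_to_atmf(250).
				 */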

				HPRINTK("rc_index = %d period = %d\n",
								reg, period);

				he_writel_mbox(he_dev, rate_to_atmf(period/2),
							CS_STPER0 + reg);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);

				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
							TSR0_RC_INDEX(reg);

				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		he_writel_tsr0(he_dev, tsr0, cid);
		he_writel_tsr4(he_dev, tsr4 | 1, cid);
		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);

		he_writel_tsr3(he_dev, 0x0, cid);
		he_writel_tsr5(he_dev, 0x0, cid);
		he_writel_tsr6(he_dev, 0x0, cid);
		he_writel_tsr7(he_dev, 0x0, cid);
		he_writel_tsr8(he_dev, 0x0, cid);
		he_writel_tsr10(he_dev, 0x0, cid);
		he_writel_tsr11(he_dev, 0x0, cid);
		he_writel_tsr12(he_dev, 0x0, cid);
		he_writel_tsr13(he_dev, 0x0, cid);
		he_writel_tsr14(he_dev, 0x0, cid);
		(void) he_readl_tsr0(he_dev, cid);	/* flush posted writes */
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		unsigned aal;

		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
						&HE_VCC(vcc)->rx_waitq);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				aal = RSR0_AAL5;
				break;
			case ATM_AAL0:
				aal = RSR0_RAWCELL;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		rsr0 = he_readl_rsr0(he_dev, cid);
		if (rsr0 & RSR0_OPEN_CONN) {
			spin_unlock_irqrestore(&he_dev->global_lock, flags);

			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
			err = -EBUSY;
			goto open_failed;
		}

		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;

#ifdef USE_CHECKSUM_HW
		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
			rsr0 |= RSR0_TCP_CKSUM;
#endif

		he_writel_rsr4(he_dev, rsr4, cid);
		he_writel_rsr1(he_dev, rsr1, cid);
		/* 5.1.11 last parameter initialized should be
		   the open/closed indication in rsr0 */
		he_writel_rsr0(he_dev,
			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
		(void) he_readl_rsr0(he_dev, cid);	/* flush posted writes */

		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

open_failed:

	if (err) {
		kfree(he_vcc);
		clear_bit(ATM_VF_ADDR, &vcc->flags);
	}
	else
		set_bit(ATM_VF_READY, &vcc->flags);

	return err;
}
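
/*
 * atmdev ->close: the receive side is closed through the RXCON_CLOSE
 * mailbox command, sleeping on rx_waitq until the adapter returns the
 * connection's buffers; the transmit side is drained, flushed, and
 * then signalled closed by queueing a buffer-less TPD with TPD_EOS set.
 */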

static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->rx_waitq, &wait);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);	/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);
	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);

		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */

		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
				sleep = sleep * 2;

			++retry;
		}
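
		/*
		 * The drain loop above backs off exponentially: 1, 2, 4,
		 * ... ms, settling at 256 ms once the doubling passes the
		 * 250 ms check, for at most MAX_RETRY (30) iterations --
		 * roughly six seconds of waiting for the socket's write
		 * allocation to fall back to its baseline of 1.
		 */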

		if (tx_inuse > 1)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev,
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		(void) he_readl_tsr4(he_dev, cid);	/* flush posted writes */

		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->tx_waitq, &wait);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}
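
/*
 * atmdev ->send: each skb becomes (at least) one TPD.  Without
 * USE_SCATTERGATHER the skb must be linear and is mapped as a single
 * DMA buffer; TPD_LST marks the final buffer of the PDU.
 */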

static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
	int i, slot = 0;
#endif

#define HE_TPD_BUFSIZE 0xffff

	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

	if ((skb->len > HE_TPD_BUFSIZE) ||
	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}

#ifndef USE_SCATTERGATHER
	if (skb_shinfo(skb)->nr_frags) {
		hprintk("no scatter/gather support\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}
#endif
	spin_lock_irqsave(&he_dev->global_lock, flags);

	tpd = __alloc_tpd(he_dev);
	if (tpd == NULL) {
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return -ENOMEM;
	}

	if (vcc->qos.aal == ATM_AAL5)
		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
	else {
		char *pti_clp = (void *) (skb->data + 3);
		int clp, pti;

		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
		clp = (*pti_clp & ATM_HDR_CLP);
		tpd->status |= TPD_CELLTYPE(pti);
		if (clp)
			tpd->status |= TPD_CLP;

		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
	}
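
	/*
	 * For AAL0 the skb carries a 52-byte raw cell (ATM_AAL0_SDU): a
	 * 4-byte header without HEC followed by the 48-byte payload
	 * (ATM_CELL_PAYLOAD).  The PTI and CLP bits live in header byte
	 * 3, hence skb->data + 3 above; the skb_pull() strips the
	 * 4-byte header so only the payload is handed to the adapter.
	 */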

#ifdef USE_SCATTERGATHER
	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
				skb_headlen(skb), PCI_DMA_TODEVICE);
	tpd->iovec[slot].len = skb_headlen(skb);
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment
						   so don't ->push() yet */
			wmb();

			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);
			if (tpd == NULL) {
				if (vcc->pop)
					vcc->pop(vcc, skb);
				else
					dev_kfree_skb_any(skb);
				atomic_inc(&vcc->stats->tx_err);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				return -ENOMEM;
			}
			tpd->status |= TPD_USERCELL;
			slot = 0;
		}

		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
			(void *) page_address(frag->page) + frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		tpd->iovec[slot].len = frag->size;
		++slot;
	}

	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	tpd->status |= TPD_INT;

	tpd->vcc = vcc;
	tpd->skb = skb;
	wmb();
	ATM_SKB(skb)->vcc = vcc;

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}

static int
he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	struct he_ioctl_reg reg;
	int err = 0;

	switch (cmd) {
		case HE_GET_REG:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;

			if (copy_from_user(&reg, arg,
					   sizeof(struct he_ioctl_reg)))
				return -EFAULT;

			spin_lock_irqsave(&he_dev->global_lock, flags);
			switch (reg.type) {
				case HE_REGTYPE_PCI:
					if (reg.addr >= HE_REGMAP_SIZE) {
						err = -EINVAL;
						break;
					}

					reg.val = he_readl(he_dev, reg.addr);
					break;
				case HE_REGTYPE_RCM:
					reg.val =
						he_readl_rcm(he_dev, reg.addr);
					break;
				case HE_REGTYPE_TCM:
					reg.val =
						he_readl_tcm(he_dev, reg.addr);
					break;
				case HE_REGTYPE_MBOX:
					reg.val =
						he_readl_mbox(he_dev, reg.addr);
					break;
				default:
					err = -EINVAL;
					break;
			}
			spin_unlock_irqrestore(&he_dev->global_lock, flags);
			if (err == 0)
				if (copy_to_user(arg, &reg,
						 sizeof(struct he_ioctl_reg)))
					return -EFAULT;
			break;
		default:
#ifdef CONFIG_ATM_HE_USE_SUNI
			if (atm_dev->phy && atm_dev->phy->ioctl)
				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
#else /* CONFIG_ATM_HE_USE_SUNI */
			err = -EINVAL;
#endif /* CONFIG_ATM_HE_USE_SUNI */
			break;
	}

	return err;
}
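
/*
 * A minimal userspace sketch of driving HE_GET_REG (an assumption, not
 * part of this file: it relies on the definitions in <linux/atm_he.h>
 * and the generic atmif_sioc wrapper from <linux/atmdev.h>; itf is the
 * ATM interface number):
 *
 *	struct he_ioctl_reg ioc = { .addr = 0x0, .type = HE_REGTYPE_PCI };
 *	struct atmif_sioc sioc = { .number = itf, .arg = &ioc,
 *				   .length = sizeof(ioc) };
 *	int s = socket(PF_ATMPVC, SOCK_DGRAM, 0);
 *	if (ioctl(s, HE_GET_REG, &sioc) == 0)
 *		printf("reg 0x%x = 0x%x\n", ioc.addr, ioc.val);
 */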

static void
he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);

	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);

	spin_lock_irqsave(&he_dev->global_lock, flags);
	he_writel(he_dev, val, FRAMER + (addr*4));
	(void) he_readl(he_dev, FRAMER + (addr*4));	/* flush posted writes */
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}

static unsigned char
he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	unsigned reg;

	spin_lock_irqsave(&he_dev->global_lock, flags);
	reg = he_readl(he_dev, FRAMER + (addr*4));
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);
	return reg;
}

static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail, inuse;
#endif
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;

	left = *pos;
	if (!left--)
		return sprintf(page, "ATM he driver\n");

	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

	if (!left--)
		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");

	spin_lock_irqsave(&he_dev->global_lock, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	if (!left--)
		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
							mcc, oec, dcc, cec);

	if (!left--)
		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_IRQ_SIZE, he_dev->irq_peak);

	if (!left--)
		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
						CONFIG_TPDRQ_SIZE);

	if (!left--)
		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

	if (!left--)
		return sprintf(page, "tbrq_size = %d  peak = %d\n",
					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);

#ifdef notdef
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0)
		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);

	if (!left--)
		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
						CONFIG_RBPL_SIZE, inuse);
#endif

	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");

	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
						he_dev->cs_stper[i].pcr,
						he_dev->cs_stper[i].inuse);

	if (!left--)
		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);

	return 0;
}

/* eeprom routines -- see 4.7 */
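
/*
 * The serial EEPROM is bit-banged through HOST_CNTL: with write enable
 * set, the READ opcode and then the eight address bits are clocked out
 * MSB-first (two writes per bit toggle the clock line); with write
 * enable cleared, eight data bits are clocked back in through ID_DOUT.
 */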

static u8 read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;

	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);

	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	j = 0;

	val &= 0xFFFFF7FF;	/* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);

	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		byte_read |= (unsigned char)
			((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
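
/*
 * Example usage (a hypothetical invocation, not from this file):
 * disable interrupt coalescing and select SDH framing at load time:
 *
 *	modprobe he irq_coalesce=0 sdh=1
 */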

static struct pci_device_id he_pci_tbl[] = {
	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);

static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
	.remove =	__devexit_p(he_remove_one),
	.id_table =	he_pci_tbl,
};

static int __init he_init(void)
{
	return pci_register_driver(&he_driver);
}

static void __exit he_cleanup(void)
{
	pci_unregister_driver(&he_driver);
}

module_init(he_init);
module_exit(he_cleanup);