1da177e4 1/*
2 A FORE Systems 200E-series driver for ATM on Linux.
3 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
4
5 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
6
7 This driver simultaneously supports PCA-200E and SBA-200E adapters
8 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23*/
24
25
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/init.h>
29#include <linux/capability.h>
30#include <linux/interrupt.h>
31#include <linux/bitops.h>
32#include <linux/pci.h>
33#include <linux/module.h>
34#include <linux/atmdev.h>
35#include <linux/sonet.h>
36#include <linux/atm_suni.h>
37#include <linux/dma-mapping.h>
38#include <linux/delay.h>
e92481f9 39#include <linux/firmware.h>
40#include <asm/io.h>
41#include <asm/string.h>
42#include <asm/page.h>
43#include <asm/irq.h>
44#include <asm/dma.h>
45#include <asm/byteorder.h>
7c0f6ba6 46#include <linux/uaccess.h>
60063497 47#include <linux/atomic.h>
1da177e4 48
e92481f9 49#ifdef CONFIG_SBUS
50#include <linux/of.h>
51#include <linux/of_device.h>
1da177e4 52#include <asm/idprom.h>
53#include <asm/openprom.h>
54#include <asm/oplib.h>
55#include <asm/pgtable.h>
56#endif
57
58#if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
59#define FORE200E_USE_TASKLET
60#endif
61
62#if 0 /* enable the debugging code of the buffer supply queues */
63#define FORE200E_BSQ_DEBUG
64#endif
65
66#if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
67#define FORE200E_52BYTE_AAL0_SDU
68#endif
69
70#include "fore200e.h"
71#include "suni.h"
72
73#define FORE200E_VERSION "0.3e"
74
75#define FORE200E "fore200e: "
76
77#if 0 /* override .config */
78#define CONFIG_ATM_FORE200E_DEBUG 1
79#endif
80#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
81#define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
82 printk(FORE200E format, ##args); } while (0)
83#else
84#define DPRINTK(level, format, args...) do {} while (0)
85#endif
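/* Illustrative usage (added note, not in the original source): with
   CONFIG_ATM_FORE200E_DEBUG set to 2, a call such as
   DPRINTK(2, "device %s being configured\n", fore200e->name) expands to a
   printk() prefixed with "fore200e: ", while DPRINTK(3, ...) calls are
   compiled out because the level test is a compile-time constant. */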
86
87
88#define FORE200E_ALIGN(addr, alignment) \
89 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
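/* Worked example (added for clarity): FORE200E_ALIGN() yields the number of
   padding bytes needed to bring 'addr' up to the next 'alignment' boundary,
   assuming 'alignment' is a power of two. With addr = 0x1004 and
   alignment = 32: ((0x1004 + 31) & ~31UL) - 0x1004 = 0x1020 - 0x1004 = 28,
   and the result is 0 when addr is already aligned. */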
90
91#define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
92
93#define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
94
30dfe2c0 95#define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))
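/* Note (added): FORE200E_NEXT_ENTRY() advances a queue index in place, modulo
   the queue size, e.g. FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD) wraps
   head from QUEUE_SIZE_CMD - 1 back to 0. */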
96
97#if 1
98#define ASSERT(expr) if (!(expr)) { \
99 printk(FORE200E "assertion failed! %s[%d]: %s\n", \
100 __func__, __LINE__, #expr); \
101 panic(FORE200E "%s", __func__); \
102 }
103#else
104#define ASSERT(expr) do {} while (0)
105#endif
106
107
108static const struct atmdev_ops fore200e_ops;
109
110static LIST_HEAD(fore200e_boards);
111
112
113MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
114MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
115MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
116
117
118static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
119 { BUFFER_S1_NBR, BUFFER_L1_NBR },
120 { BUFFER_S2_NBR, BUFFER_L2_NBR }
121};
122
123static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
124 { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
125 { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
126};
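/* Explanatory note (added): the two tables above are indexed by
   [scheme][magnitude]. The driver keeps one rx buffer supply queue per
   (scheme, magnitude) pair - see host_bsq[][] usage below - and these tables
   give, respectively, how many rx buffers are fed to each queue and how large
   each of those buffers is; the BUFFER_S and BUFFER_L constants presumably
   distinguish the small and large buffer sizes of each scheme. */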
127
128
129#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
130static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
131#endif
132
133
134#if 0 /* currently unused */
135static int
136fore200e_fore2atm_aal(enum fore200e_aal aal)
137{
138 switch(aal) {
139 case FORE200E_AAL0: return ATM_AAL0;
140 case FORE200E_AAL34: return ATM_AAL34;
141 case FORE200E_AAL5: return ATM_AAL5;
142 }
143
144 return -EINVAL;
145}
146#endif
147
148
149static enum fore200e_aal
150fore200e_atm2fore_aal(int aal)
151{
152 switch(aal) {
153 case ATM_AAL0: return FORE200E_AAL0;
154 case ATM_AAL34: return FORE200E_AAL34;
155 case ATM_AAL1:
156 case ATM_AAL2:
157 case ATM_AAL5: return FORE200E_AAL5;
158 }
159
160 return -EINVAL;
161}
162
163
164static char*
165fore200e_irq_itoa(int irq)
166{
167 static char str[8];
168 sprintf(str, "%d", irq);
169 return str;
170}
171
172
 173/* allocate and align a chunk of memory intended to hold the data being exchanged
174 between the driver and the adapter (using streaming DVMA) */
175
176static int
177fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
178{
179 unsigned long offset = 0;
180
181 if (alignment <= sizeof(int))
182 alignment = 0;
183
184 chunk->alloc_size = size + alignment;
185 chunk->direction = direction;
186
0e21b225 187 chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL);
188 if (chunk->alloc_addr == NULL)
189 return -ENOMEM;
190
191 if (alignment > 0)
192 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
193
194 chunk->align_addr = chunk->alloc_addr + offset;
195
196 chunk->dma_addr = dma_map_single(fore200e->dev, chunk->align_addr,
197 size, direction);
198 return 0;
199}
200
201
202/* free a chunk of memory */
203
204static void
205fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
206{
207 dma_unmap_single(fore200e->dev, chunk->dma_addr, chunk->dma_size,
208 chunk->direction);
1f8a5fb8 209 kfree(chunk->alloc_addr);
210}
211
212/*
213 * Allocate a DMA consistent chunk of memory intended to act as a communication
214 * mechanism (to hold descriptors, status, queues, etc.) shared by the driver
215 * and the adapter.
216 */
217static int
218fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
219 int size, int nbr, int alignment)
220{
221 /* returned chunks are page-aligned */
222 chunk->alloc_size = size * nbr;
223 chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size,
224 &chunk->dma_addr, GFP_KERNEL);
225 if (!chunk->alloc_addr)
226 return -ENOMEM;
227 chunk->align_addr = chunk->alloc_addr;
228 return 0;
229}
230
231/*
232 * Free a DMA consistent chunk of memory.
233 */
234static void
235fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
236{
237 dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr,
238 chunk->dma_addr);
239}
240
241static void
242fore200e_spin(int msecs)
243{
244 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
245 while (time_before(jiffies, timeout));
246}
247
248
249static int
250fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
251{
252 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
253 int ok;
254
255 mb();
256 do {
257 if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
258 break;
259
260 } while (time_before(jiffies, timeout));
261
262#if 1
263 if (!ok) {
264 printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
265 *addr, val);
266 }
267#endif
268
269 return ok;
270}
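/* Usage note (added): command submission in this driver follows the pattern
   illustrated by fore200e_pca_prom_read() below - the caller sets
   *entry->status = STATUS_PENDING, writes the opcode to the adapter, then
   calls fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); a zero
   return means the command timed out or completed with STATUS_ERROR set. */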
271
272
273static int
274fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
275{
276 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
277 int ok;
278
279 do {
280 if ((ok = (fore200e->bus->read(addr) == val)))
281 break;
282
283 } while (time_before(jiffies, timeout));
284
285#if 1
286 if (!ok) {
287 printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
288 fore200e->bus->read(addr), val);
289 }
290#endif
291
292 return ok;
293}
294
295
296static void
297fore200e_free_rx_buf(struct fore200e* fore200e)
298{
299 int scheme, magn, nbr;
300 struct buffer* buffer;
301
302 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
303 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
304
305 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
306
307 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
308
309 struct chunk* data = &buffer[ nbr ].data;
310
311 if (data->alloc_addr != NULL)
312 fore200e_chunk_free(fore200e, data);
313 }
314 }
315 }
316 }
317}
318
319
320static void
321fore200e_uninit_bs_queue(struct fore200e* fore200e)
322{
323 int scheme, magn;
324
325 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
326 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
327
328 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
329 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
330
331 if (status->alloc_addr)
1335d6fd 332 fore200e_dma_chunk_free(fore200e, status);
333
334 if (rbd_block->alloc_addr)
1335d6fd 335 fore200e_dma_chunk_free(fore200e, rbd_block);
336 }
337 }
338}
339
340
341static int
342fore200e_reset(struct fore200e* fore200e, int diag)
343{
344 int ok;
345
346 fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
347
348 fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
349
350 fore200e->bus->reset(fore200e);
351
352 if (diag) {
353 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
354 if (ok == 0) {
355
356 printk(FORE200E "device %s self-test failed\n", fore200e->name);
357 return -ENODEV;
358 }
359
360 printk(FORE200E "device %s self-test passed\n", fore200e->name);
361
362 fore200e->state = FORE200E_STATE_RESET;
363 }
364
365 return 0;
366}
367
368
369static void
370fore200e_shutdown(struct fore200e* fore200e)
371{
372 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
373 fore200e->name, fore200e->phys_base,
374 fore200e_irq_itoa(fore200e->irq));
375
376 if (fore200e->state > FORE200E_STATE_RESET) {
377 /* first, reset the board to prevent further interrupts or data transfers */
378 fore200e_reset(fore200e, 0);
379 }
380
381 /* then, release all allocated resources */
382 switch(fore200e->state) {
383
384 case FORE200E_STATE_COMPLETE:
a2c1aa54 385 kfree(fore200e->stats);
1da177e4 386
ec0d0987 387 /* fall through */
388 case FORE200E_STATE_IRQ:
389 free_irq(fore200e->irq, fore200e->atm_dev);
390
ec0d0987 391 /* fall through */
392 case FORE200E_STATE_ALLOC_BUF:
393 fore200e_free_rx_buf(fore200e);
394
ec0d0987 395 /* fall through */
396 case FORE200E_STATE_INIT_BSQ:
397 fore200e_uninit_bs_queue(fore200e);
398
ec0d0987 399 /* fall through */
1da177e4 400 case FORE200E_STATE_INIT_RXQ:
401 fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status);
402 fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
1da177e4 403
ec0d0987 404 /* fall through */
1da177e4 405 case FORE200E_STATE_INIT_TXQ:
406 fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status);
407 fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
1da177e4 408
ec0d0987 409 /* fall through */
1da177e4 410 case FORE200E_STATE_INIT_CMDQ:
1335d6fd 411 fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
1da177e4 412
ec0d0987 413 /* fall through */
414 case FORE200E_STATE_INITIALIZE:
415 /* nothing to do for that state */
416
417 case FORE200E_STATE_START_FW:
418 /* nothing to do for that state */
419
420 case FORE200E_STATE_RESET:
421 /* nothing to do for that state */
422
423 case FORE200E_STATE_MAP:
424 fore200e->bus->unmap(fore200e);
425
ec0d0987 426 /* fall through */
427 case FORE200E_STATE_CONFIGURE:
428 /* nothing to do for that state */
429
430 case FORE200E_STATE_REGISTER:
431 /* XXX shouldn't we *start* by deregistering the device? */
432 atm_dev_deregister(fore200e->atm_dev);
433
434 case FORE200E_STATE_BLANK:
435 /* nothing to do for that state */
436 break;
437 }
438}
439
440
e92481f9 441#ifdef CONFIG_PCI
442
443static u32 fore200e_pca_read(volatile u32 __iomem *addr)
444{
445 /* on big-endian hosts, the board is configured to convert
 446 the endianness of slave RAM accesses */
447 return le32_to_cpu(readl(addr));
448}
449
450
451static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
452{
453 /* on big-endian hosts, the board is configured to convert
 454 the endianness of slave RAM accesses */
455 writel(cpu_to_le32(val), addr);
456}
457
458static int
459fore200e_pca_irq_check(struct fore200e* fore200e)
460{
461 /* this is a 1 bit register */
462 int irq_posted = readl(fore200e->regs.pca.psr);
463
464#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
465 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
466 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
467 }
468#endif
469
470 return irq_posted;
471}
472
473
474static void
475fore200e_pca_irq_ack(struct fore200e* fore200e)
476{
477 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
478}
479
480
481static void
482fore200e_pca_reset(struct fore200e* fore200e)
483{
484 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
485 fore200e_spin(10);
486 writel(0, fore200e->regs.pca.hcr);
487}
488
489
6c44512d 490static int fore200e_pca_map(struct fore200e* fore200e)
491{
492 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
493
494 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
495
496 if (fore200e->virt_base == NULL) {
497 printk(FORE200E "can't map device %s\n", fore200e->name);
498 return -EFAULT;
499 }
500
501 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
502
503 /* gain access to the PCA specific registers */
504 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
505 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
506 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
507
508 fore200e->state = FORE200E_STATE_MAP;
509 return 0;
510}
511
512
513static void
514fore200e_pca_unmap(struct fore200e* fore200e)
515{
516 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
517
518 if (fore200e->virt_base != NULL)
519 iounmap(fore200e->virt_base);
520}
521
522
6c44512d 523static int fore200e_pca_configure(struct fore200e *fore200e)
1da177e4 524{
aff9d262 525 struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
526 u8 master_ctrl, latency;
527
528 DPRINTK(2, "device %s being configured\n", fore200e->name);
529
530 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
531 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
532 return -EIO;
533 }
534
535 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
536
537 master_ctrl = master_ctrl
538#if defined(__BIG_ENDIAN)
 539 /* request the PCA board to convert the endianness of slave RAM accesses */
540 | PCA200E_CTRL_CONVERT_ENDIAN
541#endif
542#if 0
543 | PCA200E_CTRL_DIS_CACHE_RD
544 | PCA200E_CTRL_DIS_WRT_INVAL
545 | PCA200E_CTRL_ENA_CONT_REQ_MODE
546 | PCA200E_CTRL_2_CACHE_WRT_INVAL
547#endif
548 | PCA200E_CTRL_LARGE_PCI_BURSTS;
549
550 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
551
552 /* raise latency from 32 (default) to 192, as this seems to prevent NIC
553 lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
 554 this may impact the performance of other PCI devices on the same bus, though */
555 latency = 192;
556 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
557
558 fore200e->state = FORE200E_STATE_CONFIGURE;
559 return 0;
560}
561
562
563static int __init
564fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
565{
566 struct host_cmdq* cmdq = &fore200e->host_cmdq;
567 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
568 struct prom_opcode opcode;
569 int ok;
570 u32 prom_dma;
571
572 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
573
574 opcode.opcode = OPCODE_GET_PROM;
575 opcode.pad = 0;
576
577 prom_dma = dma_map_single(fore200e->dev, prom, sizeof(struct prom_data),
578 DMA_FROM_DEVICE);
579
580 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
581
582 *entry->status = STATUS_PENDING;
583
584 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
585
586 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
587
588 *entry->status = STATUS_FREE;
589
f3fadcb5 590 dma_unmap_single(fore200e->dev, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
591
592 if (ok == 0) {
593 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
594 return -EIO;
595 }
596
597#if defined(__BIG_ENDIAN)
598
599#define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
600
601 /* MAC address is stored as little-endian */
602 swap_here(&prom->mac_addr[0]);
603 swap_here(&prom->mac_addr[4]);
604#endif
605
606 return 0;
607}
608
609
610static int
611fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
612{
aff9d262 613 struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
614
615 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
616 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
617}
618
619static const struct fore200e_bus fore200e_pci_ops = {
620 .model_name = "PCA-200E",
621 .proc_name = "pca200e",
622 .descr_alignment = 32,
623 .buffer_alignment = 4,
624 .status_alignment = 32,
625 .read = fore200e_pca_read,
626 .write = fore200e_pca_write,
627 .configure = fore200e_pca_configure,
628 .map = fore200e_pca_map,
629 .reset = fore200e_pca_reset,
630 .prom_read = fore200e_pca_prom_read,
631 .unmap = fore200e_pca_unmap,
632 .irq_check = fore200e_pca_irq_check,
633 .irq_ack = fore200e_pca_irq_ack,
634 .proc_read = fore200e_pca_proc_read,
635};
e92481f9 636#endif /* CONFIG_PCI */
1da177e4 637
e92481f9 638#ifdef CONFIG_SBUS
1da177e4 639
826b6cfc 640static u32 fore200e_sba_read(volatile u32 __iomem *addr)
641{
642 return sbus_readl(addr);
643}
644
826b6cfc 645static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
646{
647 sbus_writel(val, addr);
648}
649
826b6cfc 650static void fore200e_sba_irq_enable(struct fore200e *fore200e)
1da177e4 651{
652 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
653 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
654}
655
826b6cfc 656static int fore200e_sba_irq_check(struct fore200e *fore200e)
1da177e4 657{
826b6cfc 658 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
659}
660
826b6cfc 661static void fore200e_sba_irq_ack(struct fore200e *fore200e)
1da177e4 662{
663 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
664 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
665}
666
826b6cfc 667static void fore200e_sba_reset(struct fore200e *fore200e)
1da177e4 668{
669 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
670 fore200e_spin(10);
671 fore200e->bus->write(0, fore200e->regs.sba.hcr);
672}
673
826b6cfc 674static int __init fore200e_sba_map(struct fore200e *fore200e)
1da177e4 675{
aff9d262 676 struct platform_device *op = to_platform_device(fore200e->dev);
826b6cfc 677 unsigned int bursts;
1da177e4 678
679 /* gain access to the SBA specific registers */
680 fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
681 fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
682 fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
683 fore200e->virt_base = of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
1da177e4 684
685 if (!fore200e->virt_base) {
686 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
687 return -EFAULT;
688 }
1da177e4 689
826b6cfc 690 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
1da177e4 691
826b6cfc 692 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
1da177e4 693
826b6cfc 694 /* get the supported DVMA burst sizes */
61c7a080 695 bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);
1da177e4 696
697 if (sbus_can_dma_64bit())
698 sbus_set_sbus64(&op->dev, bursts);
1da177e4 699
700 fore200e->state = FORE200E_STATE_MAP;
701 return 0;
702}
703
826b6cfc 704static void fore200e_sba_unmap(struct fore200e *fore200e)
1da177e4 705{
aff9d262 706 struct platform_device *op = to_platform_device(fore200e->dev);
1da177e4 707
708 of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
709 of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
710 of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
711 of_iounmap(&op->resource[3], fore200e->virt_base, SBA200E_RAM_LENGTH);
712}
1da177e4 713
826b6cfc 714static int __init fore200e_sba_configure(struct fore200e *fore200e)
1da177e4 715{
716 fore200e->state = FORE200E_STATE_CONFIGURE;
717 return 0;
718}
719
826b6cfc 720static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
1da177e4 721{
aff9d262 722 struct platform_device *op = to_platform_device(fore200e->dev);
723 const u8 *prop;
724 int len;
1da177e4 725
61c7a080 726 prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
727 if (!prop)
728 return -ENODEV;
729 memcpy(&prom->mac_addr[4], prop, 4);
1da177e4 730
61c7a080 731 prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
732 if (!prop)
733 return -ENODEV;
734 memcpy(&prom->mac_addr[2], prop, 4);
1da177e4 735
736 prom->serial_number = of_getintprop_default(op->dev.of_node,
737 "serialnumber", 0);
738 prom->hw_revision = of_getintprop_default(op->dev.of_node,
739 "promversion", 0);
1da177e4 740
826b6cfc 741 return 0;
742}
743
826b6cfc 744static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
1da177e4 745{
aff9d262 746 struct platform_device *op = to_platform_device(fore200e->dev);
826b6cfc 747 const struct linux_prom_registers *regs;
1da177e4 748
61c7a080 749 regs = of_get_property(op->dev.of_node, "reg", NULL);
1da177e4 750
826b6cfc 751 return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n",
61c7a080 752 (regs ? regs->which_io : 0), op->dev.of_node->name);
1da177e4 753}
1da177e4 754
755static const struct fore200e_bus fore200e_sbus_ops = {
756 .model_name = "SBA-200E",
757 .proc_name = "sba200e",
758 .descr_alignment = 32,
 759 .buffer_alignment = 64,
760 .status_alignment = 32,
761 .read = fore200e_sba_read,
762 .write = fore200e_sba_write,
0efe5523
CH
763 .configure = fore200e_sba_configure,
764 .map = fore200e_sba_map,
765 .reset = fore200e_sba_reset,
766 .prom_read = fore200e_sba_prom_read,
767 .unmap = fore200e_sba_unmap,
768 .irq_enable = fore200e_sba_irq_enable,
769 .irq_check = fore200e_sba_irq_check,
770 .irq_ack = fore200e_sba_irq_ack,
771 .proc_read = fore200e_sba_proc_read,
772};
773#endif /* CONFIG_SBUS */
774
775static void
776fore200e_tx_irq(struct fore200e* fore200e)
777{
778 struct host_txq* txq = &fore200e->host_txq;
779 struct host_txq_entry* entry;
780 struct atm_vcc* vcc;
781 struct fore200e_vc_map* vc_map;
782
783 if (fore200e->host_txq.txing == 0)
784 return;
785
786 for (;;) {
787
788 entry = &txq->host_entry[ txq->tail ];
789
790 if ((*entry->status & STATUS_COMPLETE) == 0) {
791 break;
792 }
793
794 DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
795 entry, txq->tail, entry->vc_map, entry->skb);
796
797 /* free copy of misaligned data */
a2c1aa54 798 kfree(entry->data);
799
800 /* remove DMA mapping */
f3fadcb5 801 dma_unmap_single(fore200e->dev, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
802 DMA_TO_DEVICE);
803
804 vc_map = entry->vc_map;
805
806 /* vcc closed since the time the entry was submitted for tx? */
807 if ((vc_map->vcc == NULL) ||
808 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
809
810 DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
811 fore200e->atm_dev->number);
812
813 dev_kfree_skb_any(entry->skb);
814 }
815 else {
816 ASSERT(vc_map->vcc);
817
818 /* vcc closed then immediately re-opened? */
819 if (vc_map->incarn != entry->incarn) {
820
821 /* when a vcc is closed, some PDUs may be still pending in the tx queue.
822 if the same vcc is immediately re-opened, those pending PDUs must
823 not be popped after the completion of their emission, as they refer
824 to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
825 would be decremented by the size of the (unrelated) skb, possibly
826 leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
827 we thus bind the tx entry to the current incarnation of the vcc
828 when the entry is submitted for tx. When the tx later completes,
829 if the incarnation number of the tx entry does not match the one
830 of the vcc, then this implies that the vcc has been closed then re-opened.
831 we thus just drop the skb here. */
832
833 DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
834 fore200e->atm_dev->number);
835
836 dev_kfree_skb_any(entry->skb);
837 }
838 else {
839 vcc = vc_map->vcc;
840 ASSERT(vcc);
841
842 /* notify tx completion */
843 if (vcc->pop) {
844 vcc->pop(vcc, entry->skb);
845 }
846 else {
847 dev_kfree_skb_any(entry->skb);
848 }
14afee4b 849
850 /* check error condition */
851 if (*entry->status & STATUS_ERROR)
852 atomic_inc(&vcc->stats->tx_err);
853 else
854 atomic_inc(&vcc->stats->tx);
855 }
856 }
857
858 *entry->status = STATUS_FREE;
859
860 fore200e->host_txq.txing--;
861
862 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
863 }
864}
865
866
867#ifdef FORE200E_BSQ_DEBUG
868int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
869{
870 struct buffer* buffer;
871 int count = 0;
872
873 buffer = bsq->freebuf;
874 while (buffer) {
875
876 if (buffer->supplied) {
877 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
878 where, scheme, magn, buffer->index);
879 }
880
881 if (buffer->magn != magn) {
882 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
883 where, scheme, magn, buffer->index, buffer->magn);
884 }
885
886 if (buffer->scheme != scheme) {
887 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
888 where, scheme, magn, buffer->index, buffer->scheme);
889 }
890
891 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
892 printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
893 where, scheme, magn, buffer->index);
894 }
895
896 count++;
897 buffer = buffer->next;
898 }
899
900 if (count != bsq->freebuf_count) {
901 printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
902 where, scheme, magn, count, bsq->freebuf_count);
903 }
904 return 0;
905}
906#endif
907
908
909static void
910fore200e_supply(struct fore200e* fore200e)
911{
912 int scheme, magn, i;
913
914 struct host_bsq* bsq;
915 struct host_bsq_entry* entry;
916 struct buffer* buffer;
917
918 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
919 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
920
921 bsq = &fore200e->host_bsq[ scheme ][ magn ];
922
923#ifdef FORE200E_BSQ_DEBUG
924 bsq_audit(1, bsq, scheme, magn);
925#endif
926 while (bsq->freebuf_count >= RBD_BLK_SIZE) {
927
928 DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
929 RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
930
931 entry = &bsq->host_entry[ bsq->head ];
932
933 for (i = 0; i < RBD_BLK_SIZE; i++) {
934
935 /* take the first buffer in the free buffer list */
936 buffer = bsq->freebuf;
937 if (!buffer) {
938 printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
939 scheme, magn, bsq->freebuf_count);
940 return;
941 }
942 bsq->freebuf = buffer->next;
943
944#ifdef FORE200E_BSQ_DEBUG
945 if (buffer->supplied)
946 printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
947 scheme, magn, buffer->index);
948 buffer->supplied = 1;
949#endif
950 entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
951 entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer);
952 }
953
954 FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
955
956 /* decrease accordingly the number of free rx buffers */
957 bsq->freebuf_count -= RBD_BLK_SIZE;
958
959 *entry->status = STATUS_PENDING;
960 fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
961 }
962 }
963 }
964}
965
966
967static int
968fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
969{
970 struct sk_buff* skb;
971 struct buffer* buffer;
972 struct fore200e_vcc* fore200e_vcc;
973 int i, pdu_len = 0;
974#ifdef FORE200E_52BYTE_AAL0_SDU
975 u32 cell_header = 0;
976#endif
977
978 ASSERT(vcc);
979
980 fore200e_vcc = FORE200E_VCC(vcc);
981 ASSERT(fore200e_vcc);
982
983#ifdef FORE200E_52BYTE_AAL0_SDU
984 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
985
986 cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
987 (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
988 (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
989 (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
990 rpd->atm_header.clp;
991 pdu_len = 4;
992 }
993#endif
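/* Explanatory note (added): the 4-byte value rebuilt above follows the
   standard ATM UNI cell header layout (GFC:4 VPI:8 VCI:16 PTI:3 CLP:1, most
   significant field first), matching the ATM_HDR_*_SHIFT constants from
   <linux/atm.h>; e.g. a cell on VPI 0, VCI 32 with PTI = 0 and CLP = 0 yields
   a header word of 0x00000200. */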
994
995 /* compute total PDU length */
996 for (i = 0; i < rpd->nseg; i++)
997 pdu_len += rpd->rsd[ i ].length;
998
999 skb = alloc_skb(pdu_len, GFP_ATOMIC);
1000 if (skb == NULL) {
1001 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
1002
1003 atomic_inc(&vcc->stats->rx_drop);
1004 return -ENOMEM;
1005 }
1006
a61bbcf2 1007 __net_timestamp(skb);
1008
1009#ifdef FORE200E_52BYTE_AAL0_SDU
1010 if (cell_header) {
1011 *((u32*)skb_put(skb, 4)) = cell_header;
1012 }
1013#endif
1014
1015 /* reassemble segments */
1016 for (i = 0; i < rpd->nseg; i++) {
1017
1018 /* rebuild rx buffer address from rsd handle */
1019 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1020
1021 /* Make device DMA transfer visible to CPU. */
1022 dma_sync_single_for_cpu(fore200e->dev, buffer->data.dma_addr,
1023 rpd->rsd[i].length, DMA_FROM_DEVICE);
1da177e4 1024
59ae1d12 1025 skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length);
1026
1027 /* Now let the device get at it again. */
1028 dma_sync_single_for_device(fore200e->dev, buffer->data.dma_addr,
1029 rpd->rsd[i].length, DMA_FROM_DEVICE);
1030 }
1031
1032 DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1033
1034 if (pdu_len < fore200e_vcc->rx_min_pdu)
1035 fore200e_vcc->rx_min_pdu = pdu_len;
1036 if (pdu_len > fore200e_vcc->rx_max_pdu)
1037 fore200e_vcc->rx_max_pdu = pdu_len;
1038 fore200e_vcc->rx_pdu++;
1039
1040 /* push PDU */
1041 if (atm_charge(vcc, skb->truesize) == 0) {
1042
1043 DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1044 vcc->itf, vcc->vpi, vcc->vci);
1045
1046 dev_kfree_skb_any(skb);
1047
1048 atomic_inc(&vcc->stats->rx_drop);
1049 return -ENOMEM;
1050 }
1051
1052 vcc->push(vcc, skb);
1053 atomic_inc(&vcc->stats->rx);
1054
1055 return 0;
1056}
1057
1058
1059static void
1060fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1061{
1062 struct host_bsq* bsq;
1063 struct buffer* buffer;
1064 int i;
1065
1066 for (i = 0; i < rpd->nseg; i++) {
1067
1068 /* rebuild rx buffer address from rsd handle */
1069 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1070
1071 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1072
1073#ifdef FORE200E_BSQ_DEBUG
1074 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1075
1076 if (buffer->supplied == 0)
1077 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1078 buffer->scheme, buffer->magn, buffer->index);
1079 buffer->supplied = 0;
1080#endif
1081
1082 /* re-insert the buffer into the free buffer list */
1083 buffer->next = bsq->freebuf;
1084 bsq->freebuf = buffer;
1085
1086 /* then increment the number of free rx buffers */
1087 bsq->freebuf_count++;
1088 }
1089}
1090
1091
1092static void
1093fore200e_rx_irq(struct fore200e* fore200e)
1094{
1095 struct host_rxq* rxq = &fore200e->host_rxq;
1096 struct host_rxq_entry* entry;
1097 struct atm_vcc* vcc;
1098 struct fore200e_vc_map* vc_map;
1099
1100 for (;;) {
1101
1102 entry = &rxq->host_entry[ rxq->head ];
1103
1104 /* no more received PDUs */
1105 if ((*entry->status & STATUS_COMPLETE) == 0)
1106 break;
1107
1108 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1109
1110 if ((vc_map->vcc == NULL) ||
1111 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1112
1113 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1114 fore200e->atm_dev->number,
1115 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1116 }
1117 else {
1118 vcc = vc_map->vcc;
1119 ASSERT(vcc);
1120
1121 if ((*entry->status & STATUS_ERROR) == 0) {
1122
1123 fore200e_push_rpd(fore200e, vcc, entry->rpd);
1124 }
1125 else {
1126 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1127 fore200e->atm_dev->number,
1128 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1129 atomic_inc(&vcc->stats->rx_err);
1130 }
1131 }
1132
1133 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1134
1135 fore200e_collect_rpd(fore200e, entry->rpd);
1136
1137 /* rewrite the rpd address to ack the received PDU */
1138 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1139 *entry->status = STATUS_FREE;
1140
1141 fore200e_supply(fore200e);
1142 }
1143}
1144
1145
1146#ifndef FORE200E_USE_TASKLET
1147static void
1148fore200e_irq(struct fore200e* fore200e)
1149{
1150 unsigned long flags;
1151
1152 spin_lock_irqsave(&fore200e->q_lock, flags);
1153 fore200e_rx_irq(fore200e);
1154 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1155
1156 spin_lock_irqsave(&fore200e->q_lock, flags);
1157 fore200e_tx_irq(fore200e);
1158 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1159}
1160#endif
1161
1162
1163static irqreturn_t
7d12e780 1164fore200e_interrupt(int irq, void* dev)
1165{
1166 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1167
1168 if (fore200e->bus->irq_check(fore200e) == 0) {
1169
1170 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1171 return IRQ_NONE;
1172 }
1173 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1174
1175#ifdef FORE200E_USE_TASKLET
1176 tasklet_schedule(&fore200e->tx_tasklet);
1177 tasklet_schedule(&fore200e->rx_tasklet);
1178#else
1179 fore200e_irq(fore200e);
1180#endif
1181
1182 fore200e->bus->irq_ack(fore200e);
1183 return IRQ_HANDLED;
1184}
1185
1186
1187#ifdef FORE200E_USE_TASKLET
1188static void
1189fore200e_tx_tasklet(unsigned long data)
1190{
1191 struct fore200e* fore200e = (struct fore200e*) data;
1192 unsigned long flags;
1193
1194 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1195
1196 spin_lock_irqsave(&fore200e->q_lock, flags);
1197 fore200e_tx_irq(fore200e);
1198 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1199}
1200
1201
1202static void
1203fore200e_rx_tasklet(unsigned long data)
1204{
1205 struct fore200e* fore200e = (struct fore200e*) data;
1206 unsigned long flags;
1207
1208 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1209
1210 spin_lock_irqsave(&fore200e->q_lock, flags);
1211 fore200e_rx_irq((struct fore200e*) data);
1212 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1213}
1214#endif
1215
1216
1217static int
1218fore200e_select_scheme(struct atm_vcc* vcc)
1219{
1220 /* fairly balance the VCs over (identical) buffer schemes */
1221 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1222
1223 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1224 vcc->itf, vcc->vpi, vcc->vci, scheme);
1225
1226 return scheme;
1227}
1228
1229
1230static int
1231fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1232{
1233 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1234 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1235 struct activate_opcode activ_opcode;
1236 struct deactivate_opcode deactiv_opcode;
1237 struct vpvc vpvc;
1238 int ok;
1239 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);
1240
1241 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1242
1243 if (activate) {
1244 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1245
1246 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1247 activ_opcode.aal = aal;
1248 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1249 activ_opcode.pad = 0;
1250 }
1251 else {
1252 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1253 deactiv_opcode.pad = 0;
1254 }
1255
1256 vpvc.vci = vcc->vci;
1257 vpvc.vpi = vcc->vpi;
1258
1259 *entry->status = STATUS_PENDING;
1260
1261 if (activate) {
1262
1263#ifdef FORE200E_52BYTE_AAL0_SDU
1264 mtu = 48;
1265#endif
1266 /* the MTU is not used by the cp, except in the case of AAL0 */
1267 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
1268 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1269 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1270 }
1271 else {
1272 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1273 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1274 }
1275
1276 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1277
1278 *entry->status = STATUS_FREE;
1279
1280 if (ok == 0) {
1281 printk(FORE200E "unable to %s VC %d.%d.%d\n",
1282 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1283 return -EIO;
1284 }
1285
1286 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1287 activate ? "open" : "clos");
1288
1289 return 0;
1290}
1291
1292
1293#define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
1294
1295static void
1296fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1297{
1298 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1299
1300 /* compute the data cells to idle cells ratio from the tx PCR */
1301 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1302 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1303 }
1304 else {
1305 /* disable rate control */
1306 rate->data_cells = rate->idle_cells = 0;
1307 }
1308}
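/* Worked example (added; assumes ATM_OC3_PCR from <linux/atm.h> is about
   353207 cells/s): a CBR VC requesting max_pcr = 176000 gets
   data_cells = 176000 * 255 / 353207 = 127 and idle_cells = 128, i.e. the
   firmware interleaves roughly one idle cell per data cell, throttling the VC
   to about half of the OC-3 line rate. */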
1309
1310
1311static int
1312fore200e_open(struct atm_vcc *vcc)
1313{
1314 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1315 struct fore200e_vcc* fore200e_vcc;
1316 struct fore200e_vc_map* vc_map;
1317 unsigned long flags;
1318 int vci = vcc->vci;
1319 short vpi = vcc->vpi;
1320
1321 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1322 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1323
1324 spin_lock_irqsave(&fore200e->q_lock, flags);
1325
1326 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1327 if (vc_map->vcc) {
1328
1329 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1330
1331 printk(FORE200E "VC %d.%d.%d already in use\n",
1332 fore200e->atm_dev->number, vpi, vci);
1333
1334 return -EINVAL;
1335 }
1336
1337 vc_map->vcc = vcc;
1338
1339 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1340
1f8a5fb8 1341 fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1342 if (fore200e_vcc == NULL) {
1343 vc_map->vcc = NULL;
1344 return -ENOMEM;
1345 }
1346
1347 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1348 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1349 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1350 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1351 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1352 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1353 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1354
1355 /* pseudo-CBR bandwidth requested? */
1356 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1357
bfbf3c09 1358 mutex_lock(&fore200e->rate_mtx);
1da177e4 1359 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
bfbf3c09 1360 mutex_unlock(&fore200e->rate_mtx);
1da177e4 1361
1f8a5fb8 1362 kfree(fore200e_vcc);
1363 vc_map->vcc = NULL;
1364 return -EAGAIN;
1365 }
1366
1367 /* reserve bandwidth */
1368 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
bfbf3c09 1369 mutex_unlock(&fore200e->rate_mtx);
1370 }
1371
1372 vcc->itf = vcc->dev->number;
1373
1374 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1375 set_bit(ATM_VF_ADDR, &vcc->flags);
1376
1377 vcc->dev_data = fore200e_vcc;
1378
1379 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1380
1381 vc_map->vcc = NULL;
1382
1383 clear_bit(ATM_VF_ADDR, &vcc->flags);
1384 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1385
1386 vcc->dev_data = NULL;
1387
1388 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1389
1f8a5fb8 1390 kfree(fore200e_vcc);
1391 return -EINVAL;
1392 }
1393
1394 /* compute rate control parameters */
1395 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1396
1397 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1398 set_bit(ATM_VF_HASQOS, &vcc->flags);
1399
1400 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1401 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1402 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1403 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1404 }
1405
1406 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1407 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1408 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1409
1410 /* new incarnation of the vcc */
1411 vc_map->incarn = ++fore200e->incarn_count;
1412
1413 /* VC unusable before this flag is set */
1414 set_bit(ATM_VF_READY, &vcc->flags);
1415
1416 return 0;
1417}
1418
1419
1420static void
1421fore200e_close(struct atm_vcc* vcc)
1422{
1423 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1424 struct fore200e_vcc* fore200e_vcc;
1425 struct fore200e_vc_map* vc_map;
1426 unsigned long flags;
1427
1428 ASSERT(vcc);
1429 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1430 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1431
1432 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1433
1434 clear_bit(ATM_VF_READY, &vcc->flags);
1435
1436 fore200e_activate_vcin(fore200e, 0, vcc, 0);
1437
1438 spin_lock_irqsave(&fore200e->q_lock, flags);
1439
1440 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1441
1442 /* the vc is no longer considered as "in use" by fore200e_open() */
1443 vc_map->vcc = NULL;
1444
1445 vcc->itf = vcc->vci = vcc->vpi = 0;
1446
1447 fore200e_vcc = FORE200E_VCC(vcc);
1448 vcc->dev_data = NULL;
1449
1450 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1451
1452 /* release reserved bandwidth, if any */
1453 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1454
bfbf3c09 1455 mutex_lock(&fore200e->rate_mtx);
1da177e4 1456 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
bfbf3c09 1457 mutex_unlock(&fore200e->rate_mtx);
1458
1459 clear_bit(ATM_VF_HASQOS, &vcc->flags);
1460 }
1461
1462 clear_bit(ATM_VF_ADDR, &vcc->flags);
1463 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1464
1465 ASSERT(fore200e_vcc);
1f8a5fb8 1466 kfree(fore200e_vcc);
1467}
1468
1469
1470static int
1471fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1472{
1473 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1474 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1475 struct fore200e_vc_map* vc_map;
1476 struct host_txq* txq = &fore200e->host_txq;
1477 struct host_txq_entry* entry;
1478 struct tpd* tpd;
1479 struct tpd_haddr tpd_haddr;
1480 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1481 int tx_copy = 0;
1482 int tx_len = skb->len;
1483 u32* cell_header = NULL;
1484 unsigned char* skb_data;
1485 int skb_len;
1486 unsigned char* data;
1487 unsigned long flags;
1488
1489 ASSERT(vcc);
1490 ASSERT(fore200e);
1491 ASSERT(fore200e_vcc);
1492
1493 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
 1494 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
1495 dev_kfree_skb_any(skb);
1496 return -EINVAL;
1497 }
1498
1499#ifdef FORE200E_52BYTE_AAL0_SDU
1500 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1501 cell_header = (u32*) skb->data;
1502 skb_data = skb->data + 4; /* skip 4-byte cell header */
1503 skb_len = tx_len = skb->len - 4;
1504
1505 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1506 }
1507 else
1508#endif
1509 {
1510 skb_data = skb->data;
1511 skb_len = skb->len;
1512 }
1513
1514 if (((unsigned long)skb_data) & 0x3) {
1515
1516 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1517 tx_copy = 1;
1518 tx_len = skb_len;
1519 }
1520
1521 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1522
1523 /* this simply NUKES the PCA board */
1524 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1525 tx_copy = 1;
1526 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1527 }
1528
1529 if (tx_copy) {
0e21b225 1530 data = kmalloc(tx_len, GFP_ATOMIC);
1531 if (data == NULL) {
1532 if (vcc->pop) {
1533 vcc->pop(vcc, skb);
1534 }
1535 else {
1536 dev_kfree_skb_any(skb);
1537 }
1538 return -ENOMEM;
1539 }
1540
1541 memcpy(data, skb_data, skb_len);
1542 if (skb_len < tx_len)
1543 memset(data + skb_len, 0x00, tx_len - skb_len);
1544 }
1545 else {
1546 data = skb_data;
1547 }
1548
1549 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1550 ASSERT(vc_map->vcc == vcc);
1551
1552 retry_here:
1553
1554 spin_lock_irqsave(&fore200e->q_lock, flags);
1555
1556 entry = &txq->host_entry[ txq->head ];
1557
1558 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1559
1560 /* try to free completed tx queue entries */
1561 fore200e_tx_irq(fore200e);
1562
1563 if (*entry->status != STATUS_FREE) {
1564
1565 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1566
1567 /* retry once again? */
1568 if (--retry > 0) {
1569 udelay(50);
1570 goto retry_here;
1571 }
1572
1573 atomic_inc(&vcc->stats->tx_err);
1574
1575 fore200e->tx_sat++;
1576 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1577 fore200e->name, fore200e->cp_queues->heartbeat);
1578 if (vcc->pop) {
1579 vcc->pop(vcc, skb);
1580 }
1581 else {
1582 dev_kfree_skb_any(skb);
1583 }
1584
1585 if (tx_copy)
1586 kfree(data);
1587
1588 return -ENOBUFS;
1589 }
1590 }
1591
1592 entry->incarn = vc_map->incarn;
1593 entry->vc_map = vc_map;
1594 entry->skb = skb;
1595 entry->data = tx_copy ? data : NULL;
1596
1597 tpd = entry->tpd;
1598 tpd->tsd[ 0 ].buffer = dma_map_single(fore200e->dev, data, tx_len,
1599 DMA_TO_DEVICE);
1600 tpd->tsd[ 0 ].length = tx_len;
1601
1602 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1603 txq->txing++;
1604
1605 /* The dma_map call above implies a dma_sync so the device can use it,
1606 * thus no explicit dma_sync call is necessary here.
1607 */
1608
1609 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1610 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1611 tpd->tsd[0].length, skb_len);
1612
1613 if (skb_len < fore200e_vcc->tx_min_pdu)
1614 fore200e_vcc->tx_min_pdu = skb_len;
1615 if (skb_len > fore200e_vcc->tx_max_pdu)
1616 fore200e_vcc->tx_max_pdu = skb_len;
1617 fore200e_vcc->tx_pdu++;
1618
1619 /* set tx rate control information */
1620 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1621 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1622
1623 if (cell_header) {
1624 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1625 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1626 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1627 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1628 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1629 }
1630 else {
1631 /* set the ATM header, common to all cells conveying the PDU */
1632 tpd->atm_header.clp = 0;
1633 tpd->atm_header.plt = 0;
1634 tpd->atm_header.vci = vcc->vci;
1635 tpd->atm_header.vpi = vcc->vpi;
1636 tpd->atm_header.gfc = 0;
1637 }
1638
1639 tpd->spec.length = tx_len;
1640 tpd->spec.nseg = 1;
1641 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1642 tpd->spec.intr = 1;
1643
1644 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */
1645 tpd_haddr.pad = 0;
1646 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
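/* Note (added): encoding the descriptor address in 32-byte units only works
   because tpds are allocated with descr_alignment = 32 (see the bus ops
   structures above), so the bits shifted out by TPD_HADDR_SHIFT are known to
   be zero; 'size' above is likewise the descriptor length in 32-byte blocks. */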
1647
1648 *entry->status = STATUS_PENDING;
1649 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1650
1651 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1652
1653 return 0;
1654}
1655
1656
1657static int
1658fore200e_getstats(struct fore200e* fore200e)
1659{
1660 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1661 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1662 struct stats_opcode opcode;
1663 int ok;
1664 u32 stats_dma_addr;
1665
1666 if (fore200e->stats == NULL) {
0e21b225 1667 fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL);
1668 if (fore200e->stats == NULL)
1669 return -ENOMEM;
1670 }
1671
1672 stats_dma_addr = dma_map_single(fore200e->dev, fore200e->stats,
1673 sizeof(struct stats), DMA_FROM_DEVICE);
1674
1675 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1676
1677 opcode.opcode = OPCODE_GET_STATS;
1678 opcode.pad = 0;
1679
1680 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1681
1682 *entry->status = STATUS_PENDING;
1683
1684 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1685
1686 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1687
1688 *entry->status = STATUS_FREE;
1689
f3fadcb5 1690 dma_unmap_single(fore200e->dev, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1691
1692 if (ok == 0) {
1693 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1694 return -EIO;
1695 }
1696
1697 return 0;
1698}
1699
1700
1701static int
1702fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1703{
1704 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1705
1706 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1707 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1708
1709 return -EINVAL;
1710}
1711
1712
1713static int
b7058842 1714fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen)
1715{
1716 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1717
1718 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1719 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1720
1721 return -EINVAL;
1722}
1723
1724
1725#if 0 /* currently unused */
1726static int
1727fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1728{
1729 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1730 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1731 struct oc3_opcode opcode;
1732 int ok;
1733 u32 oc3_regs_dma_addr;
1734
1735 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1736
1737 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1738
1739 opcode.opcode = OPCODE_GET_OC3;
1740 opcode.reg = 0;
1741 opcode.value = 0;
1742 opcode.mask = 0;
1743
1744 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1745
1746 *entry->status = STATUS_PENDING;
1747
1748 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1749
1750 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1751
1752 *entry->status = STATUS_FREE;
1753
1754 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1755
1756 if (ok == 0) {
1757 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1758 return -EIO;
1759 }
1760
1761 return 0;
1762}
1763#endif
1764
1765
1766static int
1767fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1768{
1769 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1770 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1771 struct oc3_opcode opcode;
1772 int ok;
1773
1774 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1775
1776 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1777
1778 opcode.opcode = OPCODE_SET_OC3;
1779 opcode.reg = reg;
1780 opcode.value = value;
1781 opcode.mask = mask;
1782
1783 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1784
1785 *entry->status = STATUS_PENDING;
1786
1787 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1788
1789 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1790
1791 *entry->status = STATUS_FREE;
1792
1793 if (ok == 0) {
1794 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1795 return -EIO;
1796 }
1797
1798 return 0;
1799}
1800
1801
1802static int
1803fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1804{
1805 u32 mct_value, mct_mask;
1806 int error;
1807
1808 if (!capable(CAP_NET_ADMIN))
1809 return -EPERM;
1810
1811 switch (loop_mode) {
1812
1813 case ATM_LM_NONE:
1814 mct_value = 0;
1815 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1816 break;
1817
1818 case ATM_LM_LOC_PHY:
1819 mct_value = mct_mask = SUNI_MCT_DLE;
1820 break;
1821
1822 case ATM_LM_RMT_PHY:
1823 mct_value = mct_mask = SUNI_MCT_LLE;
1824 break;
1825
1826 default:
1827 return -EINVAL;
1828 }
1829
1830 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1831 if (error == 0)
1832 fore200e->loop_mode = loop_mode;
1833
1834 return error;
1835}
1836
1837
1838static int
1839fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
1840{
1841 struct sonet_stats tmp;
1842
1843 if (fore200e_getstats(fore200e) < 0)
1844 return -EIO;
1845
1846 tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
1847 tmp.line_bip = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
1848 tmp.path_bip = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
1849 tmp.line_febe = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
1850 tmp.path_febe = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
1851 tmp.corr_hcs = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
1852 tmp.uncorr_hcs = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
1853 tmp.tx_cells = be32_to_cpu(fore200e->stats->aal0.cells_transmitted) +
1854 be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
1855 be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
1856 tmp.rx_cells = be32_to_cpu(fore200e->stats->aal0.cells_received) +
1857 be32_to_cpu(fore200e->stats->aal34.cells_received) +
1858 be32_to_cpu(fore200e->stats->aal5.cells_received);
1da177e4
LT
1859
1860 if (arg)
1861 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
1862
1863 return 0;
1864}
1865
1866
1867static int
1868fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
1869{
1870 struct fore200e* fore200e = FORE200E_DEV(dev);
1871
1872 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
1873
1874 switch (cmd) {
1875
1876 case SONET_GETSTAT:
1877 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
1878
1879 case SONET_GETDIAG:
1880 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
1881
1882 case ATM_SETLOOP:
1883 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
1884
1885 case ATM_GETLOOP:
1886 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
1887
1888 case ATM_QUERYLOOP:
1889 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
1890 }
1891
1892 return -ENOSYS; /* not implemented */
1893}
1894
1895
1896static int
1897fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
1898{
1899 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1900 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1901
1902 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1903	DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
1904 return -EINVAL;
1905 }
1906
1907 DPRINTK(2, "change_qos %d.%d.%d, "
1908 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1909 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
1910 "available_cell_rate = %u",
1911 vcc->itf, vcc->vpi, vcc->vci,
1912 fore200e_traffic_class[ qos->txtp.traffic_class ],
1913 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
1914 fore200e_traffic_class[ qos->rxtp.traffic_class ],
1915 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
1916 flags, fore200e->available_cell_rate);
1917
1918 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
1919
bfbf3c09 1920 mutex_lock(&fore200e->rate_mtx);
1da177e4 1921 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
bfbf3c09 1922 mutex_unlock(&fore200e->rate_mtx);
1da177e4
LT
1923 return -EAGAIN;
1924 }
1925
1926 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1927 fore200e->available_cell_rate -= qos->txtp.max_pcr;
1928
bfbf3c09 1929 mutex_unlock(&fore200e->rate_mtx);
1da177e4
LT
1930
1931 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
1932
1933 /* update rate control parameters */
1934 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
1935
1936 set_bit(ATM_VF_HASQOS, &vcc->flags);
1937
1938 return 0;
1939 }
1940
1941 return -EINVAL;
1942}
1943
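/* CBR rate accounting, illustrated (figures are an example only): with
   available_cell_rate == 100000 cells/s and a VC currently holding
   qos.txtp.max_pcr == 20000, a request for a new max_pcr of 150000 fails with
   -EAGAIN since 100000 + 20000 < 150000, whereas a request for 80000 succeeds
   and leaves 100000 + 20000 - 80000 = 40000 cells/s available */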
1944
6c44512d 1945static int fore200e_irq_request(struct fore200e *fore200e)
1da177e4 1946{
dace1453 1947 if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
1da177e4
LT
1948
1949 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
1950 fore200e_irq_itoa(fore200e->irq), fore200e->name);
1951 return -EBUSY;
1952 }
1953
1954 printk(FORE200E "IRQ %s reserved for device %s\n",
1955 fore200e_irq_itoa(fore200e->irq), fore200e->name);
1956
1957#ifdef FORE200E_USE_TASKLET
1958 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
1959 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
1960#endif
1961
1962 fore200e->state = FORE200E_STATE_IRQ;
1963 return 0;
1964}
1965
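/* when FORE200E_USE_TASKLET is defined, receive and transmit completion handling is
   deferred from the interrupt handler to the two tasklets initialized above, instead
   of being performed entirely in interrupt context */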
1966
6c44512d 1967static int fore200e_get_esi(struct fore200e *fore200e)
1da177e4 1968{
0e21b225 1969 struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL);
1da177e4
LT
1970 int ok, i;
1971
1972 if (!prom)
1973 return -ENOMEM;
1974
1975 ok = fore200e->bus->prom_read(fore200e, prom);
1976 if (ok < 0) {
1f8a5fb8 1977 kfree(prom);
1da177e4
LT
1978 return -EBUSY;
1979 }
1980
3008ab36 1981 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
1da177e4
LT
1982 fore200e->name,
1983 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
3008ab36 1984 prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
1da177e4
LT
1985
1986 for (i = 0; i < ESI_LEN; i++) {
1987 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
1988 }
1989
1f8a5fb8 1990 kfree(prom);
1da177e4
LT
1991
1992 return 0;
1993}
1994
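/* the ESI (end system identifier) is the 6-byte factory-assigned address held in
   bytes 2..7 of the 8-byte mac_addr field of the PROM, hence the copy above starting
   at prom->mac_addr[ 2 ] */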
1995
6c44512d 1996static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
1da177e4
LT
1997{
1998 int scheme, magn, nbr, size, i;
1999
2000 struct host_bsq* bsq;
2001 struct buffer* buffer;
2002
2003 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2004 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2005
2006 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2007
2008 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
2009 size = fore200e_rx_buf_size[ scheme ][ magn ];
2010
2011 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2012
2013 /* allocate the array of receive buffers */
6396bb22
KC
2014 buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer),
2015 GFP_KERNEL);
1da177e4
LT
2016
2017 if (buffer == NULL)
2018 return -ENOMEM;
2019
2020 bsq->freebuf = NULL;
2021
2022 for (i = 0; i < nbr; i++) {
2023
2024 buffer[ i ].scheme = scheme;
2025 buffer[ i ].magn = magn;
2026#ifdef FORE200E_BSQ_DEBUG
2027 buffer[ i ].index = i;
2028 buffer[ i ].supplied = 0;
2029#endif
2030
2031 /* allocate the receive buffer body */
2032 if (fore200e_chunk_alloc(fore200e,
2033 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2034 DMA_FROM_DEVICE) < 0) {
2035
2036 while (i > 0)
2037 fore200e_chunk_free(fore200e, &buffer[ --i ].data);
1f8a5fb8 2038 kfree(buffer);
1da177e4
LT
2039
2040 return -ENOMEM;
2041 }
2042
2043 /* insert the buffer into the free buffer list */
2044 buffer[ i ].next = bsq->freebuf;
2045 bsq->freebuf = &buffer[ i ];
2046 }
2047 /* all the buffers are free, initially */
2048 bsq->freebuf_count = nbr;
2049
2050#ifdef FORE200E_BSQ_DEBUG
2051 bsq_audit(3, bsq, scheme, magn);
2052#endif
2053 }
2054 }
2055
2056 fore200e->state = FORE200E_STATE_ALLOC_BUF;
2057 return 0;
2058}
2059
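/* receive buffers are organized as BUFFER_SCHEME_NBR x BUFFER_MAGN_NBR pools:
   fore200e_rx_buf_nbr[][] gives the number of buffers and fore200e_rx_buf_size[][]
   the buffer size of each pool; free buffers are chained through buffer->next with
   bsq->freebuf as the list head and bsq->freebuf_count tracking how many remain
   available for supply to the cp */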
2060
6c44512d 2061static int fore200e_init_bs_queue(struct fore200e *fore200e)
1da177e4
LT
2062{
2063 int scheme, magn, i;
2064
2065 struct host_bsq* bsq;
2066 struct cp_bsq_entry __iomem * cp_entry;
2067
2068 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2069 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2070
2071 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2072
2073 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2074
2075 /* allocate and align the array of status words */
1335d6fd 2076 if (fore200e_dma_chunk_alloc(fore200e,
1da177e4
LT
2077 &bsq->status,
2078 sizeof(enum status),
2079 QUEUE_SIZE_BS,
2080 fore200e->bus->status_alignment) < 0) {
2081 return -ENOMEM;
2082 }
2083
2084 /* allocate and align the array of receive buffer descriptors */
1335d6fd 2085 if (fore200e_dma_chunk_alloc(fore200e,
1da177e4
LT
2086 &bsq->rbd_block,
2087 sizeof(struct rbd_block),
2088 QUEUE_SIZE_BS,
2089 fore200e->bus->descr_alignment) < 0) {
2090
1335d6fd 2091 fore200e_dma_chunk_free(fore200e, &bsq->status);
1da177e4
LT
2092 return -ENOMEM;
2093 }
2094
2095 /* get the base address of the cp resident buffer supply queue entries */
2096 cp_entry = fore200e->virt_base +
2097 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2098
2099 /* fill the host resident and cp resident buffer supply queue entries */
2100 for (i = 0; i < QUEUE_SIZE_BS; i++) {
2101
2102 bsq->host_entry[ i ].status =
2103 FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2104 bsq->host_entry[ i ].rbd_block =
2105 FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2106 bsq->host_entry[ i ].rbd_block_dma =
2107 FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2108 bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2109
2110 *bsq->host_entry[ i ].status = STATUS_FREE;
2111
2112 fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2113 &cp_entry[ i ].status_haddr);
2114 }
2115 }
2116 }
2117
2118 fore200e->state = FORE200E_STATE_INIT_BSQ;
2119 return 0;
2120}
2121
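/* every host queue entry owns a status word in DMA-able memory whose bus address is
   also handed to the cp (status_haddr): the host marks an entry STATUS_PENDING when
   it submits work and the cp is expected to flip it to STATUS_COMPLETE, while
   STATUS_FREE denotes an entry available for reuse */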
2122
6c44512d 2123static int fore200e_init_rx_queue(struct fore200e *fore200e)
1da177e4
LT
2124{
2125 struct host_rxq* rxq = &fore200e->host_rxq;
2126 struct cp_rxq_entry __iomem * cp_entry;
2127 int i;
2128
2129 DPRINTK(2, "receive queue is being initialized\n");
2130
2131 /* allocate and align the array of status words */
1335d6fd 2132 if (fore200e_dma_chunk_alloc(fore200e,
1da177e4
LT
2133 &rxq->status,
2134 sizeof(enum status),
2135 QUEUE_SIZE_RX,
2136 fore200e->bus->status_alignment) < 0) {
2137 return -ENOMEM;
2138 }
2139
2140 /* allocate and align the array of receive PDU descriptors */
1335d6fd 2141 if (fore200e_dma_chunk_alloc(fore200e,
1da177e4
LT
2142 &rxq->rpd,
2143 sizeof(struct rpd),
2144 QUEUE_SIZE_RX,
2145 fore200e->bus->descr_alignment) < 0) {
2146
1335d6fd 2147 fore200e_dma_chunk_free(fore200e, &rxq->status);
1da177e4
LT
2148 return -ENOMEM;
2149 }
2150
2151 /* get the base address of the cp resident rx queue entries */
2152 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2153
2154 /* fill the host resident and cp resident rx entries */
2155 for (i=0; i < QUEUE_SIZE_RX; i++) {
2156
2157 rxq->host_entry[ i ].status =
2158 FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2159 rxq->host_entry[ i ].rpd =
2160 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2161 rxq->host_entry[ i ].rpd_dma =
2162 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2163 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2164
2165 *rxq->host_entry[ i ].status = STATUS_FREE;
2166
2167 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2168 &cp_entry[ i ].status_haddr);
2169
2170 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2171 &cp_entry[ i ].rpd_haddr);
2172 }
2173
2174 /* set the head entry of the queue */
2175 rxq->head = 0;
2176
2177 fore200e->state = FORE200E_STATE_INIT_RXQ;
2178 return 0;
2179}
2180
2181
6c44512d 2182static int fore200e_init_tx_queue(struct fore200e *fore200e)
1da177e4
LT
2183{
2184 struct host_txq* txq = &fore200e->host_txq;
2185 struct cp_txq_entry __iomem * cp_entry;
2186 int i;
2187
2188 DPRINTK(2, "transmit queue is being initialized\n");
2189
2190 /* allocate and align the array of status words */
1335d6fd 2191 if (fore200e_dma_chunk_alloc(fore200e,
1da177e4
LT
2192 &txq->status,
2193 sizeof(enum status),
2194 QUEUE_SIZE_TX,
2195 fore200e->bus->status_alignment) < 0) {
2196 return -ENOMEM;
2197 }
2198
2199 /* allocate and align the array of transmit PDU descriptors */
1335d6fd 2200 if (fore200e_dma_chunk_alloc(fore200e,
1da177e4
LT
2201 &txq->tpd,
2202 sizeof(struct tpd),
2203 QUEUE_SIZE_TX,
2204 fore200e->bus->descr_alignment) < 0) {
2205
1335d6fd 2206 fore200e_dma_chunk_free(fore200e, &txq->status);
1da177e4
LT
2207 return -ENOMEM;
2208 }
2209
2210 /* get the base address of the cp resident tx queue entries */
2211 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2212
2213 /* fill the host resident and cp resident tx entries */
2214 for (i=0; i < QUEUE_SIZE_TX; i++) {
2215
2216 txq->host_entry[ i ].status =
2217 FORE200E_INDEX(txq->status.align_addr, enum status, i);
2218 txq->host_entry[ i ].tpd =
2219 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2220 txq->host_entry[ i ].tpd_dma =
2221 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2222 txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2223
2224 *txq->host_entry[ i ].status = STATUS_FREE;
2225
2226 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2227 &cp_entry[ i ].status_haddr);
2228
2229	/* although there is a one-to-one mapping of tx queue entries and tpds,
2230	   we do not write the DMA (physical) base address of each tpd into the
2231	   related cp resident entry here, because the cp relies on that write
2232	   operation to detect that a new pdu has been submitted for tx */
2233 }
2234
2235 /* set the head and tail entries of the queue */
2236 txq->head = 0;
2237 txq->tail = 0;
2238
2239 fore200e->state = FORE200E_STATE_INIT_TXQ;
2240 return 0;
2241}
2242
2243
6c44512d 2244static int fore200e_init_cmd_queue(struct fore200e *fore200e)
1da177e4
LT
2245{
2246 struct host_cmdq* cmdq = &fore200e->host_cmdq;
2247 struct cp_cmdq_entry __iomem * cp_entry;
2248 int i;
2249
2250 DPRINTK(2, "command queue is being initialized\n");
2251
2252 /* allocate and align the array of status words */
1335d6fd 2253 if (fore200e_dma_chunk_alloc(fore200e,
1da177e4
LT
2254 &cmdq->status,
2255 sizeof(enum status),
2256 QUEUE_SIZE_CMD,
2257 fore200e->bus->status_alignment) < 0) {
2258 return -ENOMEM;
2259 }
2260
2261 /* get the base address of the cp resident cmd queue entries */
2262 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2263
2264 /* fill the host resident and cp resident cmd entries */
2265 for (i=0; i < QUEUE_SIZE_CMD; i++) {
2266
2267 cmdq->host_entry[ i ].status =
2268 FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2269 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2270
2271 *cmdq->host_entry[ i ].status = STATUS_FREE;
2272
2273 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2274 &cp_entry[ i ].status_haddr);
2275 }
2276
2277 /* set the head entry of the queue */
2278 cmdq->head = 0;
2279
2280 fore200e->state = FORE200E_STATE_INIT_CMDQ;
2281 return 0;
2282}
2283
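/* command submission follows the same pattern throughout this driver (see e.g.
   fore200e_set_oc3() above): take the entry at cmdq->head, advance the head with
   FORE200E_NEXT_ENTRY(), set *entry->status = STATUS_PENDING, write the opcode into
   the cp resident entry to kick the cp, then poll the status word for STATUS_COMPLETE
   and finally put it back to STATUS_FREE */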
2284
6c44512d
GKH
2285static void fore200e_param_bs_queue(struct fore200e *fore200e,
2286 enum buffer_scheme scheme,
2287 enum buffer_magn magn, int queue_length,
2288 int pool_size, int supply_blksize)
1da177e4
LT
2289{
2290 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2291
2292 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2293 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2294 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2295 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
2296}
2297
2298
6c44512d 2299static int fore200e_initialize(struct fore200e *fore200e)
1da177e4
LT
2300{
2301 struct cp_queues __iomem * cpq;
2302 int ok, scheme, magn;
2303
2304 DPRINTK(2, "device %s being initialized\n", fore200e->name);
2305
bfbf3c09 2306 mutex_init(&fore200e->rate_mtx);
1da177e4
LT
2307 spin_lock_init(&fore200e->q_lock);
2308
2309 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2310
2311 /* enable cp to host interrupts */
2312 fore200e->bus->write(1, &cpq->imask);
2313
2314 if (fore200e->bus->irq_enable)
2315 fore200e->bus->irq_enable(fore200e);
2316
2317 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2318
2319 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2320 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
2321 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);
2322
2323 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
2324 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);
2325
2326 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2327 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2328 fore200e_param_bs_queue(fore200e, scheme, magn,
2329 QUEUE_SIZE_BS,
2330 fore200e_rx_buf_nbr[ scheme ][ magn ],
2331 RBD_BLK_SIZE);
2332
2333 /* issue the initialize command */
2334 fore200e->bus->write(STATUS_PENDING, &cpq->init.status);
2335 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2336
2337 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2338 if (ok == 0) {
2339 printk(FORE200E "device %s initialization failed\n", fore200e->name);
2340 return -ENODEV;
2341 }
2342
2343 printk(FORE200E "device %s initialized\n", fore200e->name);
2344
2345 fore200e->state = FORE200E_STATE_INITIALIZE;
2346 return 0;
2347}
2348
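/* the initialize handshake works like an ordinary command, but against the cp resident
   init block: the host fills in the queue lengths and buffer scheme parameters, sets
   init.status to STATUS_PENDING, writes OPCODE_INITIALIZE and polls the status word for
   STATUS_COMPLETE with a larger poll budget (3000) than the 400 used for regular
   commands */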
2349
6c44512d 2350static void fore200e_monitor_putc(struct fore200e *fore200e, char c)
1da177e4
LT
2351{
2352 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2353
2354#if 0
2355 printk("%c", c);
2356#endif
2357 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2358}
2359
2360
6c44512d 2361static int fore200e_monitor_getc(struct fore200e *fore200e)
1da177e4
LT
2362{
2363 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2364 unsigned long timeout = jiffies + msecs_to_jiffies(50);
2365 int c;
2366
2367 while (time_before(jiffies, timeout)) {
2368
2369 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2370
2371 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2372
2373 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2374#if 0
2375 printk("%c", c & 0xFF);
2376#endif
2377 return c & 0xFF;
2378 }
2379 }
2380
2381 return -1;
2382}
2383
2384
6c44512d 2385static void fore200e_monitor_puts(struct fore200e *fore200e, char *str)
1da177e4
LT
2386{
2387 while (*str) {
2388
2389 /* the i960 monitor doesn't accept any new character if it has something to say */
2390 while (fore200e_monitor_getc(fore200e) >= 0);
2391
2392 fore200e_monitor_putc(fore200e, *str++);
2393 }
2394
2395 while (fore200e_monitor_getc(fore200e) >= 0);
2396}
2397
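/* the i960 "soft UART" uses one 32-bit mailbox word per direction in the cp monitor
   area: a character is sent by writing it together with FORE200E_CP_MONITOR_UART_AVAIL,
   and a received character is acknowledged by writing FORE200E_CP_MONITOR_UART_FREE
   back; fore200e_monitor_getc() gives up after roughly 50 ms */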
e92481f9
CW
2398#ifdef __LITTLE_ENDIAN
2399#define FW_EXT ".bin"
2400#else
2401#define FW_EXT "_ecd.bin2"
2402#endif
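/* the firmware image name is built from the bus proc_name plus FW_EXT, e.g.
   "pca200e.bin" on little-endian hosts and "pca200e_ecd.bin2" or "sba200e_ecd.bin2"
   otherwise, matching the MODULE_FIRMWARE() declarations at the end of this file */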
1da177e4 2403
6c44512d 2404static int fore200e_load_and_start_fw(struct fore200e *fore200e)
e92481f9
CW
2405{
2406 const struct firmware *firmware;
b65b24d4 2407 const struct fw_header *fw_header;
6f75a9b6
CW
2408 const __le32 *fw_data;
2409 u32 fw_size;
e92481f9
CW
2410 u32 __iomem *load_addr;
2411 char buf[48];
aff9d262 2412 int err;
e92481f9
CW
2413
2414 sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
aff9d262 2415 if ((err = request_firmware(&firmware, buf, fore200e->dev)) < 0) {
fcffd0d8 2416 printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name);
e92481f9
CW
2417 return err;
2418 }
2419
b65b24d4 2420 fw_data = (const __le32 *)firmware->data;
e92481f9 2421 fw_size = firmware->size / sizeof(u32);
b65b24d4 2422 fw_header = (const struct fw_header *)firmware->data;
e92481f9
CW
2423 load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2424
2425 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2426 fore200e->name, load_addr, fw_size);
2427
2428 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2429	printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
	err = -EINVAL;
2430	goto release;
2431 }
2432
2433 for (; fw_size--; fw_data++, load_addr++)
2434 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
1da177e4
LT
2435
2436 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2437
2438#if defined(__sparc_v9__)
2439 /* reported to be required by SBA cards on some sparc64 hosts */
2440 fore200e_spin(100);
2441#endif
2442
e92481f9
CW
2443 sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2444 fore200e_monitor_puts(fore200e, buf);
1da177e4 2445
e92481f9 2446 if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
1da177e4 2447 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
	err = -ENODEV;
e92481f9 2448 goto release;
1da177e4
LT
2449 }
2450
2451 printk(FORE200E "device %s firmware started\n", fore200e->name);
2452
2453 fore200e->state = FORE200E_STATE_START_FW;
e92481f9 2454 err = 0;
1da177e4 2455
e92481f9
CW
2456release:
2457 release_firmware(firmware);
2458 return err;
1da177e4
LT
2459}
2460
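/* firmware start sequence: the image is copied word by word to virt_base + load_offset,
   the command "go <start_offset>" is then typed into the i960 monitor through the soft
   UART, and the cp signals success by setting BSTAT_CP_RUNNING in its boot status word */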
2461
6c44512d 2462static int fore200e_register(struct fore200e *fore200e, struct device *parent)
1da177e4
LT
2463{
2464 struct atm_dev* atm_dev;
2465
2466 DPRINTK(2, "device %s being registered\n", fore200e->name);
2467
d9ca676b
DW
2468 atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops,
2469 -1, NULL);
1da177e4
LT
2470 if (atm_dev == NULL) {
2471 printk(FORE200E "unable to register device %s\n", fore200e->name);
2472 return -ENODEV;
2473 }
2474
2475 atm_dev->dev_data = fore200e;
2476 fore200e->atm_dev = atm_dev;
2477
2478 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2479 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2480
2481 fore200e->available_cell_rate = ATM_OC3_PCR;
2482
2483 fore200e->state = FORE200E_STATE_REGISTER;
2484 return 0;
2485}
2486
2487
6c44512d 2488static int fore200e_init(struct fore200e *fore200e, struct device *parent)
1da177e4 2489{
d9ca676b 2490 if (fore200e_register(fore200e, parent) < 0)
1da177e4
LT
2491 return -ENODEV;
2492
2493 if (fore200e->bus->configure(fore200e) < 0)
2494 return -ENODEV;
2495
2496 if (fore200e->bus->map(fore200e) < 0)
2497 return -ENODEV;
2498
2499 if (fore200e_reset(fore200e, 1) < 0)
2500 return -ENODEV;
2501
e92481f9 2502 if (fore200e_load_and_start_fw(fore200e) < 0)
1da177e4
LT
2503 return -ENODEV;
2504
2505 if (fore200e_initialize(fore200e) < 0)
2506 return -ENODEV;
2507
2508 if (fore200e_init_cmd_queue(fore200e) < 0)
2509 return -ENOMEM;
2510
2511 if (fore200e_init_tx_queue(fore200e) < 0)
2512 return -ENOMEM;
2513
2514 if (fore200e_init_rx_queue(fore200e) < 0)
2515 return -ENOMEM;
2516
2517 if (fore200e_init_bs_queue(fore200e) < 0)
2518 return -ENOMEM;
2519
2520 if (fore200e_alloc_rx_buf(fore200e) < 0)
2521 return -ENOMEM;
2522
2523 if (fore200e_get_esi(fore200e) < 0)
2524 return -EIO;
2525
2526 if (fore200e_irq_request(fore200e) < 0)
2527 return -EBUSY;
2528
2529 fore200e_supply(fore200e);
c027f5f9 2530
1da177e4
LT
2531 /* all done, board initialization is now complete */
2532 fore200e->state = FORE200E_STATE_COMPLETE;
2533 return 0;
2534}
2535
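/* each successful step above advances fore200e->state, which presumably lets
   fore200e_shutdown(), called by the probe routines on failure, undo only what was
   actually set up */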
826b6cfc 2536#ifdef CONFIG_SBUS
b1608d69 2537static const struct of_device_id fore200e_sba_match[];
6c44512d 2538static int fore200e_sba_probe(struct platform_device *op)
826b6cfc 2539{
b1608d69 2540 const struct of_device_id *match;
826b6cfc
DM
2541 struct fore200e *fore200e;
2542 static int index = 0;
2543 int err;
2544
b1608d69
GL
2545 match = of_match_device(fore200e_sba_match, &op->dev);
2546 if (!match)
1c48a5c9 2547 return -EINVAL;
1c48a5c9 2548
826b6cfc
DM
2549 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2550 if (!fore200e)
2551 return -ENOMEM;
2552
0efe5523 2553 fore200e->bus = &fore200e_sbus_ops;
aff9d262 2554 fore200e->dev = &op->dev;
1636f8ac 2555 fore200e->irq = op->archdata.irqs[0];
826b6cfc
DM
2556 fore200e->phys_base = op->resource[0].start;
2557
0efe5523 2558 sprintf(fore200e->name, "SBA-200E-%d", index);
826b6cfc 2559
d9ca676b 2560 err = fore200e_init(fore200e, &op->dev);
826b6cfc
DM
2561 if (err < 0) {
2562 fore200e_shutdown(fore200e);
2563 kfree(fore200e);
2564 return err;
2565 }
2566
2567 index++;
2568 dev_set_drvdata(&op->dev, fore200e);
2569
2570 return 0;
2571}
2572
6c44512d 2573static int fore200e_sba_remove(struct platform_device *op)
826b6cfc
DM
2574{
2575 struct fore200e *fore200e = dev_get_drvdata(&op->dev);
2576
2577 fore200e_shutdown(fore200e);
2578 kfree(fore200e);
2579
2580 return 0;
2581}
2582
fd098316 2583static const struct of_device_id fore200e_sba_match[] = {
826b6cfc
DM
2584 {
2585 .name = SBA200E_PROM_NAME,
826b6cfc
DM
2586 },
2587 {},
2588};
2589MODULE_DEVICE_TABLE(of, fore200e_sba_match);
2590
1c48a5c9 2591static struct platform_driver fore200e_sba_driver = {
4018294b
GL
2592 .driver = {
2593 .name = "fore_200e",
4018294b
GL
2594 .of_match_table = fore200e_sba_match,
2595 },
826b6cfc 2596 .probe = fore200e_sba_probe,
6c44512d 2597 .remove = fore200e_sba_remove,
826b6cfc
DM
2598};
2599#endif
2600
e92481f9 2601#ifdef CONFIG_PCI
6c44512d
GKH
2602static int fore200e_pca_detect(struct pci_dev *pci_dev,
2603 const struct pci_device_id *pci_ent)
1da177e4 2604{
1da177e4
LT
2605 struct fore200e* fore200e;
2606 int err = 0;
2607 static int index = 0;
2608
2609 if (pci_enable_device(pci_dev)) {
2610 err = -EINVAL;
2611 goto out;
2612 }
ede58ef2
C
2613
2614 if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
2615 err = -EINVAL;
2616	goto out_disable;
2617 }
1da177e4 2618
1f8a5fb8 2619 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
1da177e4
LT
2620 if (fore200e == NULL) {
2621 err = -ENOMEM;
2622 goto out_disable;
2623 }
2624
0efe5523 2625 fore200e->bus = &fore200e_pci_ops;
aff9d262 2626 fore200e->dev = &pci_dev->dev;
1da177e4
LT
2627 fore200e->irq = pci_dev->irq;
2628 fore200e->phys_base = pci_resource_start(pci_dev, 0);
2629
0efe5523 2630 sprintf(fore200e->name, "PCA-200E-%d", index - 1);
1da177e4
LT
2631
2632 pci_set_master(pci_dev);
2633
0efe5523 2634 printk(FORE200E "device PCA-200E found at 0x%lx, IRQ %s\n",
1da177e4
LT
2635 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2636
0efe5523 2637 sprintf(fore200e->name, "PCA-200E-%d", index);
1da177e4 2638
d9ca676b 2639 err = fore200e_init(fore200e, &pci_dev->dev);
1da177e4
LT
2640 if (err < 0) {
2641 fore200e_shutdown(fore200e);
2642 goto out_free;
2643 }
2644
2645 ++index;
2646 pci_set_drvdata(pci_dev, fore200e);
2647
2648out:
2649 return err;
2650
2651out_free:
2652 kfree(fore200e);
2653out_disable:
2654 pci_disable_device(pci_dev);
2655 goto out;
2656}
2657
2658
6c44512d 2659static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
1da177e4
LT
2660{
2661 struct fore200e *fore200e;
2662
2663 fore200e = pci_get_drvdata(pci_dev);
2664
1da177e4
LT
2665 fore200e_shutdown(fore200e);
2666 kfree(fore200e);
2667 pci_disable_device(pci_dev);
2668}
2669
2670
d5c5665d 2671static const struct pci_device_id fore200e_pca_tbl[] = {
0efe5523 2672 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID },
1da177e4
LT
2673 { 0, }
2674};
2675
2676MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2677
2678static struct pci_driver fore200e_pca_driver = {
2679 .name = "fore_200e",
2680 .probe = fore200e_pca_detect,
6c44512d 2681 .remove = fore200e_pca_remove_one,
1da177e4
LT
2682 .id_table = fore200e_pca_tbl,
2683};
2684#endif
2685
826b6cfc 2686static int __init fore200e_module_init(void)
1da177e4 2687{
74e8ce34 2688 int err = 0;
1da177e4 2689
826b6cfc 2690 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
1da177e4 2691
826b6cfc 2692#ifdef CONFIG_SBUS
1c48a5c9 2693 err = platform_driver_register(&fore200e_sba_driver);
826b6cfc
DM
2694 if (err)
2695 return err;
2696#endif
1da177e4 2697
e92481f9 2698#ifdef CONFIG_PCI
826b6cfc 2699 err = pci_register_driver(&fore200e_pca_driver);
1da177e4
LT
2700#endif
2701
826b6cfc
DM
2702#ifdef CONFIG_SBUS
2703 if (err)
1c48a5c9 2704 platform_driver_unregister(&fore200e_sba_driver);
826b6cfc 2705#endif
1da177e4 2706
826b6cfc 2707 return err;
1da177e4
LT
2708}
2709
826b6cfc 2710static void __exit fore200e_module_cleanup(void)
1da177e4 2711{
e92481f9 2712#ifdef CONFIG_PCI
826b6cfc
DM
2713 pci_unregister_driver(&fore200e_pca_driver);
2714#endif
2715#ifdef CONFIG_SBUS
1c48a5c9 2716 platform_driver_unregister(&fore200e_sba_driver);
1da177e4 2717#endif
1da177e4
LT
2718}
2719
1da177e4
LT
2720static int
2721fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2722{
2723 struct fore200e* fore200e = FORE200E_DEV(dev);
2724 struct fore200e_vcc* fore200e_vcc;
2725 struct atm_vcc* vcc;
2726 int i, len, left = *pos;
2727 unsigned long flags;
2728
2729 if (!left--) {
2730
2731 if (fore200e_getstats(fore200e) < 0)
2732 return -EIO;
2733
2734 len = sprintf(page,"\n"
2735 " device:\n"
2736 " internal name:\t\t%s\n", fore200e->name);
2737
2738 /* print bus-specific information */
2739 if (fore200e->bus->proc_read)
2740 len += fore200e->bus->proc_read(fore200e, page + len);
2741
2742 len += sprintf(page + len,
2743 " interrupt line:\t\t%s\n"
2744 " physical base address:\t0x%p\n"
2745 " virtual base address:\t0x%p\n"
3008ab36 2746 " factory address (ESI):\t%pM\n"
1da177e4
LT
2747 " board serial number:\t\t%d\n\n",
2748 fore200e_irq_itoa(fore200e->irq),
2749 (void*)fore200e->phys_base,
2750 fore200e->virt_base,
3008ab36 2751 fore200e->esi,
1da177e4
LT
2752 fore200e->esi[4] * 256 + fore200e->esi[5]);
2753
2754 return len;
2755 }
2756
2757 if (!left--)
2758 return sprintf(page,
2759 " free small bufs, scheme 1:\t%d\n"
2760 " free large bufs, scheme 1:\t%d\n"
2761 " free small bufs, scheme 2:\t%d\n"
2762 " free large bufs, scheme 2:\t%d\n",
2763 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2764 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2765 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2766 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2767
2768 if (!left--) {
2769 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2770
2771 len = sprintf(page,"\n\n"
2772 " cell processor:\n"
2773 " heartbeat state:\t\t");
2774
2775 if (hb >> 16 != 0xDEAD)
2776 len += sprintf(page + len, "0x%08x\n", hb);
2777 else
2778 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2779
2780 return len;
2781 }
2782
2783 if (!left--) {
2784 static const char* media_name[] = {
2785 "unshielded twisted pair",
2786 "multimode optical fiber ST",
2787 "multimode optical fiber SC",
2788 "single-mode optical fiber ST",
2789 "single-mode optical fiber SC",
2790 "unknown"
2791 };
2792
2793 static const char* oc3_mode[] = {
2794 "normal operation",
2795 "diagnostic loopback",
2796 "line loopback",
2797 "unknown"
2798 };
2799
2800 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2801 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2802 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2803 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2804 u32 oc3_index;
2805
e0c5567d 2806 if (media_index > 4)
2807 media_index = 5;
1da177e4
LT
2808
2809 switch (fore200e->loop_mode) {
2810 case ATM_LM_NONE: oc3_index = 0;
2811 break;
2812 case ATM_LM_LOC_PHY: oc3_index = 1;
2813 break;
2814 case ATM_LM_RMT_PHY: oc3_index = 2;
2815 break;
2816 default: oc3_index = 3;
2817 }
2818
2819 return sprintf(page,
2820 " firmware release:\t\t%d.%d.%d\n"
2821 " monitor release:\t\t%d.%d\n"
2822 " media type:\t\t\t%s\n"
2823 " OC-3 revision:\t\t0x%x\n"
2824 " OC-3 mode:\t\t\t%s",
2825 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
2826 mon960_release >> 16, mon960_release << 16 >> 16,
2827 media_name[ media_index ],
2828 oc3_revision,
2829 oc3_mode[ oc3_index ]);
2830 }
2831
2832 if (!left--) {
2833 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2834
2835 return sprintf(page,
2836 "\n\n"
2837 " monitor:\n"
2838 " version number:\t\t%d\n"
2839 " boot status word:\t\t0x%08x\n",
2840 fore200e->bus->read(&cp_monitor->mon_version),
2841 fore200e->bus->read(&cp_monitor->bstat));
2842 }
2843
2844 if (!left--)
2845 return sprintf(page,
2846 "\n"
2847 " device statistics:\n"
2848 " 4b5b:\n"
2849 " crc_header_errors:\t\t%10u\n"
2850 " framing_errors:\t\t%10u\n",
63734a32
AV
2851 be32_to_cpu(fore200e->stats->phy.crc_header_errors),
2852 be32_to_cpu(fore200e->stats->phy.framing_errors));
1da177e4
LT
2853
2854 if (!left--)
2855 return sprintf(page, "\n"
2856 " OC-3:\n"
2857 " section_bip8_errors:\t%10u\n"
2858 " path_bip8_errors:\t\t%10u\n"
2859 " line_bip24_errors:\t\t%10u\n"
2860 " line_febe_errors:\t\t%10u\n"
2861 " path_febe_errors:\t\t%10u\n"
2862 " corr_hcs_errors:\t\t%10u\n"
2863 " ucorr_hcs_errors:\t\t%10u\n",
63734a32
AV
2864 be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
2865 be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
2866 be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
2867 be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
2868 be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
2869 be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
2870 be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));
1da177e4
LT
2871
2872 if (!left--)
2873 return sprintf(page,"\n"
2874 " ATM:\t\t\t\t cells\n"
2875 " TX:\t\t\t%10u\n"
2876 " RX:\t\t\t%10u\n"
2877 " vpi out of range:\t\t%10u\n"
2878 " vpi no conn:\t\t%10u\n"
2879 " vci out of range:\t\t%10u\n"
2880 " vci no conn:\t\t%10u\n",
63734a32
AV
2881 be32_to_cpu(fore200e->stats->atm.cells_transmitted),
2882 be32_to_cpu(fore200e->stats->atm.cells_received),
2883 be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
2884 be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
2885 be32_to_cpu(fore200e->stats->atm.vci_bad_range),
2886 be32_to_cpu(fore200e->stats->atm.vci_no_conn));
1da177e4
LT
2887
2888 if (!left--)
2889 return sprintf(page,"\n"
2890 " AAL0:\t\t\t cells\n"
2891 " TX:\t\t\t%10u\n"
2892 " RX:\t\t\t%10u\n"
2893 " dropped:\t\t\t%10u\n",
63734a32
AV
2894 be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
2895 be32_to_cpu(fore200e->stats->aal0.cells_received),
2896 be32_to_cpu(fore200e->stats->aal0.cells_dropped));
1da177e4
LT
2897
2898 if (!left--)
2899 return sprintf(page,"\n"
2900 " AAL3/4:\n"
2901 " SAR sublayer:\t\t cells\n"
2902 " TX:\t\t\t%10u\n"
2903 " RX:\t\t\t%10u\n"
2904 " dropped:\t\t\t%10u\n"
2905 " CRC errors:\t\t%10u\n"
2906 " protocol errors:\t\t%10u\n\n"
2907 " CS sublayer:\t\t PDUs\n"
2908 " TX:\t\t\t%10u\n"
2909 " RX:\t\t\t%10u\n"
2910 " dropped:\t\t\t%10u\n"
2911 " protocol errors:\t\t%10u\n",
63734a32
AV
2912 be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
2913 be32_to_cpu(fore200e->stats->aal34.cells_received),
2914 be32_to_cpu(fore200e->stats->aal34.cells_dropped),
2915 be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
2916 be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
2917 be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
2918 be32_to_cpu(fore200e->stats->aal34.cspdus_received),
2919 be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
2920 be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));
1da177e4
LT
2921
2922 if (!left--)
2923 return sprintf(page,"\n"
2924 " AAL5:\n"
2925 " SAR sublayer:\t\t cells\n"
2926 " TX:\t\t\t%10u\n"
2927 " RX:\t\t\t%10u\n"
2928 " dropped:\t\t\t%10u\n"
2929 " congestions:\t\t%10u\n\n"
2930 " CS sublayer:\t\t PDUs\n"
2931 " TX:\t\t\t%10u\n"
2932 " RX:\t\t\t%10u\n"
2933 " dropped:\t\t\t%10u\n"
2934 " CRC errors:\t\t%10u\n"
2935 " protocol errors:\t\t%10u\n",
63734a32
AV
2936 be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
2937 be32_to_cpu(fore200e->stats->aal5.cells_received),
2938 be32_to_cpu(fore200e->stats->aal5.cells_dropped),
2939 be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
2940 be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
2941 be32_to_cpu(fore200e->stats->aal5.cspdus_received),
2942 be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
2943 be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
2944 be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));
1da177e4
LT
2945
2946 if (!left--)
2947 return sprintf(page,"\n"
2948 " AUX:\t\t allocation failures\n"
2949 " small b1:\t\t\t%10u\n"
2950 " large b1:\t\t\t%10u\n"
2951 " small b2:\t\t\t%10u\n"
2952 " large b2:\t\t\t%10u\n"
2953 " RX PDUs:\t\t\t%10u\n"
2954 " TX PDUs:\t\t\t%10lu\n",
63734a32
AV
2955 be32_to_cpu(fore200e->stats->aux.small_b1_failed),
2956 be32_to_cpu(fore200e->stats->aux.large_b1_failed),
2957 be32_to_cpu(fore200e->stats->aux.small_b2_failed),
2958 be32_to_cpu(fore200e->stats->aux.large_b2_failed),
2959 be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
1da177e4
LT
2960 fore200e->tx_sat);
2961
2962 if (!left--)
2963 return sprintf(page,"\n"
2964 " receive carrier:\t\t\t%s\n",
2965 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
2966
2967 if (!left--) {
2968 return sprintf(page,"\n"
2969 " VCCs:\n address VPI VCI AAL "
2970 "TX PDUs TX min/max size RX PDUs RX min/max size\n");
2971 }
2972
2973 for (i = 0; i < NBR_CONNECT; i++) {
2974
2975 vcc = fore200e->vc_map[i].vcc;
2976
2977 if (vcc == NULL)
2978 continue;
2979
2980 spin_lock_irqsave(&fore200e->q_lock, flags);
2981
2982 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
2983
2984 fore200e_vcc = FORE200E_VCC(vcc);
2985 ASSERT(fore200e_vcc);
2986
2987 len = sprintf(page,
22dac9f1
CIK
2988 " %pK %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
2989 vcc,
1da177e4
LT
2990 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
2991 fore200e_vcc->tx_pdu,
2992 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
2993 fore200e_vcc->tx_max_pdu,
2994 fore200e_vcc->rx_pdu,
2995 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
2996 fore200e_vcc->rx_max_pdu);
2997
2998 spin_unlock_irqrestore(&fore200e->q_lock, flags);
2999 return len;
3000 }
3001
3002 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3003 }
3004
3005 return 0;
3006}
3007
3008module_init(fore200e_module_init);
3009module_exit(fore200e_module_cleanup);
3010
3011
0efe5523 3012static const struct atmdev_ops fore200e_ops = {
1da177e4
LT
3013 .open = fore200e_open,
3014 .close = fore200e_close,
3015 .ioctl = fore200e_ioctl,
3016 .getsockopt = fore200e_getsockopt,
3017 .setsockopt = fore200e_setsockopt,
3018 .send = fore200e_send,
3019 .change_qos = fore200e_change_qos,
3020 .proc_read = fore200e_proc_read,
3021 .owner = THIS_MODULE
3022};
3023
1da177e4 3024MODULE_LICENSE("GPL");
6f75a9b6
CW
3025#ifdef CONFIG_PCI
3026#ifdef __LITTLE_ENDIAN
3027MODULE_FIRMWARE("pca200e.bin");
3028#else
3029MODULE_FIRMWARE("pca200e_ecd.bin2");
3030#endif
3031#endif /* CONFIG_PCI */
3032#ifdef CONFIG_SBUS
3033MODULE_FIRMWARE("sba200e_ecd.bin2");
1da177e4 3034#endif