/*
 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
 *
 * Author: Martyn Welch <martyn.welch@gefanuc.com>
 * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * Derived from ca91c042.c by Michael Wyrick
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/uaccess.h>

#include "../vme.h"
#include "../vme_bridge.h"
#include "vme_ca91cx42.h"

static int __init ca91cx42_init(void);
static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);
static void __exit ca91cx42_exit(void);

struct vme_bridge *ca91cx42_bridge;
wait_queue_head_t dma_queue;
wait_queue_head_t iack_queue;
wait_queue_head_t lm_queue;
wait_queue_head_t mbox_queue;

void (*lm_callback[4])(int);	/* Called in interrupt handler, be careful! */
void *crcsr_kernel;
dma_addr_t crcsr_bus;

struct mutex vme_rmw;	/* Only one RMW cycle at a time */
struct mutex vme_int;	/*
			 * Only one VME interrupt can be
			 * generated at a time, provide locking
			 */

static char driver_name[] = "vme_ca91cx42";

static struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};

static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};
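
/*
 * Interrupt servicing. Each helper below handles one LINT source and
 * returns the LINT_STAT bit(s) it serviced; the top-level handler ORs
 * these together and acknowledges them with a single LINT_STAT write.
 */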

static u32 ca91cx42_DMA_irqhandler(void)
{
	wake_up(&dma_queue);

	return CA91CX42_LINT_DMA;
}

static u32 ca91cx42_LM_irqhandler(u32 stat)
{
	int i;
	u32 serviced = 0;

	for (i = 0; i < 4; i++) {
		if (stat & CA91CX42_LINT_LM[i]) {
			/* We only enable interrupts if the callback is set */
			lm_callback[i](i);
			serviced |= CA91CX42_LINT_LM[i];
		}
	}

	return serviced;
}

/* XXX This needs to be split into 4 queues */
static u32 ca91cx42_MB_irqhandler(int mbox_mask)
{
	wake_up(&mbox_queue);

	return CA91CX42_LINT_MBOX;
}

static u32 ca91cx42_IACK_irqhandler(void)
{
	wake_up(&iack_queue);

	return CA91CX42_LINT_SW_IACK;
}

int ca91cx42_bus_error_chk(int clrflag)
{
	int tmp;

	tmp = ioread32(ca91cx42_bridge->base + PCI_COMMAND);
	if (tmp & 0x08000000) {	/* S_TA is Set */
		if (clrflag)
			iowrite32(tmp | 0x08000000,
				ca91cx42_bridge->base + PCI_COMMAND);
		return 1;
	}
	return 0;
}

static u32 ca91cx42_VERR_irqhandler(void)
{
	int val;

	val = ioread32(ca91cx42_bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		printk(KERN_ERR "ca91c042: ca91cx42_VERR_irqhandler DMA Read "
			"Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_VERR;
}

static u32 ca91cx42_LERR_irqhandler(void)
{
	int val;

	val = ioread32(ca91cx42_bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		printk(KERN_ERR "ca91c042: ca91cx42_LERR_irqhandler DMA Read "
			"Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_LERR;
}

static u32 ca91cx42_VIRQ_irqhandler(int stat)
{
	int vec, i, serviced = 0;

	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			vec = ioread32(ca91cx42_bridge->base +
				CA91CX42_V_STATID[i]) & 0xff;

			vme_irq_handler(ca91cx42_bridge, i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}
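
/*
 * Top level interrupt handler: mask the pending set (LINT_STAT) against
 * the enabled set (LINT_EN), dispatch each remaining source to its helper
 * above, then acknowledge everything serviced in one LINT_STAT write.
 */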

static irqreturn_t ca91cx42_irqhandler(int irq, void *dev_id)
{
	u32 stat, enable, serviced = 0;

	if (dev_id != ca91cx42_bridge->base)
		return IRQ_NONE;

	enable = ioread32(ca91cx42_bridge->base + LINT_EN);
	stat = ioread32(ca91cx42_bridge->base + LINT_STAT);

	/* Only look at unmasked interrupts */
	stat &= enable;

	if (unlikely(!stat))
		return IRQ_NONE;

	if (stat & CA91CX42_LINT_DMA)
		serviced |= ca91cx42_DMA_irqhandler();
	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3))
		serviced |= ca91cx42_LM_irqhandler(stat);
	if (stat & CA91CX42_LINT_MBOX)
		serviced |= ca91cx42_MB_irqhandler(stat);
	if (stat & CA91CX42_LINT_SW_IACK)
		serviced |= ca91cx42_IACK_irqhandler();
	if (stat & CA91CX42_LINT_VERR)
		serviced |= ca91cx42_VERR_irqhandler();
	if (stat & CA91CX42_LINT_LERR)
		serviced |= ca91cx42_LERR_irqhandler();
	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
			CA91CX42_LINT_VIRQ7))
		serviced |= ca91cx42_VIRQ_irqhandler(stat);

	/* Clear serviced interrupts */
	iowrite32(stat, ca91cx42_bridge->base + LINT_STAT);

	return IRQ_HANDLED;
}

static int ca91cx42_irq_init(struct vme_bridge *bridge)
{
	int result, tmp;
	struct pci_dev *pdev;

	/* Need pdev */
	pdev = container_of(bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&(bridge->vme_errors));

	mutex_init(&(bridge->irq_mtx));

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, pdev);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
			pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable DMA, mailbox & LM Interrupts */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}

static void ca91cx42_irq_exit(struct pci_dev *pdev)
{
	/* Disable interrupts from PCI to VME */
	iowrite32(0, ca91cx42_bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, ca91cx42_bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, ca91cx42_bridge->base + LINT_STAT);

	free_irq(pdev->irq, pdev);
}

/*
 * Set up a VME interrupt
 */
void ca91cx42_irq_set(int level, int state, int sync)
{
	u32 tmp;
	struct pci_dev *pdev;

	/* Enable IRQ level */
	tmp = ioread32(ca91cx42_bridge->base + LINT_EN);

	if (state == 0)
		tmp &= ~CA91CX42_LINT_VIRQ[level];
	else
		tmp |= CA91CX42_LINT_VIRQ[level];

	iowrite32(tmp, ca91cx42_bridge->base + LINT_EN);

	if ((state == 0) && (sync != 0)) {
		pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
			dev);

		synchronize_irq(pdev->irq);
	}
}
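
/*
 * Generate a VME bus interrupt at the given level. The Universe can only
 * generate even-numbered status/IDs, so odd values are rejected; the
 * interrupt is held asserted while we sleep on iack_queue, which the
 * software IACK handler is expected to wake.
 */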

int ca91cx42_irq_generate(int level, int statid)
{
	u32 tmp;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&(vme_int));

	tmp = ioread32(ca91cx42_bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, ca91cx42_bridge->base + STATID);

	/* Assert VMEbus IRQ */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

	/* XXX Consider implementing a timeout? */
	wait_event_interruptible(iack_queue, 0);

	/* Return interrupt to low state */
	tmp = ioread32(ca91cx42_bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

	mutex_unlock(&(vme_int));

	return 0;
}
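
/*
 * VME slave images (VSI windows) map inbound VME cycles onto PCI. Each
 * window is programmed with a base, a bound (the last valid address) and
 * a VME-to-PCI translation offset; windows 0 and 4 have 4 KB granularity,
 * the remaining windows 64 KB.
 */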

int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
	unsigned int i, addr = 0, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned int vme_bound, pci_offset;

	i = image->number;

	switch (aspace) {
	case VME_A16:
		addr |= CA91CX42_VSI_CTL_VAS_A16;
		break;
	case VME_A24:
		addr |= CA91CX42_VSI_CTL_VAS_A24;
		break;
	case VME_A32:
		addr |= CA91CX42_VSI_CTL_VAS_A32;
		break;
	case VME_USER1:
		addr |= CA91CX42_VSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		addr |= CA91CX42_VSI_CTL_VAS_USER2;
		break;
	default:
		printk(KERN_ERR "Invalid address space\n");
		return -EINVAL;
	}

	/* Determine granularity before computing the bound address */
	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size - granularity;
	pci_offset = pci_base - vme_base;

	/* XXX Need to check that vme_base, vme_bound and pci_offset aren't
	 * too big for registers
	 */

	if (vme_base & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME base alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME bound alignment\n");
		return -EINVAL;
	}
	if (pci_offset & (granularity - 1)) {
		printk(KERN_ERR "Invalid PCI Offset alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	/* Setup mapping */
	iowrite32(vme_base, ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
	iowrite32(vme_bound, ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
	iowrite32(pci_offset, ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);

/* XXX Prefetch stuff currently unsupported */
#if 0
	if (vmeIn->wrPostEnable)
		temp_ctl |= CA91CX42_VSI_CTL_PWEN;
	if (vmeIn->prefetchEnable)
		temp_ctl |= CA91CX42_VSI_CTL_PREN;
	if (vmeIn->rmwLock)
		temp_ctl |= CA91CX42_VSI_CTL_LLRMW;
	if (vmeIn->data64BitCapable)
		temp_ctl |= CA91CX42_VSI_CTL_LD64EN;
#endif

	/* Setup address space */
	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
	temp_ctl |= addr;

	/* Setup cycle types */
	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_VSI_CTL_EN;

	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	return 0;
}
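
/*
 * Read a slave image's configuration straight back from the VSI
 * registers; the window size is reconstructed as (bound - base) +
 * granularity.
 */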

int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
	unsigned int i, granularity = 0, ctl = 0;
	unsigned long long vme_bound, pci_offset;

	i = image->number;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Read Registers */
	ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	*vme_base = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
	vme_bound = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
	pci_offset = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);

	*pci_base = (dma_addr_t)(*vme_base + pci_offset);
	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;

	if (ctl & CA91CX42_VSI_CTL_EN)
		*enabled = 1;

	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
		*aspace = VME_A16;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
		*aspace = VME_A24;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
		*aspace = VME_A32;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
		*aspace = VME_USER1;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
		*aspace = VME_USER2;

	if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
		*cycle |= VME_USER;
	if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
		*cycle |= VME_DATA;

	return 0;
}

/*
 * Allocate and map PCI Resource
 */
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;

	/* Find pci_dev container of dev */
	if (ca91cx42_bridge->parent == NULL) {
		printk(KERN_ERR "Dev entry NULL\n");
		return -EINVAL;
	}
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	existing_size = (unsigned long long)(image->pci_resource.end -
		image->pci_resource.start);

	/* If the existing size is OK, return */
	if (existing_size == (size - 1))
		return 0;

	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		if (image->pci_resource.name != NULL)
			kfree(image->pci_resource.name);
		release_resource(&(image->pci_resource));
		memset(&(image->pci_resource), 0, sizeof(struct resource));
	}

	if (image->pci_resource.name == NULL) {
		image->pci_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
		if (image->pci_resource.name == NULL) {
			printk(KERN_ERR "Unable to allocate memory for resource"
				" name\n");
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->pci_resource.name, "%s.%d",
		ca91cx42_bridge->name, image->number);

	image->pci_resource.start = 0;
	image->pci_resource.end = (unsigned long)size;
	image->pci_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&(image->pci_resource), size, size, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		printk(KERN_ERR "Failed to allocate mem resource for "
			"window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->pci_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap_nocache(
		image->pci_resource.start, size);
	if (image->kern_base == NULL) {
		printk(KERN_ERR "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

	iounmap(image->kern_base);
	image->kern_base = NULL;
err_remap:
	release_resource(&(image->pci_resource));
err_resource:
	kfree(image->pci_resource.name);
	memset(&(image->pci_resource), 0, sizeof(struct resource));
err_name:
	return retval;
}

/*
 * Free and unmap PCI Resource
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&(image->pci_resource));
	kfree(image->pci_resource.name);
	memset(&(image->pci_resource), 0, sizeof(struct resource));
}
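
/*
 * Master (LSI) images map PCI accesses out onto the VME bus. The window
 * is backed by PCI space obtained from ca91cx42_alloc_resource(), and
 * base, bound and translation offset all require 4 KB alignment, hence
 * the 0xFFF checks below.
 */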

int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	int retval = 0;
	unsigned int i;
	unsigned int temp_ctl = 0;
	unsigned long long pci_bound, vme_offset, pci_base;

	/* Verify input data */
	if (vme_base & 0xFFF) {
		printk(KERN_ERR "Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}
	if (size & 0xFFF) {
		printk(KERN_ERR "Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&(image->lock));

	/* XXX We should do this much later, so that we can exit without
	 * needing to redo the mapping...
	 */
	/*
	 * Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependent stuff up the stack
	 */
	retval = ca91cx42_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Unable to allocate memory for resource "
			"name\n");
		retval = -ENOMEM;
		goto err_res;
	}

	pci_base = (unsigned long long)image->pci_resource.start;

	/*
	 * Bound address is a valid address for the window, adjust
	 * according to window granularity.
	 */
	pci_bound = pci_base + (size - 0x1000);
	vme_offset = vme_base - pci_base;

	i = image->number;

	/* Disable while we are mucking around */
	temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

/* XXX Prefetch stuff currently unsupported */
#if 0
	if (vmeOut->wrPostEnable)
		temp_ctl |= 0x40000000;
#endif

	/* Setup cycle types */
	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
	if (cycle & VME_BLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

	/* Setup data width */
	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
	switch (dwidth) {
	case VME_D8:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
		break;
	case VME_D16:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
		break;
	case VME_D32:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
		break;
	case VME_D64:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
		break;
	default:
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
		break;
	case VME_A24:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
		break;
	case VME_A32:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
		break;
	case VME_CRCSR:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
		break;
	default:
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

	/* Setup mapping */
	iowrite32(pci_base, ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
	iowrite32(pci_bound, ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
	iowrite32(vme_offset, ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_LSI_CTL_EN;

	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	spin_unlock(&(image->lock));

	return 0;

err_aspace:
err_dwidth:
	ca91cx42_free_resource(image);
err_res:
err_window:
	return retval;
}

int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	unsigned int i, ctl;
	unsigned long long pci_base, pci_bound, vme_offset;

	i = image->number;

	ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	pci_base = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
	vme_offset = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
	pci_bound = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);

	*vme_base = pci_base + vme_offset;
	*size = (pci_bound - pci_base) + 0x1000;

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;
	*dwidth = 0;

	if (ctl & CA91CX42_LSI_CTL_EN)
		*enabled = 1;

	/* Setup address space */
	switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
	case CA91CX42_LSI_CTL_VAS_A16:
		*aspace = VME_A16;
		break;
	case CA91CX42_LSI_CTL_VAS_A24:
		*aspace = VME_A24;
		break;
	case CA91CX42_LSI_CTL_VAS_A32:
		*aspace = VME_A32;
		break;
	case CA91CX42_LSI_CTL_VAS_CRCSR:
		*aspace = VME_CRCSR;
		break;
	case CA91CX42_LSI_CTL_VAS_USER1:
		*aspace = VME_USER1;
		break;
	case CA91CX42_LSI_CTL_VAS_USER2:
		*aspace = VME_USER2;
		break;
	}

	/* XXX Not sure how to check for MBLT */
	/* Setup cycle types */
	if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
		*cycle |= VME_BLT;
	else
		*cycle |= VME_SCT;

	if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	else
		*cycle |= VME_USER;

	if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	else
		*cycle |= VME_DATA;

	/* Setup data width */
	switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
	case CA91CX42_LSI_CTL_VDW_D8:
		*dwidth = VME_D8;
		break;
	case CA91CX42_LSI_CTL_VDW_D16:
		*dwidth = VME_D16;
		break;
	case CA91CX42_LSI_CTL_VDW_D32:
		*dwidth = VME_D32;
		break;
	case CA91CX42_LSI_CTL_VDW_D64:
		*dwidth = VME_D64;
		break;
	}

/* XXX Prefetch stuff currently unsupported */
#if 0
	if (ctl & 0x40000000)
		vmeOut->wrPostEnable = 1;
#endif

	return 0;
}

int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	int retval;

	spin_lock(&(image->lock));

	retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);

	spin_unlock(&(image->lock));

	return retval;
}

ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval;

	spin_lock(&(image->lock));

	memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
	retval = count;

	spin_unlock(&(image->lock));

	return retval;
}

ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval;

	spin_lock(&(image->lock));

	memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
	retval = count;

	spin_unlock(&(image->lock));

	return retval;
}

int ca91cx42_slot_get(void)
{
	u32 slot = 0;

	slot = ioread32(ca91cx42_bridge->base + VCSR_BS);
	slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);

	return (int)slot;
}

static int __init ca91cx42_init(void)
{
	return pci_register_driver(&ca91cx42_driver);
}
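
/*
 * CR/CSR space is laid out as one 512 KB image per slot, so a board in
 * slot N responds at offset N * 512K. VCSR_TO is programmed with
 * (crcsr_bus - slot * 512K) so that inbound CR/CSR accesses land in the
 * buffer allocated below.
 */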

/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 */
static int ca91cx42_crcsr_init(struct pci_dev *pdev)
{
	unsigned int crcsr_addr;
	int tmp, slot;

/* XXX We may need to set this somehow as the Universe II does not support
 * geographical addressing.
 */
#if 0
	if (vme_slotnum != -1)
		iowrite32(vme_slotnum << 27, ca91cx42_bridge->base + VCSR_BS);
#endif

	slot = ca91cx42_slot_get();
	dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
	if (slot == 0) {
		dev_err(&pdev->dev, "Slot number is unset, not configuring "
			"CR/CSR space\n");
		return -EINVAL;
	}

	/* Allocate mem for CR/CSR image */
	crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
		&crcsr_bus);
	if (crcsr_kernel == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
			"image\n");
		return -ENOMEM;
	}

	memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

	crcsr_addr = slot * (512 * 1024);
	iowrite32(crcsr_bus - crcsr_addr, ca91cx42_bridge->base + VCSR_TO);

	tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
	tmp |= CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);

	return 0;
}

static void ca91cx42_crcsr_exit(struct pci_dev *pdev)
{
	u32 tmp;

	/* Turn off CR/CSR space */
	tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
	tmp &= ~CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);

	/* Free image */
	iowrite32(0, ca91cx42_bridge->base + VCSR_TO);

	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus);
}
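
/*
 * Probe sequence: allocate the bridge structure, enable and map the PCI
 * device, sanity-check the chip by reading CA91CX42_PCI_ID through the
 * new mapping, set up interrupts, build the master/slave/DMA/location-
 * monitor resource lists, hook up the bridge operations and register
 * with the VME core. Errors unwind through the labels at the bottom.
 */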

static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval, i;
	u32 data;
	struct list_head *pos = NULL;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* We want to support more than one of each bridge so we need to
	 * dynamically allocate the bridge structure
	 */
	ca91cx42_bridge = kmalloc(sizeof(struct vme_bridge), GFP_KERNEL);

	if (ca91cx42_bridge == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_struct;
	}

	memset(ca91cx42_bridge, 0, sizeof(struct vme_bridge));

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err_enable;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* map registers in BAR 0 */
	ca91cx42_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0),
		4096);
	if (!ca91cx42_bridge->base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Check to see if the mapping worked out */
	data = ioread32(ca91cx42_bridge->base + CA91CX42_PCI_ID) & 0x0000FFFF;
	if (data != PCI_VENDOR_ID_TUNDRA) {
		dev_err(&pdev->dev, "PCI_ID check failed\n");
		retval = -EIO;
		goto err_test;
	}

	/* Initialize wait queues & mutual exclusion flags */
	/* XXX These need to be moved to the vme_bridge structure */
	init_waitqueue_head(&dma_queue);
	init_waitqueue_head(&iack_queue);
	mutex_init(&(vme_int));
	mutex_init(&(vme_rmw));

	ca91cx42_bridge->parent = &(pdev->dev);
	strcpy(ca91cx42_bridge->name, driver_name);

	/* Setup IRQ */
	retval = ca91cx42_irq_init(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Initialization failed.\n");
		goto err_irq;
	}

	/* Add master windows to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->master_resources));
	for (i = 0; i < CA91C142_MAX_MASTER; i++) {
		master_image = kmalloc(sizeof(struct vme_master_resource),
			GFP_KERNEL);
		if (master_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"master resource structure\n");
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = ca91cx42_bridge;
		spin_lock_init(&(master_image->lock));
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_CRCSR | VME_USER1 | VME_USER2;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
		memset(&(master_image->pci_resource), 0,
			sizeof(struct resource));
		master_image->kern_base = NULL;
		list_add_tail(&(master_image->list),
			&(ca91cx42_bridge->master_resources));
	}

	/* Add slave windows to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->slave_resources));
	for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(struct vme_slave_resource),
			GFP_KERNEL);
		if (slave_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"slave resource structure\n");
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = ca91cx42_bridge;
		mutex_init(&(slave_image->mtx));
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
			VME_USER2;

		/* Only windows 0 and 4 support A16 */
		if (i == 0 || i == 4)
			slave_image->address_attr |= VME_A16;

		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		list_add_tail(&(slave_image->list),
			&(ca91cx42_bridge->slave_resources));
	}

	/* Add dma engines to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->dma_resources));
	for (i = 0; i < CA91C142_MAX_DMA; i++) {
		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
			GFP_KERNEL);
		if (dma_ctrlr == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"dma resource structure\n");
			retval = -ENOMEM;
			goto err_dma;
		}
		dma_ctrlr->parent = ca91cx42_bridge;
		mutex_init(&(dma_ctrlr->mtx));
		dma_ctrlr->locked = 0;
		dma_ctrlr->number = i;
		INIT_LIST_HEAD(&(dma_ctrlr->pending));
		INIT_LIST_HEAD(&(dma_ctrlr->running));
		list_add_tail(&(dma_ctrlr->list),
			&(ca91cx42_bridge->dma_resources));
	}

	/* Add location monitor to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->lm_resources));
	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
	if (lm == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for "
			"location monitor resource structure\n");
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = ca91cx42_bridge;
	mutex_init(&(lm->mtx));
	lm->locked = 0;
	lm->number = 1;
	lm->monitors = 4;
	list_add_tail(&(lm->list), &(ca91cx42_bridge->lm_resources));

	ca91cx42_bridge->slave_get = ca91cx42_slave_get;
	ca91cx42_bridge->slave_set = ca91cx42_slave_set;
	ca91cx42_bridge->master_get = ca91cx42_master_get;
	ca91cx42_bridge->master_set = ca91cx42_master_set;
	ca91cx42_bridge->master_read = ca91cx42_master_read;
	ca91cx42_bridge->master_write = ca91cx42_master_write;
#if 0
	ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
	ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
	ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
	ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
#endif
	ca91cx42_bridge->irq_set = ca91cx42_irq_set;
	ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
#if 0
	ca91cx42_bridge->lm_set = ca91cx42_lm_set;
	ca91cx42_bridge->lm_get = ca91cx42_lm_get;
	ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
	ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
#endif
	ca91cx42_bridge->slot_get = ca91cx42_slot_get;

	data = ioread32(ca91cx42_bridge->base + MISC_CTL);
	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
		(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
	dev_info(&pdev->dev, "Slot ID is %d\n", ca91cx42_slot_get());

	if (ca91cx42_crcsr_init(pdev)) {
		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
		retval = -EINVAL;
		goto err_crcsr;
	}

	/* Need to save ca91cx42_bridge pointer locally in link list for use in
	 * ca91cx42_remove()
	 */
	retval = vme_register_bridge(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Registration failed.\n");
		goto err_reg;
	}

	return 0;

	vme_unregister_bridge(ca91cx42_bridge);
err_reg:
	ca91cx42_crcsr_exit(pdev);
err_crcsr:
err_lm:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
err_dma:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
err_slave:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(pdev);
err_irq:
err_test:
	iounmap(ca91cx42_bridge->base);
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err_enable:
	kfree(ca91cx42_bridge);
err_struct:
	return retval;
}

static void ca91cx42_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* Turn off Ints */
	iowrite32(0, ca91cx42_bridge->base + LINT_EN);

	/* Turn off the windows */
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI0_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI1_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI2_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI3_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI4_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI5_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI6_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI7_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI0_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI1_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI2_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI3_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI4_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI5_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI6_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI7_CTL);

	vme_unregister_bridge(ca91cx42_bridge);

	ca91cx42_crcsr_exit(pdev);

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(pdev);

	iounmap(ca91cx42_bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	kfree(ca91cx42_bridge);
}

static void __exit ca91cx42_exit(void)
{
	pci_unregister_driver(&ca91cx42_driver);
}

MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");

module_init(ca91cx42_init);
module_exit(ca91cx42_exit);

/*----------------------------------------------------------------------------
 * STAGING
 *--------------------------------------------------------------------------*/

#if 0

/* The legacy ca91c042 code below is not built and still uses the old
 * vmeXxxCfg_t interfaces rather than the vme_bridge API above. */

#define SWIZZLE(X) ( ((X & 0xFF000000) >> 24) | ((X & 0x00FF0000) >> 8) | ((X & 0x0000FF00) << 8) | ((X & 0x000000FF) << 24))
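
/* SWIZZLE reverses the byte order of a 32-bit word; the enable, compare
 * and swap values are presumably expected byte-swapped by the special
 * cycle (SCYC) registers used below. */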

int ca91cx42_master_rmw(vmeRmwCfg_t *vmeRmw)
{
	int temp_ctl = 0;
	int tempBS = 0;
	int tempBD = 0;
	int tempTO = 0;
	int vmeBS = 0;
	int vmeBD = 0;
	int *rmw_pci_data_ptr = NULL;
	int *vaDataPtr = NULL;
	int i;
	vmeOutWindowCfg_t vmeOut;
	if (vmeRmw->maxAttempts < 1) {
		return -EINVAL;
	}
	if (vmeRmw->targetAddrU) {
		return -EINVAL;
	}
	/* Find the PCI address that maps to the desired VME address */
	for (i = 0; i < 8; i++) {
		temp_ctl = ioread32(ca91cx42_bridge->base +
			CA91CX42_LSI_CTL[i]);
		if ((temp_ctl & 0x80000000) == 0) {
			continue;
		}
		memset(&vmeOut, 0, sizeof(vmeOut));
		vmeOut.windowNbr = i;
		ca91cx42_get_out_bound(&vmeOut);
		if (vmeOut.addrSpace != vmeRmw->addrSpace) {
			continue;
		}
		tempBS = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
		tempBD = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
		tempTO = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
		vmeBS = tempBS + tempTO;
		vmeBD = tempBD + tempTO;
		if ((vmeRmw->targetAddr >= vmeBS) &&
		    (vmeRmw->targetAddr < vmeBD)) {
			rmw_pci_data_ptr =
			    (int *)(tempBS + (vmeRmw->targetAddr - vmeBS));
			vaDataPtr =
			    (int *)(out_image_va[i] +
				    (vmeRmw->targetAddr - vmeBS));
			break;
		}
	}

	/* If no window - fail. */
	if (rmw_pci_data_ptr == NULL) {
		return -EINVAL;
	}
	/* Setup the RMW registers. */
	iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
	iowrite32(SWIZZLE(vmeRmw->enableMask), ca91cx42_bridge->base + SCYC_EN);
	iowrite32(SWIZZLE(vmeRmw->compareData), ca91cx42_bridge->base +
		SCYC_CMP);
	iowrite32(SWIZZLE(vmeRmw->swapData), ca91cx42_bridge->base + SCYC_SWP);
	iowrite32((int)rmw_pci_data_ptr, ca91cx42_bridge->base + SCYC_ADDR);
	iowrite32(1, ca91cx42_bridge->base + SCYC_CTL);

	/* Run the RMW cycle until either success or max attempts. */
	vmeRmw->numAttempts = 1;
	while (vmeRmw->numAttempts <= vmeRmw->maxAttempts) {
		if ((ioread32(vaDataPtr) & vmeRmw->enableMask) ==
		    (vmeRmw->swapData & vmeRmw->enableMask)) {
			iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
			break;
		}
		vmeRmw->numAttempts++;
	}

	/* If no success, set numAttempts to be greater than maxAttempts */
	if (vmeRmw->numAttempts > vmeRmw->maxAttempts) {
		vmeRmw->numAttempts = vmeRmw->maxAttempts + 1;
	}

	return 0;
}

int uniSetupDctlReg(vmeDmaPacket_t *vmeDma, int *dctlregreturn)
{
	unsigned int dctlreg = 0x80;
	struct vmeAttr *vmeAttr;

	if (vmeDma->srcBus == VME_DMA_VME) {
		dctlreg = 0;
		vmeAttr = &vmeDma->srcVmeAttr;
	} else {
		dctlreg = 0x80000000;
		vmeAttr = &vmeDma->dstVmeAttr;
	}

	switch (vmeAttr->maxDataWidth) {
	case VME_D8:
		break;
	case VME_D16:
		dctlreg |= 0x00400000;
		break;
	case VME_D32:
		dctlreg |= 0x00800000;
		break;
	case VME_D64:
		dctlreg |= 0x00C00000;
		break;
	}

	switch (vmeAttr->addrSpace) {
	case VME_A16:
		break;
	case VME_A24:
		dctlreg |= 0x00010000;
		break;
	case VME_A32:
		dctlreg |= 0x00020000;
		break;
	case VME_USER1:
		dctlreg |= 0x00060000;
		break;
	case VME_USER2:
		dctlreg |= 0x00070000;
		break;

	case VME_A64:		/* not supported in Universe DMA */
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
		return -EINVAL;
	}
	if (vmeAttr->userAccessType == VME_PROG) {
		dctlreg |= 0x00004000;
	}
	if (vmeAttr->dataAccessType == VME_SUPER) {
		dctlreg |= 0x00001000;
	}
	if (vmeAttr->xferProtocol != VME_SCT) {
		dctlreg |= 0x00000100;
	}
	*dctlregreturn = dctlreg;
	return 0;
}
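
/*
 * Start a DMA transfer. In chained mode (bit 27 of DGCS) only the
 * descriptor pointer register DCPP is loaded and the hardware walks the
 * descriptor list itself; in direct mode the single descriptor is copied
 * into the DMA registers. The return value is a start-time tick.
 */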

unsigned int
ca91cx42_start_dma(int channel, unsigned int dgcsreg, TDMA_Cmd_Packet *vmeLL)
{
	unsigned int val;

	/* Setup registers as needed for direct or chained. */
	if (dgcsreg & 0x8000000) {
		iowrite32(0, ca91cx42_bridge->base + DTBC);
		iowrite32((unsigned int)vmeLL, ca91cx42_bridge->base + DCPP);
	} else {
#if 0
		printk(KERN_ERR "Starting: DGCS = %08x\n", dgcsreg);
		printk(KERN_ERR "Starting: DVA = %08x\n",
			ioread32(&vmeLL->dva));
		printk(KERN_ERR "Starting: DLV = %08x\n",
			ioread32(&vmeLL->dlv));
		printk(KERN_ERR "Starting: DTBC = %08x\n",
			ioread32(&vmeLL->dtbc));
		printk(KERN_ERR "Starting: DCTL = %08x\n",
			ioread32(&vmeLL->dctl));
#endif
		/* Write registers */
		iowrite32(ioread32(&vmeLL->dva), ca91cx42_bridge->base + DVA);
		iowrite32(ioread32(&vmeLL->dlv), ca91cx42_bridge->base + DLA);
		iowrite32(ioread32(&vmeLL->dtbc), ca91cx42_bridge->base + DTBC);
		iowrite32(ioread32(&vmeLL->dctl), ca91cx42_bridge->base + DCTL);
		iowrite32(0, ca91cx42_bridge->base + DCPP);
	}

	/* Start the operation */
	iowrite32(dgcsreg, ca91cx42_bridge->base + DGCS);
	val = get_tbl();	/* start tick; a PowerPC timebase read in the
				 * original ca91c042 code (an assumption here) */
	iowrite32(dgcsreg | 0x8000000F, ca91cx42_bridge->base + DGCS);
	return val;
}

TDMA_Cmd_Packet *ca91cx42_setup_dma(vmeDmaPacket_t *vmeDma)
{
	vmeDmaPacket_t *vmeCur;
	int maxPerPage;
	int currentLLcount;
	TDMA_Cmd_Packet *startLL;
	TDMA_Cmd_Packet *currentLL;
	TDMA_Cmd_Packet *nextLL;
	unsigned int dctlreg = 0;

	maxPerPage = PAGESIZE / sizeof(TDMA_Cmd_Packet) - 1;
	startLL = (TDMA_Cmd_Packet *) __get_free_pages(GFP_KERNEL, 0);
	if (startLL == 0) {
		return startLL;
	}
	/* First allocate pages for descriptors and create linked list */
	vmeCur = vmeDma;
	currentLL = startLL;
	currentLLcount = 0;
	while (vmeCur != 0) {
		if (vmeCur->pNextPacket != 0) {
			currentLL->dcpp = (unsigned int)(currentLL + 1);
			currentLLcount++;
			if (currentLLcount >= maxPerPage) {
				currentLL->dcpp =
				    __get_free_pages(GFP_KERNEL, 0);
				currentLLcount = 0;
			}
			currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		} else {
			currentLL->dcpp = (unsigned int)0;
		}
		vmeCur = vmeCur->pNextPacket;
	}

	/* Next fill in information for each descriptor */
	vmeCur = vmeDma;
	currentLL = startLL;
	while (vmeCur != 0) {
		if (vmeCur->srcBus == VME_DMA_VME) {
			iowrite32(vmeCur->srcAddr, &currentLL->dva);
			iowrite32(vmeCur->dstAddr, &currentLL->dlv);
		} else {
			iowrite32(vmeCur->srcAddr, &currentLL->dlv);
			iowrite32(vmeCur->dstAddr, &currentLL->dva);
		}
		uniSetupDctlReg(vmeCur, &dctlreg);
		iowrite32(dctlreg, &currentLL->dctl);
		iowrite32(vmeCur->byteCount, &currentLL->dtbc);

		currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		vmeCur = vmeCur->pNextPacket;
	}

	/* Convert Links to PCI addresses. */
	currentLL = startLL;
	while (currentLL != 0) {
		nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		if (nextLL == 0) {
			iowrite32(1, &currentLL->dcpp);
		} else {
			iowrite32((unsigned int)virt_to_bus(nextLL),
				&currentLL->dcpp);
		}
		currentLL = nextLL;
	}

	/* Return pointer to descriptors list */
	return startLL;
}

int ca91cx42_free_dma(TDMA_Cmd_Packet *startLL)
{
	TDMA_Cmd_Packet *currentLL;
	TDMA_Cmd_Packet *prevLL;
	TDMA_Cmd_Packet *nextLL;
	unsigned int dcppreg;

	/* Convert Links to virtual addresses. */
	currentLL = startLL;
	while (currentLL != 0) {
		dcppreg = ioread32(&currentLL->dcpp);
		dcppreg &= ~6;
		if (dcppreg & 1) {
			currentLL->dcpp = 0;
		} else {
			currentLL->dcpp = (unsigned int)bus_to_virt(dcppreg);
		}
		currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
	}

	/* Free all pages associated with the descriptors. */
	currentLL = startLL;
	prevLL = currentLL;
	while (currentLL != 0) {
		nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		if (currentLL + 1 != nextLL) {
			free_pages((int)prevLL, 0);
			prevLL = nextLL;
		}
		currentLL = nextLL;
	}

	/* Return pointer to descriptors list */
	return 0;
}
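
/*
 * Validate and run a DMA packet chain: byte counts must be non-zero and
 * below 16 MB, source and destination must share low-order alignment,
 * and transfers must be PCI<->VME. The block size and back-off timer are
 * encoded into DGCS before the chain is started, then we sleep until the
 * DONE bit (0x800) appears in DGCS.
 */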

int ca91cx42_do_dma(vmeDmaPacket_t *vmeDma)
{
	unsigned int dgcsreg = 0;
	unsigned int dctlreg = 0;
	int val;
	int channel, x;
	vmeDmaPacket_t *curDma;
	TDMA_Cmd_Packet *dmaLL;

	/* Sanity check the VME chain. */
	channel = vmeDma->channel_number;
	if (channel > 0) {
		return -EINVAL;
	}
	curDma = vmeDma;
	while (curDma != 0) {
		if (curDma->byteCount == 0) {
			return -EINVAL;
		}
		if (curDma->byteCount >= 0x1000000) {
			return -EINVAL;
		}
		if ((curDma->srcAddr & 7) != (curDma->dstAddr & 7)) {
			return -EINVAL;
		}
		switch (curDma->srcBus) {
		case VME_DMA_PCI:
			if (curDma->dstBus != VME_DMA_VME) {
				return -EINVAL;
			}
			break;
		case VME_DMA_VME:
			if (curDma->dstBus != VME_DMA_PCI) {
				return -EINVAL;
			}
			break;
		default:
			return -EINVAL;
		}
		if (uniSetupDctlReg(curDma, &dctlreg) < 0) {
			return -EINVAL;
		}

		curDma = curDma->pNextPacket;
		if (curDma == vmeDma) {	/* Endless Loop! */
			return -EINVAL;
		}
	}

	/* calculate control register */
	if (vmeDma->pNextPacket != 0) {
		dgcsreg = 0x8000000;
	} else {
		dgcsreg = 0;
	}

	for (x = 0; x < 8; x++) {	/* vme block size */
		if ((256 << x) >= vmeDma->maxVmeBlockSize) {
			break;
		}
	}
	if (x == 8)
		x = 7;
	dgcsreg |= (x << 20);

	if (vmeDma->vmeBackOffTimer) {
		for (x = 1; x < 8; x++) {	/* vme timer */
			if ((16 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
				break;
			}
		}
		if (x == 8)
			x = 7;
		dgcsreg |= (x << 16);
	}
	/* Setup the dma chain */
	dmaLL = ca91cx42_setup_dma(vmeDma);

	/* Start the DMA */
	if (dgcsreg & 0x8000000) {
		vmeDma->vmeDmaStartTick =
		    ca91cx42_start_dma(channel, dgcsreg,
			(TDMA_Cmd_Packet *) virt_to_phys(dmaLL));
	} else {
		vmeDma->vmeDmaStartTick =
		    ca91cx42_start_dma(channel, dgcsreg, dmaLL);
	}

	wait_event_interruptible(dma_queue,
		ioread32(ca91cx42_bridge->base + DGCS) & 0x800);

	val = ioread32(ca91cx42_bridge->base + DGCS);
	iowrite32(val | 0xF00, ca91cx42_bridge->base + DGCS);

	vmeDma->vmeDmaStatus = 0;

	if (!(val & 0x00000800)) {
		vmeDma->vmeDmaStatus = val & 0x700;
		printk(KERN_ERR "ca91c042: DMA Error in ca91cx42_DMA_irqhandler"
			" DGCS=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DCPP);
		printk(KERN_ERR "ca91c042: DCPP=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DCTL);
		printk(KERN_ERR "ca91c042: DCTL=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DTBC);
		printk(KERN_ERR "ca91c042: DTBC=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DLA);
		printk(KERN_ERR "ca91c042: DLA=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DVA);
		printk(KERN_ERR "ca91c042: DVA=%08X\n", val);
	}
	/* Free the dma chain */
	ca91cx42_free_dma(dmaLL);

	return 0;
}

int ca91cx42_lm_set(vmeLmCfg_t *vmeLm)
{
	int temp_ctl = 0;

	if (vmeLm->addrU)
		return -EINVAL;

	switch (vmeLm->addrSpace) {
	case VME_A64:
	case VME_USER3:
	case VME_USER4:
		return -EINVAL;
	case VME_A16:
		temp_ctl |= 0x00000;
		break;
	case VME_A24:
		temp_ctl |= 0x10000;
		break;
	case VME_A32:
		temp_ctl |= 0x20000;
		break;
	case VME_CRCSR:
		temp_ctl |= 0x50000;
		break;
	case VME_USER1:
		temp_ctl |= 0x60000;
		break;
	case VME_USER2:
		temp_ctl |= 0x70000;
		break;
	}

	/* Disable while we are mucking around */
	iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);

	iowrite32(vmeLm->addr, ca91cx42_bridge->base + LM_BS);

	/* Setup CTL register. */
	if (vmeLm->userAccessType & VME_SUPER)
		temp_ctl |= 0x00200000;
	if (vmeLm->userAccessType & VME_USER)
		temp_ctl |= 0x00100000;
	if (vmeLm->dataAccessType & VME_PROG)
		temp_ctl |= 0x00800000;
	if (vmeLm->dataAccessType & VME_DATA)
		temp_ctl |= 0x00400000;

	/* Write ctl reg and enable */
	iowrite32(0x80000000 | temp_ctl, ca91cx42_bridge->base + LM_CTL);
	temp_ctl = ioread32(ca91cx42_bridge->base + LM_CTL);

	return 0;
}

int ca91cx42_wait_lm(vmeLmCfg_t *vmeLm)
{
	unsigned long flags;

	spin_lock_irqsave(&lm_lock, flags);
	spin_unlock_irqrestore(&lm_lock, flags);

	if (vmeLm->lmWait < 10)
		vmeLm->lmWait = 10;
	interruptible_sleep_on_timeout(&lm_queue, vmeLm->lmWait);

	iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);

	return 0;
}
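
/*
 * The global VMEbus timeout is encoded in the VBTO field (MISC_CTL bits
 * 31:28) as 16 * 2^(vbto - 1); for example a requested timeout of 100
 * yields vbto = 4, i.e. 16 * 8 = 128. Values above 1024 are rejected.
 */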

int ca91cx42_set_arbiter(vmeArbiterCfg_t *vmeArb)
{
	int temp_ctl = 0;
	int vbto = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);
	temp_ctl &= 0x00FFFFFF;

	if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
		vbto = 7;
	} else if (vmeArb->globalTimeoutTimer > 1024) {
		return -EINVAL;
	} else if (vmeArb->globalTimeoutTimer == 0) {
		vbto = 0;
	} else {
		vbto = 1;
		while ((16 * (1 << (vbto - 1))) < vmeArb->globalTimeoutTimer)
			vbto += 1;
	}
	temp_ctl |= (vbto << 28);

	if (vmeArb->arbiterMode == VME_PRIORITY_MODE)
		temp_ctl |= 1 << 26;

	if (vmeArb->arbiterTimeoutFlag)
		temp_ctl |= 2 << 24;

	iowrite32(temp_ctl, ca91cx42_bridge->base + MISC_CTL);

	return 0;
}

int ca91cx42_get_arbiter(vmeArbiterCfg_t *vmeArb)
{
	int temp_ctl = 0;
	int vbto = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);

	vbto = (temp_ctl >> 28) & 0xF;
	if (vbto != 0)
		vmeArb->globalTimeoutTimer = (16 * (1 << (vbto - 1)));

	if (temp_ctl & (1 << 26))
		vmeArb->arbiterMode = VME_PRIORITY_MODE;
	else
		vmeArb->arbiterMode = VME_R_ROBIN_MODE;

	if (temp_ctl & (3 << 24))
		vmeArb->arbiterTimeoutFlag = 1;

	return 0;
}
*vmeReq
)
1853 temp_ctl
= ioread32(ca91cx42_bridge
->base
+ MAST_CTL
);
1854 temp_ctl
&= 0xFF0FFFFF;
1856 if (vmeReq
->releaseMode
== 1)
1857 temp_ctl
|= (1 << 20);
1859 if (vmeReq
->fairMode
== 1)
1860 temp_ctl
|= (1 << 21);
1862 temp_ctl
|= (vmeReq
->requestLevel
<< 22);
1864 iowrite32(temp_ctl
, ca91cx42_bridge
->base
+ MAST_CTL
);

int ca91cx42_get_requestor(vmeRequesterCfg_t *vmeReq)
{
	int temp_ctl = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);

	if (temp_ctl & (1 << 20))
		vmeReq->releaseMode = 1;

	if (temp_ctl & (1 << 21))
		vmeReq->fairMode = 1;

	vmeReq->requestLevel = (temp_ctl & 0xC00000) >> 22;

	return 0;
}

#endif