/*
 * Support for the Tundra TSI148 VME-PCI Bridge Chip
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/proc_fs.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/interrupt.h>
26 #include <linux/spinlock.h>
27 #include <linux/sched.h>
30 #include <asm/uaccess.h>
33 #include "../vme_bridge.h"
34 #include "vme_tsi148.h"
36 static int __init
tsi148_init(void);
37 static int tsi148_probe(struct pci_dev
*, const struct pci_device_id
*);
38 static void tsi148_remove(struct pci_dev
*);
39 static void __exit
tsi148_exit(void);
42 int tsi148_slave_set(struct vme_slave_resource
*, int, unsigned long long,
43 unsigned long long, dma_addr_t
, vme_address_t
, vme_cycle_t
);
44 int tsi148_slave_get(struct vme_slave_resource
*, int *, unsigned long long *,
45 unsigned long long *, dma_addr_t
*, vme_address_t
*, vme_cycle_t
*);
47 int tsi148_master_get(struct vme_master_resource
*, int *, unsigned long long *,
48 unsigned long long *, vme_address_t
*, vme_cycle_t
*, vme_width_t
*);
49 int tsi148_master_set(struct vme_master_resource
*, int, unsigned long long,
50 unsigned long long, vme_address_t
, vme_cycle_t
, vme_width_t
);
51 ssize_t
tsi148_master_read(struct vme_master_resource
*, void *, size_t,
53 ssize_t
tsi148_master_write(struct vme_master_resource
*, void *, size_t,
55 unsigned int tsi148_master_rmw(struct vme_master_resource
*, unsigned int,
56 unsigned int, unsigned int, loff_t
);
57 int tsi148_dma_list_add (struct vme_dma_list
*, struct vme_dma_attr
*,
58 struct vme_dma_attr
*, size_t);
59 int tsi148_dma_list_exec(struct vme_dma_list
*);
60 int tsi148_dma_list_empty(struct vme_dma_list
*);
61 int tsi148_generate_irq(int, int);
62 int tsi148_slot_get(void);
68 /* XXX These should all be in a per device structure */
69 static struct vme_bridge
*tsi148_bridge
;
70 static wait_queue_head_t dma_queue
[2];
71 static wait_queue_head_t iack_queue
;
72 static void (*lm_callback
[4])(int); /* Called in interrupt handler */
73 static void *crcsr_kernel
;
74 static dma_addr_t crcsr_bus
;
75 static struct vme_master_resource
*flush_image
;
76 static struct mutex vme_rmw
; /* Only one RMW cycle at a time */
77 static struct mutex vme_int
; /*
78 * Only one VME interrupt can be
79 * generated at a time, provide locking
82 static char driver_name
[] = "vme_tsi148";
84 static const struct pci_device_id tsi148_ids
[] = {
85 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA
, PCI_DEVICE_ID_TUNDRA_TSI148
) },
89 static struct pci_driver tsi148_driver
= {
91 .id_table
= tsi148_ids
,
92 .probe
= tsi148_probe
,
93 .remove
= tsi148_remove
,
/*
 * Combine the high and low 32-bit halves of a split register pair into a
 * single 64-bit value stored through @variable.
 */
static void reg_join(unsigned int high, unsigned int low,
	unsigned long long *variable)
{
	*variable = (unsigned long long)high << 32;
	*variable |= (unsigned long long)low;
}
/*
 * Split a 64-bit value into the high and low 32-bit halves used by the
 * bridge's split register pairs. Inverse of reg_join().
 */
static void reg_split(unsigned long long variable, unsigned int *high,
	unsigned int *low)
{
	*low = (unsigned int)variable & 0xFFFFFFFF;
	*high = (unsigned int)(variable >> 32);
}
111 * Wakes up DMA queue.
113 static u32
tsi148_DMA_irqhandler(int channel_mask
)
117 if (channel_mask
& TSI148_LCSR_INTS_DMA0S
) {
118 wake_up(&dma_queue
[0]);
119 serviced
|= TSI148_LCSR_INTC_DMA0C
;
121 if (channel_mask
& TSI148_LCSR_INTS_DMA1S
) {
122 wake_up(&dma_queue
[1]);
123 serviced
|= TSI148_LCSR_INTC_DMA1C
;
130 * Wake up location monitor queue
132 static u32
tsi148_LM_irqhandler(u32 stat
)
137 for (i
= 0; i
< 4; i
++) {
138 if(stat
& TSI148_LCSR_INTS_LMS
[i
]) {
139 /* We only enable interrupts if the callback is set */
141 serviced
|= TSI148_LCSR_INTC_LMC
[i
];
149 * Wake up mail box queue.
151 * XXX This functionality is not exposed up though API.
153 static u32
tsi148_MB_irqhandler(u32 stat
)
159 for (i
= 0; i
< 4; i
++) {
160 if(stat
& TSI148_LCSR_INTS_MBS
[i
]) {
161 val
= ioread32be(tsi148_bridge
->base
+
162 TSI148_GCSR_MBOX
[i
]);
163 printk("VME Mailbox %d received: 0x%x\n", i
, val
);
164 serviced
|= TSI148_LCSR_INTC_MBC
[i
];
172 * Display error & status message when PERR (PCI) exception interrupt occurs.
174 static u32
tsi148_PERR_irqhandler(void)
177 "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
178 ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_EDPAU
),
179 ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_EDPAL
),
180 ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_EDPAT
)
183 "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
184 ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_EDPXA
),
185 ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_EDPXS
)
188 iowrite32be(TSI148_LCSR_EDPAT_EDPCL
,
189 tsi148_bridge
->base
+ TSI148_LCSR_EDPAT
);
191 return TSI148_LCSR_INTC_PERRC
;
195 * Save address and status when VME error interrupt occurs.
197 static u32
tsi148_VERR_irqhandler(void)
199 unsigned int error_addr_high
, error_addr_low
;
200 unsigned long long error_addr
;
202 struct vme_bus_error
*error
;
204 error_addr_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VEAU
);
205 error_addr_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VEAL
);
206 error_attrib
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VEAT
);
208 reg_join(error_addr_high
, error_addr_low
, &error_addr
);
210 /* Check for exception register overflow (we have lost error data) */
211 if(error_attrib
& TSI148_LCSR_VEAT_VEOF
) {
212 printk(KERN_ERR
"VME Bus Exception Overflow Occurred\n");
215 error
= (struct vme_bus_error
*)kmalloc(sizeof (struct vme_bus_error
),
218 error
->address
= error_addr
;
219 error
->attributes
= error_attrib
;
220 list_add_tail(&(error
->list
), &(tsi148_bridge
->vme_errors
));
223 "Unable to alloc memory for VMEbus Error reporting\n");
225 "VME Bus Error at address: 0x%llx, attributes: %08x\n",
226 error_addr
, error_attrib
);
230 iowrite32be(TSI148_LCSR_VEAT_VESCL
,
231 tsi148_bridge
->base
+ TSI148_LCSR_VEAT
);
233 return TSI148_LCSR_INTC_VERRC
;
237 * Wake up IACK queue.
239 static u32
tsi148_IACK_irqhandler(void)
241 wake_up(&iack_queue
);
243 return TSI148_LCSR_INTC_IACKC
;
247 * Calling VME bus interrupt callback if provided.
249 static u32
tsi148_VIRQ_irqhandler(u32 stat
)
251 int vec
, i
, serviced
= 0;
253 for (i
= 7; i
> 0; i
--) {
254 if (stat
& (1 << i
)) {
256 * Note: Even though the registers are defined
257 * as 32-bits in the spec, we only want to issue
258 * 8-bit IACK cycles on the bus, read from offset
261 vec
= ioread8(tsi148_bridge
->base
+
262 TSI148_LCSR_VIACK
[i
] + 3);
264 vme_irq_handler(tsi148_bridge
, i
, vec
);
266 serviced
|= (1 << i
);
274 * Top level interrupt handler. Clears appropriate interrupt status bits and
275 * then calls appropriate sub handler(s).
277 static irqreturn_t
tsi148_irqhandler(int irq
, void *dev_id
)
279 u32 stat
, enable
, serviced
= 0;
281 /* Determine which interrupts are unmasked and set */
282 enable
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
283 stat
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTS
);
285 /* Only look at unmasked interrupts */
288 if (unlikely(!stat
)) {
292 /* Call subhandlers as appropriate */
294 if (stat
& (TSI148_LCSR_INTS_DMA1S
| TSI148_LCSR_INTS_DMA0S
))
295 serviced
|= tsi148_DMA_irqhandler(stat
);
297 /* Location monitor irqs */
298 if (stat
& (TSI148_LCSR_INTS_LM3S
| TSI148_LCSR_INTS_LM2S
|
299 TSI148_LCSR_INTS_LM1S
| TSI148_LCSR_INTS_LM0S
))
300 serviced
|= tsi148_LM_irqhandler(stat
);
303 if (stat
& (TSI148_LCSR_INTS_MB3S
| TSI148_LCSR_INTS_MB2S
|
304 TSI148_LCSR_INTS_MB1S
| TSI148_LCSR_INTS_MB0S
))
305 serviced
|= tsi148_MB_irqhandler(stat
);
308 if (stat
& TSI148_LCSR_INTS_PERRS
)
309 serviced
|= tsi148_PERR_irqhandler();
312 if (stat
& TSI148_LCSR_INTS_VERRS
)
313 serviced
|= tsi148_VERR_irqhandler();
316 if (stat
& TSI148_LCSR_INTS_IACKS
)
317 serviced
|= tsi148_IACK_irqhandler();
320 if (stat
& (TSI148_LCSR_INTS_IRQ7S
| TSI148_LCSR_INTS_IRQ6S
|
321 TSI148_LCSR_INTS_IRQ5S
| TSI148_LCSR_INTS_IRQ4S
|
322 TSI148_LCSR_INTS_IRQ3S
| TSI148_LCSR_INTS_IRQ2S
|
323 TSI148_LCSR_INTS_IRQ1S
))
324 serviced
|= tsi148_VIRQ_irqhandler(stat
);
326 /* Clear serviced interrupts */
327 iowrite32be(serviced
, tsi148_bridge
->base
+ TSI148_LCSR_INTC
);
332 static int tsi148_irq_init(struct vme_bridge
*bridge
)
336 struct pci_dev
*pdev
;
339 pdev
= container_of(bridge
->parent
, struct pci_dev
, dev
);
341 /* Initialise list for VME bus errors */
342 INIT_LIST_HEAD(&(bridge
->vme_errors
));
344 mutex_init(&(bridge
->irq_mtx
));
346 result
= request_irq(pdev
->irq
,
351 dev_err(&pdev
->dev
, "Can't get assigned pci irq vector %02X\n",
356 /* Enable and unmask interrupts */
357 tmp
= TSI148_LCSR_INTEO_DMA1EO
| TSI148_LCSR_INTEO_DMA0EO
|
358 TSI148_LCSR_INTEO_MB3EO
| TSI148_LCSR_INTEO_MB2EO
|
359 TSI148_LCSR_INTEO_MB1EO
| TSI148_LCSR_INTEO_MB0EO
|
360 TSI148_LCSR_INTEO_PERREO
| TSI148_LCSR_INTEO_VERREO
|
361 TSI148_LCSR_INTEO_IACKEO
;
363 /* XXX This leaves the following interrupts masked.
364 * TSI148_LCSR_INTEO_VIEEO
365 * TSI148_LCSR_INTEO_SYSFLEO
366 * TSI148_LCSR_INTEO_ACFLEO
369 /* Don't enable Location Monitor interrupts here - they will be
370 * enabled when the location monitors are properly configured and
371 * a callback has been attached.
372 * TSI148_LCSR_INTEO_LM0EO
373 * TSI148_LCSR_INTEO_LM1EO
374 * TSI148_LCSR_INTEO_LM2EO
375 * TSI148_LCSR_INTEO_LM3EO
378 /* Don't enable VME interrupts until we add a handler, else the board
379 * will respond to it and we don't want that unless it knows how to
380 * properly deal with it.
381 * TSI148_LCSR_INTEO_IRQ7EO
382 * TSI148_LCSR_INTEO_IRQ6EO
383 * TSI148_LCSR_INTEO_IRQ5EO
384 * TSI148_LCSR_INTEO_IRQ4EO
385 * TSI148_LCSR_INTEO_IRQ3EO
386 * TSI148_LCSR_INTEO_IRQ2EO
387 * TSI148_LCSR_INTEO_IRQ1EO
390 iowrite32be(tmp
, bridge
->base
+ TSI148_LCSR_INTEO
);
391 iowrite32be(tmp
, bridge
->base
+ TSI148_LCSR_INTEN
);
396 static void tsi148_irq_exit(struct pci_dev
*pdev
)
398 /* Turn off interrupts */
399 iowrite32be(0x0, tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
400 iowrite32be(0x0, tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
402 /* Clear all interrupts */
403 iowrite32be(0xFFFFFFFF, tsi148_bridge
->base
+ TSI148_LCSR_INTC
);
405 /* Detach interrupt handler */
406 free_irq(pdev
->irq
, pdev
);
410 * Check to see if an IACk has been received, return true (1) or false (0).
412 int tsi148_iack_received(void)
416 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VICR
);
418 if (tmp
& TSI148_LCSR_VICR_IRQS
)
425 * Configure VME interrupt
427 void tsi148_irq_set(int level
, int state
, int sync
)
429 struct pci_dev
*pdev
;
432 /* We need to do the ordering differently for enabling and disabling */
434 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
435 tmp
&= ~TSI148_LCSR_INTEN_IRQEN
[level
- 1];
436 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
438 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
439 tmp
&= ~TSI148_LCSR_INTEO_IRQEO
[level
- 1];
440 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
443 pdev
= container_of(tsi148_bridge
->parent
,
444 struct pci_dev
, dev
);
446 synchronize_irq(pdev
->irq
);
449 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
450 tmp
|= TSI148_LCSR_INTEO_IRQEO
[level
- 1];
451 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
453 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
454 tmp
|= TSI148_LCSR_INTEN_IRQEN
[level
- 1];
455 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
460 * Generate a VME bus interrupt at the requested level & vector. Wait for
461 * interrupt to be acked.
463 int tsi148_irq_generate(int level
, int statid
)
467 mutex_lock(&(vme_int
));
469 /* Read VICR register */
470 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VICR
);
473 tmp
= (tmp
& ~TSI148_LCSR_VICR_STID_M
) |
474 (statid
& TSI148_LCSR_VICR_STID_M
);
475 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_VICR
);
477 /* Assert VMEbus IRQ */
478 tmp
= tmp
| TSI148_LCSR_VICR_IRQL
[level
];
479 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_VICR
);
481 /* XXX Consider implementing a timeout? */
482 wait_event_interruptible(iack_queue
, tsi148_iack_received());
484 mutex_unlock(&(vme_int
));
490 * Find the first error in this address range
492 static struct vme_bus_error
*tsi148_find_error(vme_address_t aspace
,
493 unsigned long long address
, size_t count
)
495 struct list_head
*err_pos
;
496 struct vme_bus_error
*vme_err
, *valid
= NULL
;
497 unsigned long long bound
;
499 bound
= address
+ count
;
502 * XXX We are currently not looking at the address space when parsing
503 * for errors. This is because parsing the Address Modifier Codes
504 * is going to be quite resource intensive to do properly. We
505 * should be OK just looking at the addresses and this is certainly
506 * much better than what we had before.
509 /* Iterate through errors */
510 list_for_each(err_pos
, &(tsi148_bridge
->vme_errors
)) {
511 vme_err
= list_entry(err_pos
, struct vme_bus_error
, list
);
512 if((vme_err
->address
>= address
) && (vme_err
->address
< bound
)){
522 * Clear errors in the provided address range.
524 static void tsi148_clear_errors(vme_address_t aspace
,
525 unsigned long long address
, size_t count
)
527 struct list_head
*err_pos
, *temp
;
528 struct vme_bus_error
*vme_err
;
529 unsigned long long bound
;
531 bound
= address
+ count
;
534 * XXX We are currently not looking at the address space when parsing
535 * for errors. This is because parsing the Address Modifier Codes
536 * is going to be quite resource intensive to do properly. We
537 * should be OK just looking at the addresses and this is certainly
538 * much better than what we had before.
541 /* Iterate through errors */
542 list_for_each_safe(err_pos
, temp
, &(tsi148_bridge
->vme_errors
)) {
543 vme_err
= list_entry(err_pos
, struct vme_bus_error
, list
);
545 if((vme_err
->address
>= address
) && (vme_err
->address
< bound
)){
553 * Initialize a slave window with the requested attributes.
555 int tsi148_slave_set(struct vme_slave_resource
*image
, int enabled
,
556 unsigned long long vme_base
, unsigned long long size
,
557 dma_addr_t pci_base
, vme_address_t aspace
, vme_cycle_t cycle
)
559 unsigned int i
, addr
= 0, granularity
= 0;
560 unsigned int temp_ctl
= 0;
561 unsigned int vme_base_low
, vme_base_high
;
562 unsigned int vme_bound_low
, vme_bound_high
;
563 unsigned int pci_offset_low
, pci_offset_high
;
564 unsigned long long vme_bound
, pci_offset
;
567 printk("Set slave image %d to:\n", image
->number
);
568 printk("\tEnabled: %s\n", (enabled
== 1)? "yes" : "no");
569 printk("\tVME Base:0x%llx\n", vme_base
);
570 printk("\tWindow Size:0x%llx\n", size
);
571 printk("\tPCI Base:0x%lx\n", (unsigned long)pci_base
);
572 printk("\tAddress Space:0x%x\n", aspace
);
573 printk("\tTransfer Cycle Properties:0x%x\n", cycle
);
581 addr
|= TSI148_LCSR_ITAT_AS_A16
;
584 granularity
= 0x1000;
585 addr
|= TSI148_LCSR_ITAT_AS_A24
;
588 granularity
= 0x10000;
589 addr
|= TSI148_LCSR_ITAT_AS_A32
;
592 granularity
= 0x10000;
593 addr
|= TSI148_LCSR_ITAT_AS_A64
;
601 printk("Invalid address space\n");
606 /* Convert 64-bit variables to 2x 32-bit variables */
607 reg_split(vme_base
, &vme_base_high
, &vme_base_low
);
610 * Bound address is a valid address for the window, adjust
613 vme_bound
= vme_base
+ size
- granularity
;
614 reg_split(vme_bound
, &vme_bound_high
, &vme_bound_low
);
615 pci_offset
= (unsigned long long)pci_base
- vme_base
;
616 reg_split(pci_offset
, &pci_offset_high
, &pci_offset_low
);
618 if (vme_base_low
& (granularity
- 1)) {
619 printk("Invalid VME base alignment\n");
622 if (vme_bound_low
& (granularity
- 1)) {
623 printk("Invalid VME bound alignment\n");
626 if (pci_offset_low
& (granularity
- 1)) {
627 printk("Invalid PCI Offset alignment\n");
632 printk("\tVME Bound:0x%llx\n", vme_bound
);
633 printk("\tPCI Offset:0x%llx\n", pci_offset
);
636 /* Disable while we are mucking around */
637 temp_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
638 TSI148_LCSR_OFFSET_ITAT
);
639 temp_ctl
&= ~TSI148_LCSR_ITAT_EN
;
640 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
641 TSI148_LCSR_OFFSET_ITAT
);
644 iowrite32be(vme_base_high
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
645 TSI148_LCSR_OFFSET_ITSAU
);
646 iowrite32be(vme_base_low
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
647 TSI148_LCSR_OFFSET_ITSAL
);
648 iowrite32be(vme_bound_high
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
649 TSI148_LCSR_OFFSET_ITEAU
);
650 iowrite32be(vme_bound_low
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
651 TSI148_LCSR_OFFSET_ITEAL
);
652 iowrite32be(pci_offset_high
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
653 TSI148_LCSR_OFFSET_ITOFU
);
654 iowrite32be(pci_offset_low
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
655 TSI148_LCSR_OFFSET_ITOFL
);
657 /* XXX Prefetch stuff currently unsupported */
660 for (x
= 0; x
< 4; x
++) {
661 if ((64 << x
) >= vmeIn
->prefetchSize
) {
667 temp_ctl
|= (x
<< 16);
669 if (vmeIn
->prefetchThreshold
)
670 if (vmeIn
->prefetchThreshold
)
674 /* Setup 2eSST speeds */
675 temp_ctl
&= ~TSI148_LCSR_ITAT_2eSSTM_M
;
676 switch (cycle
& (VME_2eSST160
| VME_2eSST267
| VME_2eSST320
)) {
678 temp_ctl
|= TSI148_LCSR_ITAT_2eSSTM_160
;
681 temp_ctl
|= TSI148_LCSR_ITAT_2eSSTM_267
;
684 temp_ctl
|= TSI148_LCSR_ITAT_2eSSTM_320
;
688 /* Setup cycle types */
689 temp_ctl
&= ~(0x1F << 7);
691 temp_ctl
|= TSI148_LCSR_ITAT_BLT
;
692 if (cycle
& VME_MBLT
)
693 temp_ctl
|= TSI148_LCSR_ITAT_MBLT
;
694 if (cycle
& VME_2eVME
)
695 temp_ctl
|= TSI148_LCSR_ITAT_2eVME
;
696 if (cycle
& VME_2eSST
)
697 temp_ctl
|= TSI148_LCSR_ITAT_2eSST
;
698 if (cycle
& VME_2eSSTB
)
699 temp_ctl
|= TSI148_LCSR_ITAT_2eSSTB
;
701 /* Setup address space */
702 temp_ctl
&= ~TSI148_LCSR_ITAT_AS_M
;
706 if (cycle
& VME_SUPER
)
707 temp_ctl
|= TSI148_LCSR_ITAT_SUPR
;
708 if (cycle
& VME_USER
)
709 temp_ctl
|= TSI148_LCSR_ITAT_NPRIV
;
710 if (cycle
& VME_PROG
)
711 temp_ctl
|= TSI148_LCSR_ITAT_PGM
;
712 if (cycle
& VME_DATA
)
713 temp_ctl
|= TSI148_LCSR_ITAT_DATA
;
715 /* Write ctl reg without enable */
716 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
717 TSI148_LCSR_OFFSET_ITAT
);
720 temp_ctl
|= TSI148_LCSR_ITAT_EN
;
722 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
723 TSI148_LCSR_OFFSET_ITAT
);
729 * Get slave window configuration.
731 * XXX Prefetch currently unsupported.
733 int tsi148_slave_get(struct vme_slave_resource
*image
, int *enabled
,
734 unsigned long long *vme_base
, unsigned long long *size
,
735 dma_addr_t
*pci_base
, vme_address_t
*aspace
, vme_cycle_t
*cycle
)
737 unsigned int i
, granularity
= 0, ctl
= 0;
738 unsigned int vme_base_low
, vme_base_high
;
739 unsigned int vme_bound_low
, vme_bound_high
;
740 unsigned int pci_offset_low
, pci_offset_high
;
741 unsigned long long vme_bound
, pci_offset
;
747 ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
748 TSI148_LCSR_OFFSET_ITAT
);
750 vme_base_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
751 TSI148_LCSR_OFFSET_ITSAU
);
752 vme_base_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
753 TSI148_LCSR_OFFSET_ITSAL
);
754 vme_bound_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
755 TSI148_LCSR_OFFSET_ITEAU
);
756 vme_bound_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
757 TSI148_LCSR_OFFSET_ITEAL
);
758 pci_offset_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
759 TSI148_LCSR_OFFSET_ITOFU
);
760 pci_offset_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
761 TSI148_LCSR_OFFSET_ITOFL
);
763 /* Convert 64-bit variables to 2x 32-bit variables */
764 reg_join(vme_base_high
, vme_base_low
, vme_base
);
765 reg_join(vme_bound_high
, vme_bound_low
, &vme_bound
);
766 reg_join(pci_offset_high
, pci_offset_low
, &pci_offset
);
768 *pci_base
= (dma_addr_t
)vme_base
+ pci_offset
;
774 if (ctl
& TSI148_LCSR_ITAT_EN
)
777 if ((ctl
& TSI148_LCSR_ITAT_AS_M
) == TSI148_LCSR_ITAT_AS_A16
) {
781 if ((ctl
& TSI148_LCSR_ITAT_AS_M
) == TSI148_LCSR_ITAT_AS_A24
) {
782 granularity
= 0x1000;
785 if ((ctl
& TSI148_LCSR_ITAT_AS_M
) == TSI148_LCSR_ITAT_AS_A32
) {
786 granularity
= 0x10000;
789 if ((ctl
& TSI148_LCSR_ITAT_AS_M
) == TSI148_LCSR_ITAT_AS_A64
) {
790 granularity
= 0x10000;
794 /* Need granularity before we set the size */
795 *size
= (unsigned long long)((vme_bound
- *vme_base
) + granularity
);
798 if ((ctl
& TSI148_LCSR_ITAT_2eSSTM_M
) == TSI148_LCSR_ITAT_2eSSTM_160
)
799 *cycle
|= VME_2eSST160
;
800 if ((ctl
& TSI148_LCSR_ITAT_2eSSTM_M
) == TSI148_LCSR_ITAT_2eSSTM_267
)
801 *cycle
|= VME_2eSST267
;
802 if ((ctl
& TSI148_LCSR_ITAT_2eSSTM_M
) == TSI148_LCSR_ITAT_2eSSTM_320
)
803 *cycle
|= VME_2eSST320
;
805 if (ctl
& TSI148_LCSR_ITAT_BLT
)
807 if (ctl
& TSI148_LCSR_ITAT_MBLT
)
809 if (ctl
& TSI148_LCSR_ITAT_2eVME
)
811 if (ctl
& TSI148_LCSR_ITAT_2eSST
)
813 if (ctl
& TSI148_LCSR_ITAT_2eSSTB
)
814 *cycle
|= VME_2eSSTB
;
816 if (ctl
& TSI148_LCSR_ITAT_SUPR
)
818 if (ctl
& TSI148_LCSR_ITAT_NPRIV
)
820 if (ctl
& TSI148_LCSR_ITAT_PGM
)
822 if (ctl
& TSI148_LCSR_ITAT_DATA
)
829 * Allocate and map PCI Resource
831 static int tsi148_alloc_resource(struct vme_master_resource
*image
,
832 unsigned long long size
)
834 unsigned long long existing_size
;
836 struct pci_dev
*pdev
;
838 /* Find pci_dev container of dev */
839 if (tsi148_bridge
->parent
== NULL
) {
840 printk("Dev entry NULL\n");
843 pdev
= container_of(tsi148_bridge
->parent
, struct pci_dev
, dev
);
845 existing_size
= (unsigned long long)(image
->pci_resource
.end
-
846 image
->pci_resource
.start
);
848 /* If the existing size is OK, return */
849 if ((size
!= 0) && (existing_size
== (size
- 1)))
852 if (existing_size
!= 0) {
853 iounmap(image
->kern_base
);
854 image
->kern_base
= NULL
;
855 if (image
->pci_resource
.name
!= NULL
)
856 kfree(image
->pci_resource
.name
);
857 release_resource(&(image
->pci_resource
));
858 memset(&(image
->pci_resource
), 0, sizeof(struct resource
));
861 /* Exit here if size is zero */
866 if (image
->pci_resource
.name
== NULL
) {
867 image
->pci_resource
.name
= kmalloc(VMENAMSIZ
+3, GFP_KERNEL
);
868 if (image
->pci_resource
.name
== NULL
) {
869 printk(KERN_ERR
"Unable to allocate memory for resource"
876 sprintf((char *)image
->pci_resource
.name
, "%s.%d", tsi148_bridge
->name
,
879 image
->pci_resource
.start
= 0;
880 image
->pci_resource
.end
= (unsigned long)size
;
881 image
->pci_resource
.flags
= IORESOURCE_MEM
;
883 retval
= pci_bus_alloc_resource(pdev
->bus
,
884 &(image
->pci_resource
), size
, size
, PCIBIOS_MIN_MEM
,
887 printk(KERN_ERR
"Failed to allocate mem resource for "
888 "window %d size 0x%lx start 0x%lx\n",
889 image
->number
, (unsigned long)size
,
890 (unsigned long)image
->pci_resource
.start
);
894 image
->kern_base
= ioremap_nocache(
895 image
->pci_resource
.start
, size
);
896 if (image
->kern_base
== NULL
) {
897 printk(KERN_ERR
"Failed to remap resource\n");
904 iounmap(image
->kern_base
);
905 image
->kern_base
= NULL
;
907 release_resource(&(image
->pci_resource
));
909 kfree(image
->pci_resource
.name
);
910 memset(&(image
->pci_resource
), 0, sizeof(struct resource
));
916 * Free and unmap PCI Resource
918 static void tsi148_free_resource(struct vme_master_resource
*image
)
920 iounmap(image
->kern_base
);
921 image
->kern_base
= NULL
;
922 release_resource(&(image
->pci_resource
));
923 kfree(image
->pci_resource
.name
);
924 memset(&(image
->pci_resource
), 0, sizeof(struct resource
));
928 * Set the attributes of an outbound window.
930 int tsi148_master_set( struct vme_master_resource
*image
, int enabled
,
931 unsigned long long vme_base
, unsigned long long size
,
932 vme_address_t aspace
, vme_cycle_t cycle
, vme_width_t dwidth
)
936 unsigned int temp_ctl
= 0;
937 unsigned int pci_base_low
, pci_base_high
;
938 unsigned int pci_bound_low
, pci_bound_high
;
939 unsigned int vme_offset_low
, vme_offset_high
;
940 unsigned long long pci_bound
, vme_offset
, pci_base
;
942 /* Verify input data */
943 if (vme_base
& 0xFFFF) {
944 printk(KERN_ERR
"Invalid VME Window alignment\n");
949 if ((size
== 0) && (enabled
!= 0)) {
950 printk(KERN_ERR
"Size must be non-zero for enabled windows\n");
955 spin_lock(&(image
->lock
));
957 /* Let's allocate the resource here rather than further up the stack as
958 * it avoids pushing loads of bus dependant stuff up the stack. If size
959 * is zero, any existing resource will be freed.
961 retval
= tsi148_alloc_resource(image
, size
);
963 spin_unlock(&(image
->lock
));
964 printk(KERN_ERR
"Unable to allocate memory for "
974 pci_base
= (unsigned long long)image
->pci_resource
.start
;
977 * Bound address is a valid address for the window, adjust
978 * according to window granularity.
980 pci_bound
= pci_base
+ (size
- 0x10000);
981 vme_offset
= vme_base
- pci_base
;
984 /* Convert 64-bit variables to 2x 32-bit variables */
985 reg_split(pci_base
, &pci_base_high
, &pci_base_low
);
986 reg_split(pci_bound
, &pci_bound_high
, &pci_bound_low
);
987 reg_split(vme_offset
, &vme_offset_high
, &vme_offset_low
);
989 if (pci_base_low
& 0xFFFF) {
990 spin_unlock(&(image
->lock
));
991 printk(KERN_ERR
"Invalid PCI base alignment\n");
995 if (pci_bound_low
& 0xFFFF) {
996 spin_unlock(&(image
->lock
));
997 printk(KERN_ERR
"Invalid PCI bound alignment\n");
1001 if (vme_offset_low
& 0xFFFF) {
1002 spin_unlock(&(image
->lock
));
1003 printk(KERN_ERR
"Invalid VME Offset alignment\n");
1010 /* Disable while we are mucking around */
1011 temp_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1012 TSI148_LCSR_OFFSET_OTAT
);
1013 temp_ctl
&= ~TSI148_LCSR_OTAT_EN
;
1014 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1015 TSI148_LCSR_OFFSET_OTAT
);
1017 /* XXX Prefetch stuff currently unsupported */
1019 if (vmeOut
->prefetchEnable
) {
1020 temp_ctl
|= 0x40000;
1021 for (x
= 0; x
< 4; x
++) {
1022 if ((2 << x
) >= vmeOut
->prefetchSize
)
1027 temp_ctl
|= (x
<< 16);
1031 /* Setup 2eSST speeds */
1032 temp_ctl
&= ~TSI148_LCSR_OTAT_2eSSTM_M
;
1033 switch (cycle
& (VME_2eSST160
| VME_2eSST267
| VME_2eSST320
)) {
1035 temp_ctl
|= TSI148_LCSR_OTAT_2eSSTM_160
;
1038 temp_ctl
|= TSI148_LCSR_OTAT_2eSSTM_267
;
1041 temp_ctl
|= TSI148_LCSR_OTAT_2eSSTM_320
;
1045 /* Setup cycle types */
1046 if (cycle
& VME_BLT
) {
1047 temp_ctl
&= ~TSI148_LCSR_OTAT_TM_M
;
1048 temp_ctl
|= TSI148_LCSR_OTAT_TM_BLT
;
1050 if (cycle
& VME_MBLT
) {
1051 temp_ctl
&= ~TSI148_LCSR_OTAT_TM_M
;
1052 temp_ctl
|= TSI148_LCSR_OTAT_TM_MBLT
;
1054 if (cycle
& VME_2eVME
) {
1055 temp_ctl
&= ~TSI148_LCSR_OTAT_TM_M
;
1056 temp_ctl
|= TSI148_LCSR_OTAT_TM_2eVME
;
1058 if (cycle
& VME_2eSST
) {
1059 temp_ctl
&= ~TSI148_LCSR_OTAT_TM_M
;
1060 temp_ctl
|= TSI148_LCSR_OTAT_TM_2eSST
;
1062 if (cycle
& VME_2eSSTB
) {
1063 printk(KERN_WARNING
"Currently not setting Broadcast Select "
1065 temp_ctl
&= ~TSI148_LCSR_OTAT_TM_M
;
1066 temp_ctl
|= TSI148_LCSR_OTAT_TM_2eSSTB
;
1069 /* Setup data width */
1070 temp_ctl
&= ~TSI148_LCSR_OTAT_DBW_M
;
1073 temp_ctl
|= TSI148_LCSR_OTAT_DBW_16
;
1076 temp_ctl
|= TSI148_LCSR_OTAT_DBW_32
;
1079 spin_unlock(&(image
->lock
));
1080 printk(KERN_ERR
"Invalid data width\n");
1085 /* Setup address space */
1086 temp_ctl
&= ~TSI148_LCSR_OTAT_AMODE_M
;
1089 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_A16
;
1092 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_A24
;
1095 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_A32
;
1098 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_A64
;
1101 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_CRCSR
;
1104 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_USER1
;
1107 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_USER2
;
1110 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_USER3
;
1113 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_USER4
;
1116 spin_unlock(&(image
->lock
));
1117 printk(KERN_ERR
"Invalid address space\n");
1123 temp_ctl
&= ~(3<<4);
1124 if (cycle
& VME_SUPER
)
1125 temp_ctl
|= TSI148_LCSR_OTAT_SUP
;
1126 if (cycle
& VME_PROG
)
1127 temp_ctl
|= TSI148_LCSR_OTAT_PGM
;
1130 iowrite32be(pci_base_high
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1131 TSI148_LCSR_OFFSET_OTSAU
);
1132 iowrite32be(pci_base_low
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1133 TSI148_LCSR_OFFSET_OTSAL
);
1134 iowrite32be(pci_bound_high
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1135 TSI148_LCSR_OFFSET_OTEAU
);
1136 iowrite32be(pci_bound_low
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1137 TSI148_LCSR_OFFSET_OTEAL
);
1138 iowrite32be(vme_offset_high
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1139 TSI148_LCSR_OFFSET_OTOFU
);
1140 iowrite32be(vme_offset_low
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1141 TSI148_LCSR_OFFSET_OTOFL
);
1143 /* XXX We need to deal with OTBS */
1145 iowrite32be(vmeOut
->bcastSelect2esst
, tsi148_bridge
->base
+
1146 TSI148_LCSR_OT
[i
] + TSI148_LCSR_OFFSET_OTBS
);
1149 /* Write ctl reg without enable */
1150 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1151 TSI148_LCSR_OFFSET_OTAT
);
1154 temp_ctl
|= TSI148_LCSR_OTAT_EN
;
1156 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1157 TSI148_LCSR_OFFSET_OTAT
);
1159 spin_unlock(&(image
->lock
));
1165 tsi148_free_resource(image
);
1173 * Set the attributes of an outbound window.
1175 * XXX Not parsing prefetch information.
1177 int __tsi148_master_get( struct vme_master_resource
*image
, int *enabled
,
1178 unsigned long long *vme_base
, unsigned long long *size
,
1179 vme_address_t
*aspace
, vme_cycle_t
*cycle
, vme_width_t
*dwidth
)
1181 unsigned int i
, ctl
;
1182 unsigned int pci_base_low
, pci_base_high
;
1183 unsigned int pci_bound_low
, pci_bound_high
;
1184 unsigned int vme_offset_low
, vme_offset_high
;
1186 unsigned long long pci_base
, pci_bound
, vme_offset
;
1190 ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1191 TSI148_LCSR_OFFSET_OTAT
);
1193 pci_base_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1194 TSI148_LCSR_OFFSET_OTSAU
);
1195 pci_base_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1196 TSI148_LCSR_OFFSET_OTSAL
);
1197 pci_bound_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1198 TSI148_LCSR_OFFSET_OTEAU
);
1199 pci_bound_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1200 TSI148_LCSR_OFFSET_OTEAL
);
1201 vme_offset_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1202 TSI148_LCSR_OFFSET_OTOFU
);
1203 vme_offset_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1204 TSI148_LCSR_OFFSET_OTOFL
);
1206 /* Convert 64-bit variables to 2x 32-bit variables */
1207 reg_join(pci_base_high
, pci_base_low
, &pci_base
);
1208 reg_join(pci_bound_high
, pci_bound_low
, &pci_bound
);
1209 reg_join(vme_offset_high
, vme_offset_low
, &vme_offset
);
1211 *vme_base
= pci_base
+ vme_offset
;
1212 *size
= (unsigned long long)(pci_bound
- pci_base
) + 0x10000;
1219 if (ctl
& TSI148_LCSR_OTAT_EN
)
1222 /* Setup address space */
1223 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_A16
)
1225 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_A24
)
1227 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_A32
)
1229 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_A64
)
1231 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_CRCSR
)
1232 *aspace
|= VME_CRCSR
;
1233 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_USER1
)
1234 *aspace
|= VME_USER1
;
1235 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_USER2
)
1236 *aspace
|= VME_USER2
;
1237 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_USER3
)
1238 *aspace
|= VME_USER3
;
1239 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_USER4
)
1240 *aspace
|= VME_USER4
;
1242 /* Setup 2eSST speeds */
1243 if ((ctl
& TSI148_LCSR_OTAT_2eSSTM_M
) == TSI148_LCSR_OTAT_2eSSTM_160
)
1244 *cycle
|= VME_2eSST160
;
1245 if ((ctl
& TSI148_LCSR_OTAT_2eSSTM_M
) == TSI148_LCSR_OTAT_2eSSTM_267
)
1246 *cycle
|= VME_2eSST267
;
1247 if ((ctl
& TSI148_LCSR_OTAT_2eSSTM_M
) == TSI148_LCSR_OTAT_2eSSTM_320
)
1248 *cycle
|= VME_2eSST320
;
1250 /* Setup cycle types */
1251 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_SCT
)
1253 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_BLT
)
1255 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_MBLT
)
1257 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_2eVME
)
1258 *cycle
|= VME_2eVME
;
1259 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_2eSST
)
1260 *cycle
|= VME_2eSST
;
1261 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_2eSSTB
)
1262 *cycle
|= VME_2eSSTB
;
1264 if (ctl
& TSI148_LCSR_OTAT_SUP
)
1265 *cycle
|= VME_SUPER
;
1269 if (ctl
& TSI148_LCSR_OTAT_PGM
)
1274 /* Setup data width */
1275 if ((ctl
& TSI148_LCSR_OTAT_DBW_M
) == TSI148_LCSR_OTAT_DBW_16
)
1277 if ((ctl
& TSI148_LCSR_OTAT_DBW_M
) == TSI148_LCSR_OTAT_DBW_32
)
1284 int tsi148_master_get( struct vme_master_resource
*image
, int *enabled
,
1285 unsigned long long *vme_base
, unsigned long long *size
,
1286 vme_address_t
*aspace
, vme_cycle_t
*cycle
, vme_width_t
*dwidth
)
1290 spin_lock(&(image
->lock
));
1292 retval
= __tsi148_master_get(image
, enabled
, vme_base
, size
, aspace
,
1295 spin_unlock(&(image
->lock
));
1300 ssize_t
tsi148_master_read(struct vme_master_resource
*image
, void *buf
,
1301 size_t count
, loff_t offset
)
1303 int retval
, enabled
;
1304 unsigned long long vme_base
, size
;
1305 vme_address_t aspace
;
1308 struct vme_bus_error
*vme_err
= NULL
;
1310 spin_lock(&(image
->lock
));
1312 memcpy_fromio(buf
, image
->kern_base
+ offset
, (unsigned int)count
);
1318 __tsi148_master_get(image
, &enabled
, &vme_base
, &size
, &aspace
, &cycle
,
1321 vme_err
= tsi148_find_error(aspace
, vme_base
+ offset
, count
);
1322 if(vme_err
!= NULL
) {
1323 dev_err(image
->parent
->parent
, "First VME read error detected "
1324 "an at address 0x%llx\n", vme_err
->address
);
1325 retval
= vme_err
->address
- (vme_base
+ offset
);
1326 /* Clear down save errors in this address range */
1327 tsi148_clear_errors(aspace
, vme_base
+ offset
, count
);
1331 spin_unlock(&(image
->lock
));
1337 /* XXX We need to change vme_master_resource->mtx to a spinlock so that read
1338 * and write functions can be used in an interrupt context
1340 ssize_t
tsi148_master_write(struct vme_master_resource
*image
, void *buf
,
1341 size_t count
, loff_t offset
)
1343 int retval
= 0, enabled
;
1344 unsigned long long vme_base
, size
;
1345 vme_address_t aspace
;
1349 struct vme_bus_error
*vme_err
= NULL
;
1351 spin_lock(&(image
->lock
));
1353 memcpy_toio(image
->kern_base
+ offset
, buf
, (unsigned int)count
);
1357 * Writes are posted. We need to do a read on the VME bus to flush out
1358 * all of the writes before we check for errors. We can't guarentee
1359 * that reading the data we have just written is safe. It is believed
1360 * that there isn't any read, write re-ordering, so we can read any
1361 * location in VME space, so lets read the Device ID from the tsi148's
1362 * own registers as mapped into CR/CSR space.
1364 * We check for saved errors in the written address range/space.
1371 * Get window info first, to maximise the time that the buffers may
1372 * fluch on their own
1374 __tsi148_master_get(image
, &enabled
, &vme_base
, &size
, &aspace
, &cycle
,
1377 ioread16(flush_image
->kern_base
+ 0x7F000);
1379 vme_err
= tsi148_find_error(aspace
, vme_base
+ offset
, count
);
1380 if(vme_err
!= NULL
) {
1381 printk("First VME write error detected an at address 0x%llx\n",
1383 retval
= vme_err
->address
- (vme_base
+ offset
);
1384 /* Clear down save errors in this address range */
1385 tsi148_clear_errors(aspace
, vme_base
+ offset
, count
);
1389 spin_unlock(&(image
->lock
));
1395 * Perform an RMW cycle on the VME bus.
1397 * Requires a previously configured master window, returns final value.
1399 unsigned int tsi148_master_rmw(struct vme_master_resource
*image
,
1400 unsigned int mask
, unsigned int compare
, unsigned int swap
,
1403 unsigned long long pci_addr
;
1404 unsigned int pci_addr_high
, pci_addr_low
;
1409 /* Find the PCI address that maps to the desired VME address */
1412 /* Locking as we can only do one of these at a time */
1413 mutex_lock(&(vme_rmw
));
1416 spin_lock(&(image
->lock
));
1418 pci_addr_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1419 TSI148_LCSR_OFFSET_OTSAU
);
1420 pci_addr_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1421 TSI148_LCSR_OFFSET_OTSAL
);
1423 reg_join(pci_addr_high
, pci_addr_low
, &pci_addr
);
1424 reg_split(pci_addr
+ offset
, &pci_addr_high
, &pci_addr_low
);
1426 /* Configure registers */
1427 iowrite32be(mask
, tsi148_bridge
->base
+ TSI148_LCSR_RMWEN
);
1428 iowrite32be(compare
, tsi148_bridge
->base
+ TSI148_LCSR_RMWC
);
1429 iowrite32be(swap
, tsi148_bridge
->base
+ TSI148_LCSR_RMWS
);
1430 iowrite32be(pci_addr_high
, tsi148_bridge
->base
+ TSI148_LCSR_RMWAU
);
1431 iowrite32be(pci_addr_low
, tsi148_bridge
->base
+ TSI148_LCSR_RMWAL
);
1434 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
1435 tmp
|= TSI148_LCSR_VMCTRL_RMWEN
;
1436 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
1438 /* Kick process off with a read to the required address. */
1439 result
= ioread32be(image
->kern_base
+ offset
);
1442 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
1443 tmp
&= ~TSI148_LCSR_VMCTRL_RMWEN
;
1444 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
1446 spin_unlock(&(image
->lock
));
1448 mutex_unlock(&(vme_rmw
));
1453 static int tsi148_dma_set_vme_src_attributes (u32
*attr
, vme_address_t aspace
,
1454 vme_cycle_t cycle
, vme_width_t dwidth
)
1456 /* Setup 2eSST speeds */
1457 switch (cycle
& (VME_2eSST160
| VME_2eSST267
| VME_2eSST320
)) {
1459 *attr
|= TSI148_LCSR_DSAT_2eSSTM_160
;
1462 *attr
|= TSI148_LCSR_DSAT_2eSSTM_267
;
1465 *attr
|= TSI148_LCSR_DSAT_2eSSTM_320
;
1469 /* Setup cycle types */
1470 if (cycle
& VME_SCT
) {
1471 *attr
|= TSI148_LCSR_DSAT_TM_SCT
;
1473 if (cycle
& VME_BLT
) {
1474 *attr
|= TSI148_LCSR_DSAT_TM_BLT
;
1476 if (cycle
& VME_MBLT
) {
1477 *attr
|= TSI148_LCSR_DSAT_TM_MBLT
;
1479 if (cycle
& VME_2eVME
) {
1480 *attr
|= TSI148_LCSR_DSAT_TM_2eVME
;
1482 if (cycle
& VME_2eSST
) {
1483 *attr
|= TSI148_LCSR_DSAT_TM_2eSST
;
1485 if (cycle
& VME_2eSSTB
) {
1486 printk("Currently not setting Broadcast Select Registers\n");
1487 *attr
|= TSI148_LCSR_DSAT_TM_2eSSTB
;
1490 /* Setup data width */
1493 *attr
|= TSI148_LCSR_DSAT_DBW_16
;
1496 *attr
|= TSI148_LCSR_DSAT_DBW_32
;
1499 printk("Invalid data width\n");
1503 /* Setup address space */
1506 *attr
|= TSI148_LCSR_DSAT_AMODE_A16
;
1509 *attr
|= TSI148_LCSR_DSAT_AMODE_A24
;
1512 *attr
|= TSI148_LCSR_DSAT_AMODE_A32
;
1515 *attr
|= TSI148_LCSR_DSAT_AMODE_A64
;
1518 *attr
|= TSI148_LCSR_DSAT_AMODE_CRCSR
;
1521 *attr
|= TSI148_LCSR_DSAT_AMODE_USER1
;
1524 *attr
|= TSI148_LCSR_DSAT_AMODE_USER2
;
1527 *attr
|= TSI148_LCSR_DSAT_AMODE_USER3
;
1530 *attr
|= TSI148_LCSR_DSAT_AMODE_USER4
;
1533 printk("Invalid address space\n");
1538 if (cycle
& VME_SUPER
)
1539 *attr
|= TSI148_LCSR_DSAT_SUP
;
1540 if (cycle
& VME_PROG
)
1541 *attr
|= TSI148_LCSR_DSAT_PGM
;
1546 static int tsi148_dma_set_vme_dest_attributes(u32
*attr
, vme_address_t aspace
,
1547 vme_cycle_t cycle
, vme_width_t dwidth
)
1549 /* Setup 2eSST speeds */
1550 switch (cycle
& (VME_2eSST160
| VME_2eSST267
| VME_2eSST320
)) {
1552 *attr
|= TSI148_LCSR_DDAT_2eSSTM_160
;
1555 *attr
|= TSI148_LCSR_DDAT_2eSSTM_267
;
1558 *attr
|= TSI148_LCSR_DDAT_2eSSTM_320
;
1562 /* Setup cycle types */
1563 if (cycle
& VME_SCT
) {
1564 *attr
|= TSI148_LCSR_DDAT_TM_SCT
;
1566 if (cycle
& VME_BLT
) {
1567 *attr
|= TSI148_LCSR_DDAT_TM_BLT
;
1569 if (cycle
& VME_MBLT
) {
1570 *attr
|= TSI148_LCSR_DDAT_TM_MBLT
;
1572 if (cycle
& VME_2eVME
) {
1573 *attr
|= TSI148_LCSR_DDAT_TM_2eVME
;
1575 if (cycle
& VME_2eSST
) {
1576 *attr
|= TSI148_LCSR_DDAT_TM_2eSST
;
1578 if (cycle
& VME_2eSSTB
) {
1579 printk("Currently not setting Broadcast Select Registers\n");
1580 *attr
|= TSI148_LCSR_DDAT_TM_2eSSTB
;
1583 /* Setup data width */
1586 *attr
|= TSI148_LCSR_DDAT_DBW_16
;
1589 *attr
|= TSI148_LCSR_DDAT_DBW_32
;
1592 printk("Invalid data width\n");
1596 /* Setup address space */
1599 *attr
|= TSI148_LCSR_DDAT_AMODE_A16
;
1602 *attr
|= TSI148_LCSR_DDAT_AMODE_A24
;
1605 *attr
|= TSI148_LCSR_DDAT_AMODE_A32
;
1608 *attr
|= TSI148_LCSR_DDAT_AMODE_A64
;
1611 *attr
|= TSI148_LCSR_DDAT_AMODE_CRCSR
;
1614 *attr
|= TSI148_LCSR_DDAT_AMODE_USER1
;
1617 *attr
|= TSI148_LCSR_DDAT_AMODE_USER2
;
1620 *attr
|= TSI148_LCSR_DDAT_AMODE_USER3
;
1623 *attr
|= TSI148_LCSR_DDAT_AMODE_USER4
;
1626 printk("Invalid address space\n");
1631 if (cycle
& VME_SUPER
)
1632 *attr
|= TSI148_LCSR_DDAT_SUP
;
1633 if (cycle
& VME_PROG
)
1634 *attr
|= TSI148_LCSR_DDAT_PGM
;
1640 * Add a link list descriptor to the list
1642 * XXX Need to handle 2eSST Broadcast select bits
1644 int tsi148_dma_list_add (struct vme_dma_list
*list
, struct vme_dma_attr
*src
,
1645 struct vme_dma_attr
*dest
, size_t count
)
1647 struct tsi148_dma_entry
*entry
, *prev
;
1648 u32 address_high
, address_low
;
1649 struct vme_dma_pattern
*pattern_attr
;
1650 struct vme_dma_pci
*pci_attr
;
1651 struct vme_dma_vme
*vme_attr
;
1652 dma_addr_t desc_ptr
;
1655 /* XXX descriptor must be aligned on 64-bit boundaries */
1656 entry
= (struct tsi148_dma_entry
*)kmalloc(
1657 sizeof(struct tsi148_dma_entry
), GFP_KERNEL
);
1658 if (entry
== NULL
) {
1659 printk("Failed to allocate memory for dma resource "
1665 /* Test descriptor alignment */
1666 if ((unsigned long)&(entry
->descriptor
) & 0x7) {
1667 printk("Descriptor not aligned to 8 byte boundary as "
1668 "required: %p\n", &(entry
->descriptor
));
1673 /* Given we are going to fill out the structure, we probably don't
1674 * need to zero it, but better safe than sorry for now.
1676 memset(&(entry
->descriptor
), 0, sizeof(struct tsi148_dma_descriptor
));
1678 /* Fill out source part */
1679 switch (src
->type
) {
1680 case VME_DMA_PATTERN
:
1681 pattern_attr
= (struct vme_dma_pattern
*)src
->private;
1683 entry
->descriptor
.dsal
= pattern_attr
->pattern
;
1684 entry
->descriptor
.dsat
= TSI148_LCSR_DSAT_TYP_PAT
;
1685 /* Default behaviour is 32 bit pattern */
1686 if (pattern_attr
->type
& VME_DMA_PATTERN_BYTE
) {
1687 entry
->descriptor
.dsat
|= TSI148_LCSR_DSAT_PSZ
;
1689 /* It seems that the default behaviour is to increment */
1690 if ((pattern_attr
->type
& VME_DMA_PATTERN_INCREMENT
) == 0) {
1691 entry
->descriptor
.dsat
|= TSI148_LCSR_DSAT_NIN
;
1695 pci_attr
= (struct vme_dma_pci
*)src
->private;
1697 reg_split((unsigned long long)pci_attr
->address
, &address_high
,
1699 entry
->descriptor
.dsau
= address_high
;
1700 entry
->descriptor
.dsal
= address_low
;
1701 entry
->descriptor
.dsat
= TSI148_LCSR_DSAT_TYP_PCI
;
1704 vme_attr
= (struct vme_dma_vme
*)src
->private;
1706 reg_split((unsigned long long)vme_attr
->address
, &address_high
,
1708 entry
->descriptor
.dsau
= address_high
;
1709 entry
->descriptor
.dsal
= address_low
;
1710 entry
->descriptor
.dsat
= TSI148_LCSR_DSAT_TYP_VME
;
1712 retval
= tsi148_dma_set_vme_src_attributes(
1713 &(entry
->descriptor
.dsat
), vme_attr
->aspace
,
1714 vme_attr
->cycle
, vme_attr
->dwidth
);
1719 printk("Invalid source type\n");
1725 /* Assume last link - this will be over-written by adding another */
1726 entry
->descriptor
.dnlau
= 0;
1727 entry
->descriptor
.dnlal
= TSI148_LCSR_DNLAL_LLA
;
1730 /* Fill out destination part */
1731 switch (dest
->type
) {
1733 pci_attr
= (struct vme_dma_pci
*)dest
->private;
1735 reg_split((unsigned long long)pci_attr
->address
, &address_high
,
1737 entry
->descriptor
.ddau
= address_high
;
1738 entry
->descriptor
.ddal
= address_low
;
1739 entry
->descriptor
.ddat
= TSI148_LCSR_DDAT_TYP_PCI
;
1742 vme_attr
= (struct vme_dma_vme
*)dest
->private;
1744 reg_split((unsigned long long)vme_attr
->address
, &address_high
,
1746 entry
->descriptor
.ddau
= address_high
;
1747 entry
->descriptor
.ddal
= address_low
;
1748 entry
->descriptor
.ddat
= TSI148_LCSR_DDAT_TYP_VME
;
1750 retval
= tsi148_dma_set_vme_dest_attributes(
1751 &(entry
->descriptor
.ddat
), vme_attr
->aspace
,
1752 vme_attr
->cycle
, vme_attr
->dwidth
);
1757 printk("Invalid destination type\n");
1763 /* Fill out count */
1764 entry
->descriptor
.dcnt
= (u32
)count
;
1767 list_add_tail(&(entry
->list
), &(list
->entries
));
1769 /* Fill out previous descriptors "Next Address" */
1770 if(entry
->list
.prev
!= &(list
->entries
)){
1771 prev
= list_entry(entry
->list
.prev
, struct tsi148_dma_entry
,
1773 /* We need the bus address for the pointer */
1774 desc_ptr
= virt_to_bus(&(entry
->descriptor
));
1775 reg_split(desc_ptr
, &(prev
->descriptor
.dnlau
),
1776 &(prev
->descriptor
.dnlal
));
1790 * Check to see if the provided DMA channel is busy.
1792 static int tsi148_dma_busy(int channel
)
1796 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_DMA
[channel
] +
1797 TSI148_LCSR_OFFSET_DSTA
);
1799 if (tmp
& TSI148_LCSR_DSTA_BSY
)
1807 * Execute a previously generated link list
1809 * XXX Need to provide control register configuration.
1811 int tsi148_dma_list_exec(struct vme_dma_list
*list
)
1813 struct vme_dma_resource
*ctrlr
;
1814 int channel
, retval
= 0;
1815 struct tsi148_dma_entry
*entry
;
1816 dma_addr_t bus_addr
;
1817 u32 bus_addr_high
, bus_addr_low
;
1818 u32 val
, dctlreg
= 0;
1823 ctrlr
= list
->parent
;
1825 mutex_lock(&(ctrlr
->mtx
));
1827 channel
= ctrlr
->number
;
1829 if (! list_empty(&(ctrlr
->running
))) {
1831 * XXX We have an active DMA transfer and currently haven't
1832 * sorted out the mechanism for "pending" DMA transfers.
1835 /* Need to add to pending here */
1836 mutex_unlock(&(ctrlr
->mtx
));
1839 list_add(&(list
->list
), &(ctrlr
->running
));
1842 /* XXX Still todo */
1843 for (x
= 0; x
< 8; x
++) { /* vme block size */
1844 if ((32 << x
) >= vmeDma
->maxVmeBlockSize
) {
1850 dctlreg
|= (x
<< 12);
1852 for (x
= 0; x
< 8; x
++) { /* pci block size */
1853 if ((32 << x
) >= vmeDma
->maxPciBlockSize
) {
1859 dctlreg
|= (x
<< 4);
1861 if (vmeDma
->vmeBackOffTimer
) {
1862 for (x
= 1; x
< 8; x
++) { /* vme timer */
1863 if ((1 << (x
- 1)) >= vmeDma
->vmeBackOffTimer
) {
1869 dctlreg
|= (x
<< 8);
1872 if (vmeDma
->pciBackOffTimer
) {
1873 for (x
= 1; x
< 8; x
++) { /* pci timer */
1874 if ((1 << (x
- 1)) >= vmeDma
->pciBackOffTimer
) {
1880 dctlreg
|= (x
<< 0);
1884 /* Get first bus address and write into registers */
1885 entry
= list_first_entry(&(list
->entries
), struct tsi148_dma_entry
,
1888 bus_addr
= virt_to_bus(&(entry
->descriptor
));
1890 mutex_unlock(&(ctrlr
->mtx
));
1892 reg_split(bus_addr
, &bus_addr_high
, &bus_addr_low
);
1894 iowrite32be(bus_addr_high
, tsi148_bridge
->base
+
1895 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DNLAU
);
1896 iowrite32be(bus_addr_low
, tsi148_bridge
->base
+
1897 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DNLAL
);
1899 /* Start the operation */
1900 iowrite32be(dctlreg
| TSI148_LCSR_DCTL_DGO
, tsi148_bridge
->base
+
1901 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DCTL
);
1903 wait_event_interruptible(dma_queue
[channel
], tsi148_dma_busy(channel
));
1905 * Read status register, this register is valid until we kick off a
1908 val
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_DMA
[channel
] +
1909 TSI148_LCSR_OFFSET_DSTA
);
1911 if (val
& TSI148_LCSR_DSTA_VBE
) {
1912 printk(KERN_ERR
"tsi148: DMA Error. DSTA=%08X\n", val
);
1916 /* Remove list from running list */
1917 mutex_lock(&(ctrlr
->mtx
));
1918 list_del(&(list
->list
));
1919 mutex_unlock(&(ctrlr
->mtx
));
1925 * Clean up a previously generated link list
1927 * We have a separate function, don't assume that the chain can't be reused.
1929 int tsi148_dma_list_empty(struct vme_dma_list
*list
)
1931 struct list_head
*pos
, *temp
;
1932 struct tsi148_dma_entry
*entry
;
1934 /* detach and free each entry */
1935 list_for_each_safe(pos
, temp
, &(list
->entries
)) {
1937 entry
= list_entry(pos
, struct tsi148_dma_entry
, list
);
1945 * All 4 location monitors reside at the same base - this is therefore a
1946 * system wide configuration.
1948 * This does not enable the LM monitor - that should be done when the first
1949 * callback is attached and disabled when the last callback is removed.
1951 int tsi148_lm_set(struct vme_lm_resource
*lm
, unsigned long long lm_base
,
1952 vme_address_t aspace
, vme_cycle_t cycle
)
1954 u32 lm_base_high
, lm_base_low
, lm_ctl
= 0;
1957 mutex_lock(&(lm
->mtx
));
1959 /* If we already have a callback attached, we can't move it! */
1960 for (i
= 0; i
< lm
->monitors
; i
++) {
1961 if(lm_callback
[i
] != NULL
) {
1962 mutex_unlock(&(lm
->mtx
));
1963 printk("Location monitor callback attached, can't "
1971 lm_ctl
|= TSI148_LCSR_LMAT_AS_A16
;
1974 lm_ctl
|= TSI148_LCSR_LMAT_AS_A24
;
1977 lm_ctl
|= TSI148_LCSR_LMAT_AS_A32
;
1980 lm_ctl
|= TSI148_LCSR_LMAT_AS_A64
;
1983 mutex_unlock(&(lm
->mtx
));
1984 printk("Invalid address space\n");
1989 if (cycle
& VME_SUPER
)
1990 lm_ctl
|= TSI148_LCSR_LMAT_SUPR
;
1991 if (cycle
& VME_USER
)
1992 lm_ctl
|= TSI148_LCSR_LMAT_NPRIV
;
1993 if (cycle
& VME_PROG
)
1994 lm_ctl
|= TSI148_LCSR_LMAT_PGM
;
1995 if (cycle
& VME_DATA
)
1996 lm_ctl
|= TSI148_LCSR_LMAT_DATA
;
1998 reg_split(lm_base
, &lm_base_high
, &lm_base_low
);
2000 iowrite32be(lm_base_high
, tsi148_bridge
->base
+ TSI148_LCSR_LMBAU
);
2001 iowrite32be(lm_base_low
, tsi148_bridge
->base
+ TSI148_LCSR_LMBAL
);
2002 iowrite32be(lm_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2004 mutex_unlock(&(lm
->mtx
));
2009 /* Get configuration of the callback monitor and return whether it is enabled
2012 int tsi148_lm_get(struct vme_lm_resource
*lm
, unsigned long long *lm_base
,
2013 vme_address_t
*aspace
, vme_cycle_t
*cycle
)
2015 u32 lm_base_high
, lm_base_low
, lm_ctl
, enabled
= 0;
2017 mutex_lock(&(lm
->mtx
));
2019 lm_base_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_LMBAU
);
2020 lm_base_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_LMBAL
);
2021 lm_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2023 reg_join(lm_base_high
, lm_base_low
, lm_base
);
2025 if (lm_ctl
& TSI148_LCSR_LMAT_EN
)
2028 if ((lm_ctl
& TSI148_LCSR_LMAT_AS_M
) == TSI148_LCSR_LMAT_AS_A16
) {
2031 if ((lm_ctl
& TSI148_LCSR_LMAT_AS_M
) == TSI148_LCSR_LMAT_AS_A24
) {
2034 if ((lm_ctl
& TSI148_LCSR_LMAT_AS_M
) == TSI148_LCSR_LMAT_AS_A32
) {
2037 if ((lm_ctl
& TSI148_LCSR_LMAT_AS_M
) == TSI148_LCSR_LMAT_AS_A64
) {
2041 if (lm_ctl
& TSI148_LCSR_LMAT_SUPR
)
2042 *cycle
|= VME_SUPER
;
2043 if (lm_ctl
& TSI148_LCSR_LMAT_NPRIV
)
2045 if (lm_ctl
& TSI148_LCSR_LMAT_PGM
)
2047 if (lm_ctl
& TSI148_LCSR_LMAT_DATA
)
2050 mutex_unlock(&(lm
->mtx
));
2056 * Attach a callback to a specific location monitor.
2058 * Callback will be passed the monitor triggered.
2060 int tsi148_lm_attach(struct vme_lm_resource
*lm
, int monitor
,
2061 void (*callback
)(int))
2065 mutex_lock(&(lm
->mtx
));
2067 /* Ensure that the location monitor is configured - need PGM or DATA */
2068 lm_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2069 if ((lm_ctl
& (TSI148_LCSR_LMAT_PGM
| TSI148_LCSR_LMAT_DATA
)) == 0) {
2070 mutex_unlock(&(lm
->mtx
));
2071 printk("Location monitor not properly configured\n");
2075 /* Check that a callback isn't already attached */
2076 if (lm_callback
[monitor
] != NULL
) {
2077 mutex_unlock(&(lm
->mtx
));
2078 printk("Existing callback attached\n");
2082 /* Attach callback */
2083 lm_callback
[monitor
] = callback
;
2085 /* Enable Location Monitor interrupt */
2086 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
2087 tmp
|= TSI148_LCSR_INTEN_LMEN
[monitor
];
2088 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
2090 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
2091 tmp
|= TSI148_LCSR_INTEO_LMEO
[monitor
];
2092 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
2094 /* Ensure that global Location Monitor Enable set */
2095 if ((lm_ctl
& TSI148_LCSR_LMAT_EN
) == 0) {
2096 lm_ctl
|= TSI148_LCSR_LMAT_EN
;
2097 iowrite32be(lm_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2100 mutex_unlock(&(lm
->mtx
));
2106 * Detach a callback function forn a specific location monitor.
2108 int tsi148_lm_detach(struct vme_lm_resource
*lm
, int monitor
)
2112 mutex_lock(&(lm
->mtx
));
2114 /* Disable Location Monitor and ensure previous interrupts are clear */
2115 lm_en
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
2116 lm_en
&= ~TSI148_LCSR_INTEN_LMEN
[monitor
];
2117 iowrite32be(lm_en
, tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
2119 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
2120 tmp
&= ~TSI148_LCSR_INTEO_LMEO
[monitor
];
2121 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
2123 iowrite32be(TSI148_LCSR_INTC_LMC
[monitor
],
2124 tsi148_bridge
->base
+ TSI148_LCSR_INTC
);
2126 /* Detach callback */
2127 lm_callback
[monitor
] = NULL
;
2129 /* If all location monitors disabled, disable global Location Monitor */
2130 if ((lm_en
& (TSI148_LCSR_INTS_LM0S
| TSI148_LCSR_INTS_LM1S
|
2131 TSI148_LCSR_INTS_LM2S
| TSI148_LCSR_INTS_LM3S
)) == 0) {
2132 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2133 tmp
&= ~TSI148_LCSR_LMAT_EN
;
2134 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2137 mutex_unlock(&(lm
->mtx
));
2143 * Determine Geographical Addressing
2145 int tsi148_slot_get(void)
2150 slot
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VSTAT
);
2151 slot
= slot
& TSI148_LCSR_VSTAT_GA_M
;
2158 static int __init
tsi148_init(void)
2160 return pci_register_driver(&tsi148_driver
);
2164 * Configure CR/CSR space
2166 * Access to the CR/CSR can be configured at power-up. The location of the
2167 * CR/CSR registers in the CR/CSR address space is determined by the boards
2168 * Auto-ID or Geographic address. This function ensures that the window is
2169 * enabled at an offset consistent with the boards geopgraphic address.
2171 * Each board has a 512kB window, with the highest 4kB being used for the
2172 * boards registers, this means there is a fix length 508kB window which must
2173 * be mapped onto PCI memory.
2175 static int tsi148_crcsr_init(struct pci_dev
*pdev
)
2177 u32 cbar
, crat
, vstat
;
2178 u32 crcsr_bus_high
, crcsr_bus_low
;
2181 /* Allocate mem for CR/CSR image */
2182 crcsr_kernel
= pci_alloc_consistent(pdev
, VME_CRCSR_BUF_SIZE
,
2184 if (crcsr_kernel
== NULL
) {
2185 dev_err(&pdev
->dev
, "Failed to allocate memory for CR/CSR "
2190 memset(crcsr_kernel
, 0, VME_CRCSR_BUF_SIZE
);
2192 reg_split(crcsr_bus
, &crcsr_bus_high
, &crcsr_bus_low
);
2194 iowrite32be(crcsr_bus_high
, tsi148_bridge
->base
+ TSI148_LCSR_CROU
);
2195 iowrite32be(crcsr_bus_low
, tsi148_bridge
->base
+ TSI148_LCSR_CROL
);
2197 /* Ensure that the CR/CSR is configured at the correct offset */
2198 cbar
= ioread32be(tsi148_bridge
->base
+ TSI148_CBAR
);
2199 cbar
= (cbar
& TSI148_CRCSR_CBAR_M
)>>3;
2201 vstat
= tsi148_slot_get();
2203 if (cbar
!= vstat
) {
2205 dev_info(&pdev
->dev
, "Setting CR/CSR offset\n");
2206 iowrite32be(cbar
<<3, tsi148_bridge
->base
+ TSI148_CBAR
);
2208 dev_info(&pdev
->dev
, "CR/CSR Offset: %d\n", cbar
);
2210 crat
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_CRAT
);
2211 if (crat
& TSI148_LCSR_CRAT_EN
) {
2212 dev_info(&pdev
->dev
, "Enabling CR/CSR space\n");
2213 iowrite32be(crat
| TSI148_LCSR_CRAT_EN
,
2214 tsi148_bridge
->base
+ TSI148_LCSR_CRAT
);
2216 dev_info(&pdev
->dev
, "CR/CSR already enabled\n");
2218 /* If we want flushed, error-checked writes, set up a window
2219 * over the CR/CSR registers. We read from here to safely flush
2220 * through VME writes.
2223 retval
= tsi148_master_set(flush_image
, 1, (vstat
* 0x80000),
2224 0x80000, VME_CRCSR
, VME_SCT
, VME_D16
);
2226 dev_err(&pdev
->dev
, "Configuring flush image failed\n");
2233 static void tsi148_crcsr_exit(struct pci_dev
*pdev
)
2237 /* Turn off CR/CSR space */
2238 crat
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_CRAT
);
2239 iowrite32be(crat
& ~TSI148_LCSR_CRAT_EN
,
2240 tsi148_bridge
->base
+ TSI148_LCSR_CRAT
);
2243 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_CROU
);
2244 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_CROL
);
2246 pci_free_consistent(pdev
, VME_CRCSR_BUF_SIZE
, crcsr_kernel
, crcsr_bus
);
2249 static int tsi148_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
2251 int retval
, i
, master_num
;
2253 struct list_head
*pos
= NULL
;
2254 struct vme_master_resource
*master_image
;
2255 struct vme_slave_resource
*slave_image
;
2256 struct vme_dma_resource
*dma_ctrlr
;
2257 struct vme_lm_resource
*lm
;
2259 /* If we want to support more than one of each bridge, we need to
2260 * dynamically generate this so we get one per device
2262 tsi148_bridge
= (struct vme_bridge
*)kmalloc(sizeof(struct vme_bridge
),
2264 if (tsi148_bridge
== NULL
) {
2265 dev_err(&pdev
->dev
, "Failed to allocate memory for device "
2271 memset(tsi148_bridge
, 0, sizeof(struct vme_bridge
));
2273 /* Enable the device */
2274 retval
= pci_enable_device(pdev
);
2276 dev_err(&pdev
->dev
, "Unable to enable device\n");
2281 retval
= pci_request_regions(pdev
, driver_name
);
2283 dev_err(&pdev
->dev
, "Unable to reserve resources\n");
2287 /* map registers in BAR 0 */
2288 tsi148_bridge
->base
= ioremap_nocache(pci_resource_start(pdev
, 0), 4096);
2289 if (!tsi148_bridge
->base
) {
2290 dev_err(&pdev
->dev
, "Unable to remap CRG region\n");
2295 /* Check to see if the mapping worked out */
2296 data
= ioread32(tsi148_bridge
->base
+ TSI148_PCFS_ID
) & 0x0000FFFF;
2297 if (data
!= PCI_VENDOR_ID_TUNDRA
) {
2298 dev_err(&pdev
->dev
, "CRG region check failed\n");
2303 /* Initialize wait queues & mutual exclusion flags */
2304 /* XXX These need to be moved to the vme_bridge structure */
2305 init_waitqueue_head(&dma_queue
[0]);
2306 init_waitqueue_head(&dma_queue
[1]);
2307 init_waitqueue_head(&iack_queue
);
2308 mutex_init(&(vme_int
));
2309 mutex_init(&(vme_rmw
));
2311 tsi148_bridge
->parent
= &(pdev
->dev
);
2312 strcpy(tsi148_bridge
->name
, driver_name
);
2315 retval
= tsi148_irq_init(tsi148_bridge
);
2317 dev_err(&pdev
->dev
, "Chip Initialization failed.\n");
2321 /* If we are going to flush writes, we need to read from the VME bus.
2322 * We need to do this safely, thus we read the devices own CR/CSR
2323 * register. To do this we must set up a window in CR/CSR space and
2324 * hence have one less master window resource available.
2326 master_num
= TSI148_MAX_MASTER
;
2330 flush_image
= (struct vme_master_resource
*)kmalloc(
2331 sizeof(struct vme_master_resource
), GFP_KERNEL
);
2332 if (flush_image
== NULL
) {
2333 dev_err(&pdev
->dev
, "Failed to allocate memory for "
2334 "flush resource structure\n");
2338 flush_image
->parent
= tsi148_bridge
;
2339 spin_lock_init(&(flush_image
->lock
));
2340 flush_image
->locked
= 1;
2341 flush_image
->number
= master_num
;
2342 flush_image
->address_attr
= VME_A16
| VME_A24
| VME_A32
|
2344 flush_image
->cycle_attr
= VME_SCT
| VME_BLT
| VME_MBLT
|
2345 VME_2eVME
| VME_2eSST
| VME_2eSSTB
| VME_2eSST160
|
2346 VME_2eSST267
| VME_2eSST320
| VME_SUPER
| VME_USER
|
2347 VME_PROG
| VME_DATA
;
2348 flush_image
->width_attr
= VME_D16
| VME_D32
;
2349 memset(&(flush_image
->pci_resource
), 0,
2350 sizeof(struct resource
));
2351 flush_image
->kern_base
= NULL
;
2354 /* Add master windows to list */
2355 INIT_LIST_HEAD(&(tsi148_bridge
->master_resources
));
2356 for (i
= 0; i
< master_num
; i
++) {
2357 master_image
= (struct vme_master_resource
*)kmalloc(
2358 sizeof(struct vme_master_resource
), GFP_KERNEL
);
2359 if (master_image
== NULL
) {
2360 dev_err(&pdev
->dev
, "Failed to allocate memory for "
2361 "master resource structure\n");
2365 master_image
->parent
= tsi148_bridge
;
2366 spin_lock_init(&(master_image
->lock
));
2367 master_image
->locked
= 0;
2368 master_image
->number
= i
;
2369 master_image
->address_attr
= VME_A16
| VME_A24
| VME_A32
|
2371 master_image
->cycle_attr
= VME_SCT
| VME_BLT
| VME_MBLT
|
2372 VME_2eVME
| VME_2eSST
| VME_2eSSTB
| VME_2eSST160
|
2373 VME_2eSST267
| VME_2eSST320
| VME_SUPER
| VME_USER
|
2374 VME_PROG
| VME_DATA
;
2375 master_image
->width_attr
= VME_D16
| VME_D32
;
2376 memset(&(master_image
->pci_resource
), 0,
2377 sizeof(struct resource
));
2378 master_image
->kern_base
= NULL
;
2379 list_add_tail(&(master_image
->list
),
2380 &(tsi148_bridge
->master_resources
));
2383 /* Add slave windows to list */
2384 INIT_LIST_HEAD(&(tsi148_bridge
->slave_resources
));
2385 for (i
= 0; i
< TSI148_MAX_SLAVE
; i
++) {
2386 slave_image
= (struct vme_slave_resource
*)kmalloc(
2387 sizeof(struct vme_slave_resource
), GFP_KERNEL
);
2388 if (slave_image
== NULL
) {
2389 dev_err(&pdev
->dev
, "Failed to allocate memory for "
2390 "slave resource structure\n");
2394 slave_image
->parent
= tsi148_bridge
;
2395 mutex_init(&(slave_image
->mtx
));
2396 slave_image
->locked
= 0;
2397 slave_image
->number
= i
;
2398 slave_image
->address_attr
= VME_A16
| VME_A24
| VME_A32
|
2399 VME_A64
| VME_CRCSR
| VME_USER1
| VME_USER2
|
2400 VME_USER3
| VME_USER4
;
2401 slave_image
->cycle_attr
= VME_SCT
| VME_BLT
| VME_MBLT
|
2402 VME_2eVME
| VME_2eSST
| VME_2eSSTB
| VME_2eSST160
|
2403 VME_2eSST267
| VME_2eSST320
| VME_SUPER
| VME_USER
|
2404 VME_PROG
| VME_DATA
;
2405 list_add_tail(&(slave_image
->list
),
2406 &(tsi148_bridge
->slave_resources
));
2409 /* Add dma engines to list */
2410 INIT_LIST_HEAD(&(tsi148_bridge
->dma_resources
));
2411 for (i
= 0; i
< TSI148_MAX_DMA
; i
++) {
2412 dma_ctrlr
= (struct vme_dma_resource
*)kmalloc(
2413 sizeof(struct vme_dma_resource
), GFP_KERNEL
);
2414 if (dma_ctrlr
== NULL
) {
2415 dev_err(&pdev
->dev
, "Failed to allocate memory for "
2416 "dma resource structure\n");
2420 dma_ctrlr
->parent
= tsi148_bridge
;
2421 mutex_init(&(dma_ctrlr
->mtx
));
2422 dma_ctrlr
->locked
= 0;
2423 dma_ctrlr
->number
= i
;
2424 INIT_LIST_HEAD(&(dma_ctrlr
->pending
));
2425 INIT_LIST_HEAD(&(dma_ctrlr
->running
));
2426 list_add_tail(&(dma_ctrlr
->list
),
2427 &(tsi148_bridge
->dma_resources
));
2430 /* Add location monitor to list */
2431 INIT_LIST_HEAD(&(tsi148_bridge
->lm_resources
));
2432 lm
= kmalloc(sizeof(struct vme_lm_resource
), GFP_KERNEL
);
2434 dev_err(&pdev
->dev
, "Failed to allocate memory for "
2435 "location monitor resource structure\n");
2439 lm
->parent
= tsi148_bridge
;
2440 mutex_init(&(lm
->mtx
));
2444 list_add_tail(&(lm
->list
), &(tsi148_bridge
->lm_resources
));
2446 tsi148_bridge
->slave_get
= tsi148_slave_get
;
2447 tsi148_bridge
->slave_set
= tsi148_slave_set
;
2448 tsi148_bridge
->master_get
= tsi148_master_get
;
2449 tsi148_bridge
->master_set
= tsi148_master_set
;
2450 tsi148_bridge
->master_read
= tsi148_master_read
;
2451 tsi148_bridge
->master_write
= tsi148_master_write
;
2452 tsi148_bridge
->master_rmw
= tsi148_master_rmw
;
2453 tsi148_bridge
->dma_list_add
= tsi148_dma_list_add
;
2454 tsi148_bridge
->dma_list_exec
= tsi148_dma_list_exec
;
2455 tsi148_bridge
->dma_list_empty
= tsi148_dma_list_empty
;
2456 tsi148_bridge
->irq_set
= tsi148_irq_set
;
2457 tsi148_bridge
->irq_generate
= tsi148_irq_generate
;
2458 tsi148_bridge
->lm_set
= tsi148_lm_set
;
2459 tsi148_bridge
->lm_get
= tsi148_lm_get
;
2460 tsi148_bridge
->lm_attach
= tsi148_lm_attach
;
2461 tsi148_bridge
->lm_detach
= tsi148_lm_detach
;
2462 tsi148_bridge
->slot_get
= tsi148_slot_get
;
2464 data
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VSTAT
);
2465 dev_info(&pdev
->dev
, "Board is%s the VME system controller\n",
2466 (data
& TSI148_LCSR_VSTAT_SCONS
)? "" : " not");
2468 dev_info(&pdev
->dev
, "VME geographical address is %d\n",
2469 data
& TSI148_LCSR_VSTAT_GA_M
);
2471 dev_info(&pdev
->dev
, "VME geographical address is set to %d\n",
2474 dev_info(&pdev
->dev
, "VME Write and flush and error check is %s\n",
2475 err_chk
? "enabled" : "disabled");
2477 if(tsi148_crcsr_init(pdev
)) {
2478 dev_err(&pdev
->dev
, "CR/CSR configuration failed.\n");
2483 /* Need to save tsi148_bridge pointer locally in link list for use in
2486 retval
= vme_register_bridge(tsi148_bridge
);
2488 dev_err(&pdev
->dev
, "Chip Registration failed.\n");
2492 /* Clear VME bus "board fail", and "power-up reset" lines */
2493 data
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VSTAT
);
2494 data
&= ~TSI148_LCSR_VSTAT_BRDFL
;
2495 data
|= TSI148_LCSR_VSTAT_CPURST
;
2496 iowrite32be(data
, tsi148_bridge
->base
+ TSI148_LCSR_VSTAT
);
2500 vme_unregister_bridge(tsi148_bridge
);
2502 tsi148_crcsr_exit(pdev
);
2505 /* resources are stored in link list */
2506 list_for_each(pos
, &(tsi148_bridge
->lm_resources
)) {
2507 lm
= list_entry(pos
, struct vme_lm_resource
, list
);
2512 /* resources are stored in link list */
2513 list_for_each(pos
, &(tsi148_bridge
->dma_resources
)) {
2514 dma_ctrlr
= list_entry(pos
, struct vme_dma_resource
, list
);
2519 /* resources are stored in link list */
2520 list_for_each(pos
, &(tsi148_bridge
->slave_resources
)) {
2521 slave_image
= list_entry(pos
, struct vme_slave_resource
, list
);
2526 /* resources are stored in link list */
2527 list_for_each(pos
, &(tsi148_bridge
->master_resources
)) {
2528 master_image
= list_entry(pos
, struct vme_master_resource
, list
);
2530 kfree(master_image
);
2533 tsi148_irq_exit(pdev
);
2536 iounmap(tsi148_bridge
->base
);
2538 pci_release_regions(pdev
);
2540 pci_disable_device(pdev
);
2542 kfree(tsi148_bridge
);
2548 static void tsi148_remove(struct pci_dev
*pdev
)
2550 struct list_head
*pos
= NULL
;
2551 struct vme_master_resource
*master_image
;
2552 struct vme_slave_resource
*slave_image
;
2553 struct vme_dma_resource
*dma_ctrlr
;
2556 dev_dbg(&pdev
->dev
, "Driver is being unloaded.\n");
2558 /* XXX We need to find the pdev->dev in the list of vme_bridge->dev's */
2561 * Shutdown all inbound and outbound windows.
2563 for (i
= 0; i
< 8; i
++) {
2564 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
2565 TSI148_LCSR_OFFSET_ITAT
);
2566 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
2567 TSI148_LCSR_OFFSET_OTAT
);
2571 * Shutdown Location monitor.
2573 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2578 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_CSRAT
);
2581 * Clear error status.
2583 iowrite32be(0xFFFFFFFF, tsi148_bridge
->base
+ TSI148_LCSR_EDPAT
);
2584 iowrite32be(0xFFFFFFFF, tsi148_bridge
->base
+ TSI148_LCSR_VEAT
);
2585 iowrite32be(0x07000700, tsi148_bridge
->base
+ TSI148_LCSR_PSTAT
);
2588 * Remove VIRQ interrupt (if any)
2590 if (ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VICR
) & 0x800) {
2591 iowrite32be(0x8000, tsi148_bridge
->base
+ TSI148_LCSR_VICR
);
2595 * Map all Interrupts to PCI INTA
2597 iowrite32be(0x0, tsi148_bridge
->base
+ TSI148_LCSR_INTM1
);
2598 iowrite32be(0x0, tsi148_bridge
->base
+ TSI148_LCSR_INTM2
);
2600 tsi148_irq_exit(pdev
);
2602 vme_unregister_bridge(tsi148_bridge
);
2604 tsi148_crcsr_exit(pdev
);
2606 /* resources are stored in link list */
2607 list_for_each(pos
, &(tsi148_bridge
->dma_resources
)) {
2608 dma_ctrlr
= list_entry(pos
, struct vme_dma_resource
, list
);
2613 /* resources are stored in link list */
2614 list_for_each(pos
, &(tsi148_bridge
->slave_resources
)) {
2615 slave_image
= list_entry(pos
, struct vme_slave_resource
, list
);
2620 /* resources are stored in link list */
2621 list_for_each(pos
, &(tsi148_bridge
->master_resources
)) {
2622 master_image
= list_entry(pos
, struct vme_master_resource
,
2625 kfree(master_image
);
2628 tsi148_irq_exit(pdev
);
2630 iounmap(tsi148_bridge
->base
);
2632 pci_release_regions(pdev
);
2634 pci_disable_device(pdev
);
2636 kfree(tsi148_bridge
);
2639 static void __exit
tsi148_exit(void)
2641 pci_unregister_driver(&tsi148_driver
);
2643 printk(KERN_DEBUG
"Driver removed.\n");
2646 MODULE_PARM_DESC(err_chk
, "Check for VME errors on reads and writes");
2647 module_param(err_chk
, bool, 0);
2649 MODULE_PARM_DESC(geoid
, "Override geographical addressing");
2650 module_param(geoid
, int, 0);
2652 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2653 MODULE_LICENSE("GPL");
2655 module_init(tsi148_init
);
2656 module_exit(tsi148_exit
);
2658 /*----------------------------------------------------------------------------
2660 *--------------------------------------------------------------------------*/
2664 * Direct Mode DMA transfer
2666 * XXX Not looking at direct mode for now, we can always use link list mode
2667 * with a single entry.
2669 int tsi148_dma_run(struct vme_dma_resource
*resource
, struct vme_dma_attr src
,
2670 struct vme_dma_attr dest
, size_t count
)
2676 struct vmeDmaPacket
*cur_dma
;
2677 struct tsi148_dma_descriptor
*dmaLL
;
2682 for (x
= 0; x
< 8; x
++) { /* vme block size */
2683 if ((32 << x
) >= vmeDma
->maxVmeBlockSize
) {
2689 dctlreg
|= (x
<< 12);
2691 for (x
= 0; x
< 8; x
++) { /* pci block size */
2692 if ((32 << x
) >= vmeDma
->maxPciBlockSize
) {
2698 dctlreg
|= (x
<< 4);
2700 if (vmeDma
->vmeBackOffTimer
) {
2701 for (x
= 1; x
< 8; x
++) { /* vme timer */
2702 if ((1 << (x
- 1)) >= vmeDma
->vmeBackOffTimer
) {
2708 dctlreg
|= (x
<< 8);
2711 if (vmeDma
->pciBackOffTimer
) {
2712 for (x
= 1; x
< 8; x
++) { /* pci timer */
2713 if ((1 << (x
- 1)) >= vmeDma
->pciBackOffTimer
) {
2719 dctlreg
|= (x
<< 0);
2722 /* Program registers for DMA transfer */
2723 iowrite32be(dmaLL
->dsau
, tsi148_bridge
->base
+
2724 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DSAU
);
2725 iowrite32be(dmaLL
->dsal
, tsi148_bridge
->base
+
2726 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DSAL
);
2727 iowrite32be(dmaLL
->ddau
, tsi148_bridge
->base
+
2728 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DDAU
);
2729 iowrite32be(dmaLL
->ddal
, tsi148_bridge
->base
+
2730 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DDAL
);
2731 iowrite32be(dmaLL
->dsat
, tsi148_bridge
->base
+
2732 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DSAT
);
2733 iowrite32be(dmaLL
->ddat
, tsi148_bridge
->base
+
2734 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DDAT
);
2735 iowrite32be(dmaLL
->dcnt
, tsi148_bridge
->base
+
2736 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DCNT
);
2737 iowrite32be(dmaLL
->ddbs
, tsi148_bridge
->base
+
2738 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DDBS
);
2740 /* Start the operation */
2741 iowrite32be(dctlreg
| 0x2000000, tsi148_bridge
->base
+
2742 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DCTL
);
2744 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_DMA
[channel
] +
2745 TSI148_LCSR_OFFSET_DSTA
);
2746 wait_event_interruptible(dma_queue
[channel
], (tmp
& 0x1000000) == 0);
2749 * Read status register, we should probably do this in some error
2750 * handler rather than here so that we can be sure we haven't kicked off
2751 * another DMA transfer.
2753 val
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_DMA
[channel
] +
2754 TSI148_LCSR_OFFSET_DSTA
);
2756 vmeDma
->vmeDmaStatus
= 0;
2757 if (val
& 0x10000000) {
2759 "DMA Error in DMA_tempe_irqhandler DSTA=%08X\n",
2761 vmeDma
->vmeDmaStatus
= val
;
/* Global VME controller information */
struct pci_dev *vme_pci_dev;
2774 * Set the VME bus arbiter with the requested attributes
2776 int tempe_set_arbiter(vmeArbiterCfg_t
* vmeArb
)
2781 temp_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VCTRL
);
2782 temp_ctl
&= 0xFFEFFF00;
2784 if (vmeArb
->globalTimeoutTimer
== 0xFFFFFFFF) {
2786 } else if (vmeArb
->globalTimeoutTimer
> 2048) {
2788 } else if (vmeArb
->globalTimeoutTimer
== 0) {
2792 while ((16 * (1 << (gto
- 1))) < vmeArb
->globalTimeoutTimer
) {
2798 if (vmeArb
->arbiterMode
!= VME_PRIORITY_MODE
) {
2802 if (vmeArb
->arbiterTimeoutFlag
) {
2806 if (vmeArb
->noEarlyReleaseFlag
) {
2807 temp_ctl
|= 1 << 20;
2809 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_VCTRL
);
2815 * Return the attributes of the VME bus arbiter.
2817 int tempe_get_arbiter(vmeArbiterCfg_t
* vmeArb
)
2823 temp_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VCTRL
);
2825 gto
= temp_ctl
& 0xF;
2827 vmeArb
->globalTimeoutTimer
= (16 * (1 << (gto
- 1)));
2830 if (temp_ctl
& (1 << 6)) {
2831 vmeArb
->arbiterMode
= VME_R_ROBIN_MODE
;
2833 vmeArb
->arbiterMode
= VME_PRIORITY_MODE
;
2836 if (temp_ctl
& (1 << 7)) {
2837 vmeArb
->arbiterTimeoutFlag
= 1;
2840 if (temp_ctl
& (1 << 20)) {
2841 vmeArb
->noEarlyReleaseFlag
= 1;
2848 * Set the VME bus requestor with the requested attributes
2850 int tempe_set_requestor(vmeRequesterCfg_t
* vmeReq
)
2854 temp_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
2855 temp_ctl
&= 0xFFFF0000;
2857 if (vmeReq
->releaseMode
== 1) {
2858 temp_ctl
|= (1 << 3);
2861 if (vmeReq
->fairMode
== 1) {
2862 temp_ctl
|= (1 << 2);
2865 temp_ctl
|= (vmeReq
->timeonTimeoutTimer
& 7) << 8;
2866 temp_ctl
|= (vmeReq
->timeoffTimeoutTimer
& 7) << 12;
2867 temp_ctl
|= vmeReq
->requestLevel
;
2869 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
2874 * Return the attributes of the VME bus requestor
2876 int tempe_get_requestor(vmeRequesterCfg_t
* vmeReq
)
2880 temp_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
2882 if (temp_ctl
& 0x18) {
2883 vmeReq
->releaseMode
= 1;
2886 if (temp_ctl
& (1 << 2)) {
2887 vmeReq
->fairMode
= 1;
2890 vmeReq
->requestLevel
= temp_ctl
& 3;
2891 vmeReq
->timeonTimeoutTimer
= (temp_ctl
>> 8) & 7;
2892 vmeReq
->timeoffTimeoutTimer
= (temp_ctl
>> 12) & 7;