/*
 * Support for the Tundra TSI148 VME-PCI Bridge Chip
 *
 * Author: Martyn Welch <martyn.welch@gefanuc.com>
 * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
16 #include <linux/version.h>
17 #include <linux/module.h>
18 #include <linux/moduleparam.h>
20 #include <linux/types.h>
21 #include <linux/errno.h>
22 #include <linux/proc_fs.h>
23 #include <linux/pci.h>
24 #include <linux/poll.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
30 #include <asm/uaccess.h>
33 #include "../vme_bridge.h"
34 #include "vme_tsi148.h"
36 static int __init
tsi148_init(void);
37 static int tsi148_probe(struct pci_dev
*, const struct pci_device_id
*);
38 static void tsi148_remove(struct pci_dev
*);
39 static void __exit
tsi148_exit(void);
42 int tsi148_slave_set(struct vme_slave_resource
*, int, unsigned long long,
43 unsigned long long, dma_addr_t
, vme_address_t
, vme_cycle_t
);
44 int tsi148_slave_get(struct vme_slave_resource
*, int *, unsigned long long *,
45 unsigned long long *, dma_addr_t
*, vme_address_t
*, vme_cycle_t
*);
47 int tsi148_master_get(struct vme_master_resource
*, int *, unsigned long long *,
48 unsigned long long *, vme_address_t
*, vme_cycle_t
*, vme_width_t
*);
49 int tsi148_master_set(struct vme_master_resource
*, int, unsigned long long,
50 unsigned long long, vme_address_t
, vme_cycle_t
, vme_width_t
);
51 ssize_t
tsi148_master_read(struct vme_master_resource
*, void *, size_t,
53 ssize_t
tsi148_master_write(struct vme_master_resource
*, void *, size_t,
55 unsigned int tsi148_master_rmw(struct vme_master_resource
*, unsigned int,
56 unsigned int, unsigned int, loff_t
);
57 int tsi148_dma_list_add (struct vme_dma_list
*, struct vme_dma_attr
*,
58 struct vme_dma_attr
*, size_t);
59 int tsi148_dma_list_exec(struct vme_dma_list
*);
60 int tsi148_dma_list_empty(struct vme_dma_list
*);
61 int tsi148_generate_irq(int, int);
62 int tsi148_lm_set(unsigned long long, vme_address_t
, vme_cycle_t
);
63 int tsi148_lm_get(unsigned long long *, vme_address_t
*, vme_cycle_t
*);
64 int tsi148_lm_attach(int, void (*callback
)(int));
65 int tsi148_lm_detach(int);
66 int tsi148_slot_get(void);
71 /* XXX These should all be in a per device structure */
72 struct vme_bridge
*tsi148_bridge
;
73 wait_queue_head_t dma_queue
[2];
74 wait_queue_head_t iack_queue
;
75 void (*lm_callback
[4])(int); /* Called in interrupt handler, be careful! */
78 struct vme_master_resource
*flush_image
;
79 struct semaphore vme_rmw
; /* Only one RMW cycle at a time */
80 struct semaphore vme_int
; /*
81 * Only one VME interrupt can be
82 * generated at a time, provide locking
84 struct semaphore vme_irq
; /* Locking for VME irq callback configuration */
85 struct semaphore vme_lm
; /* Locking for location monitor operations */
88 static char driver_name
[] = "vme_tsi148";
90 static struct pci_device_id tsi148_ids
[] = {
91 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA
, PCI_DEVICE_ID_TUNDRA_TSI148
) },
95 static struct pci_driver tsi148_driver
= {
97 .id_table
= tsi148_ids
,
98 .probe
= tsi148_probe
,
99 .remove
= tsi148_remove
,
102 static void reg_join(unsigned int high
, unsigned int low
,
103 unsigned long long *variable
)
105 *variable
= (unsigned long long)high
<< 32;
106 *variable
|= (unsigned long long)low
;
109 static void reg_split(unsigned long long variable
, unsigned int *high
,
112 *low
= (unsigned int)variable
& 0xFFFFFFFF;
113 *high
= (unsigned int)(variable
>> 32);
117 * Wakes up DMA queue.
119 static u32
tsi148_DMA_irqhandler(int channel_mask
)
123 if (channel_mask
& TSI148_LCSR_INTS_DMA0S
) {
124 wake_up(&dma_queue
[0]);
125 serviced
|= TSI148_LCSR_INTC_DMA0C
;
127 if (channel_mask
& TSI148_LCSR_INTS_DMA1S
) {
128 wake_up(&dma_queue
[1]);
129 serviced
|= TSI148_LCSR_INTC_DMA1C
;
136 * Wake up location monitor queue
138 static u32
tsi148_LM_irqhandler(u32 stat
)
143 for (i
= 0; i
< 4; i
++) {
144 if(stat
& TSI148_LCSR_INTS_LMS
[i
]) {
145 /* We only enable interrupts if the callback is set */
147 serviced
|= TSI148_LCSR_INTC_LMC
[i
];
155 * Wake up mail box queue.
157 * XXX This functionality is not exposed up though API.
159 static u32
tsi148_MB_irqhandler(u32 stat
)
165 for (i
= 0; i
< 4; i
++) {
166 if(stat
& TSI148_LCSR_INTS_MBS
[i
]) {
167 val
= ioread32be(tsi148_bridge
->base
+
168 TSI148_GCSR_MBOX
[i
]);
169 printk("VME Mailbox %d received: 0x%x\n", i
, val
);
170 serviced
|= TSI148_LCSR_INTC_MBC
[i
];
178 * Display error & status message when PERR (PCI) exception interrupt occurs.
180 static u32
tsi148_PERR_irqhandler(void)
183 "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
184 ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_EDPAU
),
185 ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_EDPAL
),
186 ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_EDPAT
)
189 "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
190 ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_EDPXA
),
191 ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_EDPXS
)
194 iowrite32be(TSI148_LCSR_EDPAT_EDPCL
,
195 tsi148_bridge
->base
+ TSI148_LCSR_EDPAT
);
197 return TSI148_LCSR_INTC_PERRC
;
201 * Save address and status when VME error interrupt occurs.
203 static u32
tsi148_VERR_irqhandler(void)
205 unsigned int error_addr_high
, error_addr_low
;
206 unsigned long long error_addr
;
208 struct vme_bus_error
*error
;
210 error_addr_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VEAU
);
211 error_addr_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VEAL
);
212 error_attrib
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VEAT
);
214 reg_join(error_addr_high
, error_addr_low
, &error_addr
);
216 /* Check for exception register overflow (we have lost error data) */
217 if(error_attrib
& TSI148_LCSR_VEAT_VEOF
) {
218 printk(KERN_ERR
"VME Bus Exception Overflow Occurred\n");
221 error
= (struct vme_bus_error
*)kmalloc(sizeof (struct vme_bus_error
),
224 error
->address
= error_addr
;
225 error
->attributes
= error_attrib
;
226 list_add_tail(&(error
->list
), &(tsi148_bridge
->vme_errors
));
229 "Unable to alloc memory for VMEbus Error reporting\n");
231 "VME Bus Error at address: 0x%llx, attributes: %08x\n",
232 error_addr
, error_attrib
);
236 iowrite32be(TSI148_LCSR_VEAT_VESCL
,
237 tsi148_bridge
->base
+ TSI148_LCSR_VEAT
);
239 return TSI148_LCSR_INTC_VERRC
;
243 * Wake up IACK queue.
245 static u32
tsi148_IACK_irqhandler(void)
247 printk("tsi148_IACK_irqhandler\n");
248 wake_up(&iack_queue
);
250 return TSI148_LCSR_INTC_IACKC
;
254 * Calling VME bus interrupt callback if provided.
256 static u32
tsi148_VIRQ_irqhandler(u32 stat
)
258 int vec
, i
, serviced
= 0;
259 void (*call
)(int, int, void *);
262 for (i
= 7; i
> 0; i
--) {
263 if (stat
& (1 << i
)) {
265 * Note: Even though the registers are defined
266 * as 32-bits in the spec, we only want to issue
267 * 8-bit IACK cycles on the bus, read from offset
270 vec
= ioread8(tsi148_bridge
->base
+
271 TSI148_LCSR_VIACK
[i
] + 3);
273 call
= tsi148_bridge
->irq
[i
- 1].callback
[vec
].func
;
275 tsi148_bridge
->irq
[i
-1].callback
[vec
].priv_data
;
278 call(i
, vec
, priv_data
);
280 printk("Spurilous VME interrupt, level:%x, "
281 "vector:%x\n", i
, vec
);
283 serviced
|= (1 << i
);
291 * Top level interrupt handler. Clears appropriate interrupt status bits and
292 * then calls appropriate sub handler(s).
294 static irqreturn_t
tsi148_irqhandler(int irq
, void *dev_id
)
296 u32 stat
, enable
, serviced
= 0;
298 /* Determine which interrupts are unmasked and set */
299 enable
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
300 stat
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTS
);
302 /* Only look at unmasked interrupts */
305 if (unlikely(!stat
)) {
309 /* Call subhandlers as appropriate */
311 if (stat
& (TSI148_LCSR_INTS_DMA1S
| TSI148_LCSR_INTS_DMA0S
))
312 serviced
|= tsi148_DMA_irqhandler(stat
);
314 /* Location monitor irqs */
315 if (stat
& (TSI148_LCSR_INTS_LM3S
| TSI148_LCSR_INTS_LM2S
|
316 TSI148_LCSR_INTS_LM1S
| TSI148_LCSR_INTS_LM0S
))
317 serviced
|= tsi148_LM_irqhandler(stat
);
320 if (stat
& (TSI148_LCSR_INTS_MB3S
| TSI148_LCSR_INTS_MB2S
|
321 TSI148_LCSR_INTS_MB1S
| TSI148_LCSR_INTS_MB0S
))
322 serviced
|= tsi148_MB_irqhandler(stat
);
325 if (stat
& TSI148_LCSR_INTS_PERRS
)
326 serviced
|= tsi148_PERR_irqhandler();
329 if (stat
& TSI148_LCSR_INTS_VERRS
)
330 serviced
|= tsi148_VERR_irqhandler();
333 if (stat
& TSI148_LCSR_INTS_IACKS
)
334 serviced
|= tsi148_IACK_irqhandler();
337 if (stat
& (TSI148_LCSR_INTS_IRQ7S
| TSI148_LCSR_INTS_IRQ6S
|
338 TSI148_LCSR_INTS_IRQ5S
| TSI148_LCSR_INTS_IRQ4S
|
339 TSI148_LCSR_INTS_IRQ3S
| TSI148_LCSR_INTS_IRQ2S
|
340 TSI148_LCSR_INTS_IRQ1S
))
341 serviced
|= tsi148_VIRQ_irqhandler(stat
);
343 /* Clear serviced interrupts */
344 iowrite32be(serviced
, tsi148_bridge
->base
+ TSI148_LCSR_INTC
);
349 static int tsi148_irq_init(struct vme_bridge
*bridge
)
353 struct pci_dev
*pdev
;
356 pdev
= container_of(bridge
->parent
, struct pci_dev
, dev
);
358 /* Initialise list for VME bus errors */
359 INIT_LIST_HEAD(&(bridge
->vme_errors
));
361 result
= request_irq(pdev
->irq
,
366 dev_err(&pdev
->dev
, "Can't get assigned pci irq vector %02X\n",
371 /* Enable and unmask interrupts */
372 tmp
= TSI148_LCSR_INTEO_DMA1EO
| TSI148_LCSR_INTEO_DMA0EO
|
373 TSI148_LCSR_INTEO_MB3EO
| TSI148_LCSR_INTEO_MB2EO
|
374 TSI148_LCSR_INTEO_MB1EO
| TSI148_LCSR_INTEO_MB0EO
|
375 TSI148_LCSR_INTEO_PERREO
| TSI148_LCSR_INTEO_VERREO
|
376 TSI148_LCSR_INTEO_IACKEO
;
378 /* XXX This leaves the following interrupts masked.
379 * TSI148_LCSR_INTEO_VIEEO
380 * TSI148_LCSR_INTEO_SYSFLEO
381 * TSI148_LCSR_INTEO_ACFLEO
384 /* Don't enable Location Monitor interrupts here - they will be
385 * enabled when the location monitors are properly configured and
386 * a callback has been attached.
387 * TSI148_LCSR_INTEO_LM0EO
388 * TSI148_LCSR_INTEO_LM1EO
389 * TSI148_LCSR_INTEO_LM2EO
390 * TSI148_LCSR_INTEO_LM3EO
393 /* Don't enable VME interrupts until we add a handler, else the board
394 * will respond to it and we don't want that unless it knows how to
395 * properly deal with it.
396 * TSI148_LCSR_INTEO_IRQ7EO
397 * TSI148_LCSR_INTEO_IRQ6EO
398 * TSI148_LCSR_INTEO_IRQ5EO
399 * TSI148_LCSR_INTEO_IRQ4EO
400 * TSI148_LCSR_INTEO_IRQ3EO
401 * TSI148_LCSR_INTEO_IRQ2EO
402 * TSI148_LCSR_INTEO_IRQ1EO
405 iowrite32be(tmp
, bridge
->base
+ TSI148_LCSR_INTEO
);
406 iowrite32be(tmp
, bridge
->base
+ TSI148_LCSR_INTEN
);
411 static void tsi148_irq_exit(struct pci_dev
*pdev
)
413 /* Turn off interrupts */
414 iowrite32be(0x0, tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
415 iowrite32be(0x0, tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
417 /* Clear all interrupts */
418 iowrite32be(0xFFFFFFFF, tsi148_bridge
->base
+ TSI148_LCSR_INTC
);
420 /* Detach interrupt handler */
421 free_irq(pdev
->irq
, pdev
);
425 * Check to see if an IACk has been received, return true (1) or false (0).
427 int tsi148_iack_received(void)
431 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VICR
);
433 if (tmp
& TSI148_LCSR_VICR_IRQS
)
440 * Set up an VME interrupt
442 int tsi148_request_irq(int level
, int statid
,
443 void (*callback
)(int level
, int vector
, void *priv_data
),
451 if(tsi148_bridge
->irq
[level
- 1].callback
[statid
].func
) {
453 printk("VME Interrupt already taken\n");
458 tsi148_bridge
->irq
[level
- 1].count
++;
459 tsi148_bridge
->irq
[level
- 1].callback
[statid
].priv_data
= priv_data
;
460 tsi148_bridge
->irq
[level
- 1].callback
[statid
].func
= callback
;
462 /* Enable IRQ level */
463 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
464 tmp
|= TSI148_LCSR_INTEO_IRQEO
[level
- 1];
465 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
467 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
468 tmp
|= TSI148_LCSR_INTEN_IRQEN
[level
- 1];
469 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
471 /* Release semaphore */
480 void tsi148_free_irq(int level
, int statid
)
483 struct pci_dev
*pdev
;
488 tsi148_bridge
->irq
[level
- 1].count
--;
490 /* Disable IRQ level if no more interrupts attached at this level*/
491 if (tsi148_bridge
->irq
[level
- 1].count
== 0) {
492 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
493 tmp
&= ~TSI148_LCSR_INTEN_IRQEN
[level
- 1];
494 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
496 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
497 tmp
&= ~TSI148_LCSR_INTEO_IRQEO
[level
- 1];
498 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
500 pdev
= container_of(tsi148_bridge
->parent
, struct pci_dev
, dev
);
502 synchronize_irq(pdev
->irq
);
505 tsi148_bridge
->irq
[level
- 1].callback
[statid
].func
= NULL
;
506 tsi148_bridge
->irq
[level
- 1].callback
[statid
].priv_data
= NULL
;
508 /* Release semaphore */
513 * Generate a VME bus interrupt at the requested level & vector. Wait for
514 * interrupt to be acked.
516 * Only one interrupt can be generated at a time - so add a semaphore.
518 int tsi148_generate_irq(int level
, int statid
)
525 /* Read VICR register */
526 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VICR
);
529 tmp
= (tmp
& ~TSI148_LCSR_VICR_STID_M
) |
530 (statid
& TSI148_LCSR_VICR_STID_M
);
531 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_VICR
);
533 /* Assert VMEbus IRQ */
534 tmp
= tmp
| TSI148_LCSR_VICR_IRQL
[level
];
535 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_VICR
);
537 /* XXX Consider implementing a timeout? */
538 wait_event_interruptible(iack_queue
, tsi148_iack_received());
540 /* Release semaphore */
547 * Find the first error in this address range
549 static struct vme_bus_error
*tsi148_find_error(vme_address_t aspace
,
550 unsigned long long address
, size_t count
)
552 struct list_head
*err_pos
;
553 struct vme_bus_error
*vme_err
, *valid
= NULL
;
554 unsigned long long bound
;
556 bound
= address
+ count
;
559 * XXX We are currently not looking at the address space when parsing
560 * for errors. This is because parsing the Address Modifier Codes
561 * is going to be quite resource intensive to do properly. We
562 * should be OK just looking at the addresses and this is certainly
563 * much better than what we had before.
566 /* Iterate through errors */
567 list_for_each(err_pos
, &(tsi148_bridge
->vme_errors
)) {
568 vme_err
= list_entry(err_pos
, struct vme_bus_error
, list
);
569 if((vme_err
->address
>= address
) && (vme_err
->address
< bound
)){
579 * Clear errors in the provided address range.
581 static void tsi148_clear_errors(vme_address_t aspace
,
582 unsigned long long address
, size_t count
)
584 struct list_head
*err_pos
, *temp
;
585 struct vme_bus_error
*vme_err
;
586 unsigned long long bound
;
588 bound
= address
+ count
;
591 * XXX We are currently not looking at the address space when parsing
592 * for errors. This is because parsing the Address Modifier Codes
593 * is going to be quite resource intensive to do properly. We
594 * should be OK just looking at the addresses and this is certainly
595 * much better than what we had before.
598 /* Iterate through errors */
599 list_for_each_safe(err_pos
, temp
, &(tsi148_bridge
->vme_errors
)) {
600 vme_err
= list_entry(err_pos
, struct vme_bus_error
, list
);
602 if((vme_err
->address
>= address
) && (vme_err
->address
< bound
)){
610 * Initialize a slave window with the requested attributes.
612 int tsi148_slave_set(struct vme_slave_resource
*image
, int enabled
,
613 unsigned long long vme_base
, unsigned long long size
,
614 dma_addr_t pci_base
, vme_address_t aspace
, vme_cycle_t cycle
)
616 unsigned int i
, addr
= 0, granularity
= 0;
617 unsigned int temp_ctl
= 0;
618 unsigned int vme_base_low
, vme_base_high
;
619 unsigned int vme_bound_low
, vme_bound_high
;
620 unsigned int pci_offset_low
, pci_offset_high
;
621 unsigned long long vme_bound
, pci_offset
;
624 printk("Set slave image %d to:\n", image
->number
);
625 printk("\tEnabled: %s\n", (enabled
== 1)? "yes" : "no");
626 printk("\tVME Base:0x%llx\n", vme_base
);
627 printk("\tWindow Size:0x%llx\n", size
);
628 printk("\tPCI Base:0x%lx\n", (unsigned long)pci_base
);
629 printk("\tAddress Space:0x%x\n", aspace
);
630 printk("\tTransfer Cycle Properties:0x%x\n", cycle
);
638 addr
|= TSI148_LCSR_ITAT_AS_A16
;
641 granularity
= 0x1000;
642 addr
|= TSI148_LCSR_ITAT_AS_A24
;
645 granularity
= 0x10000;
646 addr
|= TSI148_LCSR_ITAT_AS_A32
;
649 granularity
= 0x10000;
650 addr
|= TSI148_LCSR_ITAT_AS_A64
;
658 printk("Invalid address space\n");
663 /* Convert 64-bit variables to 2x 32-bit variables */
664 reg_split(vme_base
, &vme_base_high
, &vme_base_low
);
667 * Bound address is a valid address for the window, adjust
670 vme_bound
= vme_base
+ size
- granularity
;
671 reg_split(vme_bound
, &vme_bound_high
, &vme_bound_low
);
672 pci_offset
= (unsigned long long)pci_base
- vme_base
;
673 reg_split(pci_offset
, &pci_offset_high
, &pci_offset_low
);
675 if (vme_base_low
& (granularity
- 1)) {
676 printk("Invalid VME base alignment\n");
679 if (vme_bound_low
& (granularity
- 1)) {
680 printk("Invalid VME bound alignment\n");
683 if (pci_offset_low
& (granularity
- 1)) {
684 printk("Invalid PCI Offset alignment\n");
689 printk("\tVME Bound:0x%llx\n", vme_bound
);
690 printk("\tPCI Offset:0x%llx\n", pci_offset
);
693 /* Disable while we are mucking around */
694 temp_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
695 TSI148_LCSR_OFFSET_ITAT
);
696 temp_ctl
&= ~TSI148_LCSR_ITAT_EN
;
697 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
698 TSI148_LCSR_OFFSET_ITAT
);
701 iowrite32be(vme_base_high
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
702 TSI148_LCSR_OFFSET_ITSAU
);
703 iowrite32be(vme_base_low
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
704 TSI148_LCSR_OFFSET_ITSAL
);
705 iowrite32be(vme_bound_high
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
706 TSI148_LCSR_OFFSET_ITEAU
);
707 iowrite32be(vme_bound_low
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
708 TSI148_LCSR_OFFSET_ITEAL
);
709 iowrite32be(pci_offset_high
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
710 TSI148_LCSR_OFFSET_ITOFU
);
711 iowrite32be(pci_offset_low
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
712 TSI148_LCSR_OFFSET_ITOFL
);
714 /* XXX Prefetch stuff currently unsupported */
717 for (x
= 0; x
< 4; x
++) {
718 if ((64 << x
) >= vmeIn
->prefetchSize
) {
724 temp_ctl
|= (x
<< 16);
726 if (vmeIn
->prefetchThreshold
)
727 if (vmeIn
->prefetchThreshold
)
731 /* Setup 2eSST speeds */
732 temp_ctl
&= ~TSI148_LCSR_ITAT_2eSSTM_M
;
733 switch (cycle
& (VME_2eSST160
| VME_2eSST267
| VME_2eSST320
)) {
735 temp_ctl
|= TSI148_LCSR_ITAT_2eSSTM_160
;
738 temp_ctl
|= TSI148_LCSR_ITAT_2eSSTM_267
;
741 temp_ctl
|= TSI148_LCSR_ITAT_2eSSTM_320
;
745 /* Setup cycle types */
746 temp_ctl
&= ~(0x1F << 7);
748 temp_ctl
|= TSI148_LCSR_ITAT_BLT
;
749 if (cycle
& VME_MBLT
)
750 temp_ctl
|= TSI148_LCSR_ITAT_MBLT
;
751 if (cycle
& VME_2eVME
)
752 temp_ctl
|= TSI148_LCSR_ITAT_2eVME
;
753 if (cycle
& VME_2eSST
)
754 temp_ctl
|= TSI148_LCSR_ITAT_2eSST
;
755 if (cycle
& VME_2eSSTB
)
756 temp_ctl
|= TSI148_LCSR_ITAT_2eSSTB
;
758 /* Setup address space */
759 temp_ctl
&= ~TSI148_LCSR_ITAT_AS_M
;
763 if (cycle
& VME_SUPER
)
764 temp_ctl
|= TSI148_LCSR_ITAT_SUPR
;
765 if (cycle
& VME_USER
)
766 temp_ctl
|= TSI148_LCSR_ITAT_NPRIV
;
767 if (cycle
& VME_PROG
)
768 temp_ctl
|= TSI148_LCSR_ITAT_PGM
;
769 if (cycle
& VME_DATA
)
770 temp_ctl
|= TSI148_LCSR_ITAT_DATA
;
772 /* Write ctl reg without enable */
773 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
774 TSI148_LCSR_OFFSET_ITAT
);
777 temp_ctl
|= TSI148_LCSR_ITAT_EN
;
779 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
780 TSI148_LCSR_OFFSET_ITAT
);
786 * Get slave window configuration.
788 * XXX Prefetch currently unsupported.
790 int tsi148_slave_get(struct vme_slave_resource
*image
, int *enabled
,
791 unsigned long long *vme_base
, unsigned long long *size
,
792 dma_addr_t
*pci_base
, vme_address_t
*aspace
, vme_cycle_t
*cycle
)
794 unsigned int i
, granularity
= 0, ctl
= 0;
795 unsigned int vme_base_low
, vme_base_high
;
796 unsigned int vme_bound_low
, vme_bound_high
;
797 unsigned int pci_offset_low
, pci_offset_high
;
798 unsigned long long vme_bound
, pci_offset
;
804 ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
805 TSI148_LCSR_OFFSET_ITAT
);
807 vme_base_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
808 TSI148_LCSR_OFFSET_ITSAU
);
809 vme_base_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
810 TSI148_LCSR_OFFSET_ITSAL
);
811 vme_bound_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
812 TSI148_LCSR_OFFSET_ITEAU
);
813 vme_bound_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
814 TSI148_LCSR_OFFSET_ITEAL
);
815 pci_offset_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
816 TSI148_LCSR_OFFSET_ITOFU
);
817 pci_offset_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
818 TSI148_LCSR_OFFSET_ITOFL
);
820 /* Convert 64-bit variables to 2x 32-bit variables */
821 reg_join(vme_base_high
, vme_base_low
, vme_base
);
822 reg_join(vme_bound_high
, vme_bound_low
, &vme_bound
);
823 reg_join(pci_offset_high
, pci_offset_low
, &pci_offset
);
825 *pci_base
= (dma_addr_t
)vme_base
+ pci_offset
;
831 if (ctl
& TSI148_LCSR_ITAT_EN
)
834 if ((ctl
& TSI148_LCSR_ITAT_AS_M
) == TSI148_LCSR_ITAT_AS_A16
) {
838 if ((ctl
& TSI148_LCSR_ITAT_AS_M
) == TSI148_LCSR_ITAT_AS_A24
) {
839 granularity
= 0x1000;
842 if ((ctl
& TSI148_LCSR_ITAT_AS_M
) == TSI148_LCSR_ITAT_AS_A32
) {
843 granularity
= 0x10000;
846 if ((ctl
& TSI148_LCSR_ITAT_AS_M
) == TSI148_LCSR_ITAT_AS_A64
) {
847 granularity
= 0x10000;
851 /* Need granularity before we set the size */
852 *size
= (unsigned long long)((vme_bound
- *vme_base
) + granularity
);
855 if ((ctl
& TSI148_LCSR_ITAT_2eSSTM_M
) == TSI148_LCSR_ITAT_2eSSTM_160
)
856 *cycle
|= VME_2eSST160
;
857 if ((ctl
& TSI148_LCSR_ITAT_2eSSTM_M
) == TSI148_LCSR_ITAT_2eSSTM_267
)
858 *cycle
|= VME_2eSST267
;
859 if ((ctl
& TSI148_LCSR_ITAT_2eSSTM_M
) == TSI148_LCSR_ITAT_2eSSTM_320
)
860 *cycle
|= VME_2eSST320
;
862 if (ctl
& TSI148_LCSR_ITAT_BLT
)
864 if (ctl
& TSI148_LCSR_ITAT_MBLT
)
866 if (ctl
& TSI148_LCSR_ITAT_2eVME
)
868 if (ctl
& TSI148_LCSR_ITAT_2eSST
)
870 if (ctl
& TSI148_LCSR_ITAT_2eSSTB
)
871 *cycle
|= VME_2eSSTB
;
873 if (ctl
& TSI148_LCSR_ITAT_SUPR
)
875 if (ctl
& TSI148_LCSR_ITAT_NPRIV
)
877 if (ctl
& TSI148_LCSR_ITAT_PGM
)
879 if (ctl
& TSI148_LCSR_ITAT_DATA
)
886 * Allocate and map PCI Resource
888 static int tsi148_alloc_resource(struct vme_master_resource
*image
,
889 unsigned long long size
)
891 unsigned long long existing_size
;
893 struct pci_dev
*pdev
;
895 /* Find pci_dev container of dev */
896 if (tsi148_bridge
->parent
== NULL
) {
897 printk("Dev entry NULL\n");
900 pdev
= container_of(tsi148_bridge
->parent
, struct pci_dev
, dev
);
902 existing_size
= (unsigned long long)(image
->pci_resource
.end
-
903 image
->pci_resource
.start
);
905 /* If the existing size is OK, return */
906 if (existing_size
== (size
- 1))
909 if (existing_size
!= 0) {
910 iounmap(image
->kern_base
);
911 image
->kern_base
= NULL
;
912 if (image
->pci_resource
.name
!= NULL
)
913 kfree(image
->pci_resource
.name
);
914 release_resource(&(image
->pci_resource
));
915 memset(&(image
->pci_resource
), 0, sizeof(struct resource
));
918 if (image
->pci_resource
.name
== NULL
) {
919 image
->pci_resource
.name
= kmalloc(VMENAMSIZ
+3, GFP_KERNEL
);
920 if (image
->pci_resource
.name
== NULL
) {
921 printk(KERN_ERR
"Unable to allocate memory for resource"
928 sprintf((char *)image
->pci_resource
.name
, "%s.%d", tsi148_bridge
->name
,
931 image
->pci_resource
.start
= 0;
932 image
->pci_resource
.end
= (unsigned long)size
;
933 image
->pci_resource
.flags
= IORESOURCE_MEM
;
935 retval
= pci_bus_alloc_resource(pdev
->bus
,
936 &(image
->pci_resource
), size
, size
, PCIBIOS_MIN_MEM
,
939 printk(KERN_ERR
"Failed to allocate mem resource for "
940 "window %d size 0x%lx start 0x%lx\n",
941 image
->number
, (unsigned long)size
,
942 (unsigned long)image
->pci_resource
.start
);
946 image
->kern_base
= ioremap_nocache(
947 image
->pci_resource
.start
, size
);
948 if (image
->kern_base
== NULL
) {
949 printk(KERN_ERR
"Failed to remap resource\n");
956 iounmap(image
->kern_base
);
957 image
->kern_base
= NULL
;
959 release_resource(&(image
->pci_resource
));
961 kfree(image
->pci_resource
.name
);
962 memset(&(image
->pci_resource
), 0, sizeof(struct resource
));
968 * Free and unmap PCI Resource
970 static void tsi148_free_resource(struct vme_master_resource
*image
)
972 iounmap(image
->kern_base
);
973 image
->kern_base
= NULL
;
974 release_resource(&(image
->pci_resource
));
975 kfree(image
->pci_resource
.name
);
976 memset(&(image
->pci_resource
), 0, sizeof(struct resource
));
980 * Set the attributes of an outbound window.
982 int tsi148_master_set( struct vme_master_resource
*image
, int enabled
,
983 unsigned long long vme_base
, unsigned long long size
,
984 vme_address_t aspace
, vme_cycle_t cycle
, vme_width_t dwidth
)
988 unsigned int temp_ctl
= 0;
989 unsigned int pci_base_low
, pci_base_high
;
990 unsigned int pci_bound_low
, pci_bound_high
;
991 unsigned int vme_offset_low
, vme_offset_high
;
992 unsigned long long pci_bound
, vme_offset
, pci_base
;
994 /* Verify input data */
995 if (vme_base
& 0xFFFF) {
996 printk("Invalid VME Window alignment\n");
1000 if (size
< 0x10000) {
1001 printk("Invalid VME Window size\n");
1006 spin_lock(&(image
->lock
));
1008 /* Let's allocate the resource here rather than further up the stack as
1009 * it avoids pushing loads of bus dependant stuff up the stack
1011 retval
= tsi148_alloc_resource(image
, size
);
1013 spin_unlock(&(image
->lock
));
1014 printk(KERN_ERR
"Unable to allocate memory for resource "
1020 pci_base
= (unsigned long long)image
->pci_resource
.start
;
1024 * Bound address is a valid address for the window, adjust
1025 * according to window granularity.
1027 pci_bound
= pci_base
+ (size
- 0x10000);
1028 vme_offset
= vme_base
- pci_base
;
1030 /* Convert 64-bit variables to 2x 32-bit variables */
1031 reg_split(pci_base
, &pci_base_high
, &pci_base_low
);
1032 reg_split(pci_bound
, &pci_bound_high
, &pci_bound_low
);
1033 reg_split(vme_offset
, &vme_offset_high
, &vme_offset_low
);
1035 if (pci_base_low
& 0xFFFF) {
1036 spin_unlock(&(image
->lock
));
1037 printk("Invalid PCI base alignment\n");
1041 if (pci_bound_low
& 0xFFFF) {
1042 spin_unlock(&(image
->lock
));
1043 printk("Invalid PCI bound alignment\n");
1047 if (vme_offset_low
& 0xFFFF) {
1048 spin_unlock(&(image
->lock
));
1049 printk("Invalid VME Offset alignment\n");
1056 /* Disable while we are mucking around */
1057 temp_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1058 TSI148_LCSR_OFFSET_OTAT
);
1059 temp_ctl
&= ~TSI148_LCSR_OTAT_EN
;
1060 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1061 TSI148_LCSR_OFFSET_OTAT
);
1063 /* XXX Prefetch stuff currently unsupported */
1065 if (vmeOut
->prefetchEnable
) {
1066 temp_ctl
|= 0x40000;
1067 for (x
= 0; x
< 4; x
++) {
1068 if ((2 << x
) >= vmeOut
->prefetchSize
)
1073 temp_ctl
|= (x
<< 16);
1077 /* Setup 2eSST speeds */
1078 temp_ctl
&= ~TSI148_LCSR_OTAT_2eSSTM_M
;
1079 switch (cycle
& (VME_2eSST160
| VME_2eSST267
| VME_2eSST320
)) {
1081 temp_ctl
|= TSI148_LCSR_OTAT_2eSSTM_160
;
1084 temp_ctl
|= TSI148_LCSR_OTAT_2eSSTM_267
;
1087 temp_ctl
|= TSI148_LCSR_OTAT_2eSSTM_320
;
1091 /* Setup cycle types */
1092 if (cycle
& VME_BLT
) {
1093 temp_ctl
&= ~TSI148_LCSR_OTAT_TM_M
;
1094 temp_ctl
|= TSI148_LCSR_OTAT_TM_BLT
;
1096 if (cycle
& VME_MBLT
) {
1097 temp_ctl
&= ~TSI148_LCSR_OTAT_TM_M
;
1098 temp_ctl
|= TSI148_LCSR_OTAT_TM_MBLT
;
1100 if (cycle
& VME_2eVME
) {
1101 temp_ctl
&= ~TSI148_LCSR_OTAT_TM_M
;
1102 temp_ctl
|= TSI148_LCSR_OTAT_TM_2eVME
;
1104 if (cycle
& VME_2eSST
) {
1105 temp_ctl
&= ~TSI148_LCSR_OTAT_TM_M
;
1106 temp_ctl
|= TSI148_LCSR_OTAT_TM_2eSST
;
1108 if (cycle
& VME_2eSSTB
) {
1109 printk("Currently not setting Broadcast Select Registers\n");
1110 temp_ctl
&= ~TSI148_LCSR_OTAT_TM_M
;
1111 temp_ctl
|= TSI148_LCSR_OTAT_TM_2eSSTB
;
1114 /* Setup data width */
1115 temp_ctl
&= ~TSI148_LCSR_OTAT_DBW_M
;
1118 temp_ctl
|= TSI148_LCSR_OTAT_DBW_16
;
1121 temp_ctl
|= TSI148_LCSR_OTAT_DBW_32
;
1124 spin_unlock(&(image
->lock
));
1125 printk("Invalid data width\n");
1130 /* Setup address space */
1131 temp_ctl
&= ~TSI148_LCSR_OTAT_AMODE_M
;
1134 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_A16
;
1137 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_A24
;
1140 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_A32
;
1143 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_A64
;
1146 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_CRCSR
;
1149 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_USER1
;
1152 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_USER2
;
1155 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_USER3
;
1158 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_USER4
;
1161 spin_unlock(&(image
->lock
));
1162 printk("Invalid address space\n");
1168 temp_ctl
&= ~(3<<4);
1169 if (cycle
& VME_SUPER
)
1170 temp_ctl
|= TSI148_LCSR_OTAT_SUP
;
1171 if (cycle
& VME_PROG
)
1172 temp_ctl
|= TSI148_LCSR_OTAT_PGM
;
1175 iowrite32be(pci_base_high
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1176 TSI148_LCSR_OFFSET_OTSAU
);
1177 iowrite32be(pci_base_low
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1178 TSI148_LCSR_OFFSET_OTSAL
);
1179 iowrite32be(pci_bound_high
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1180 TSI148_LCSR_OFFSET_OTEAU
);
1181 iowrite32be(pci_bound_low
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1182 TSI148_LCSR_OFFSET_OTEAL
);
1183 iowrite32be(vme_offset_high
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1184 TSI148_LCSR_OFFSET_OTOFU
);
1185 iowrite32be(vme_offset_low
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1186 TSI148_LCSR_OFFSET_OTOFL
);
1188 /* XXX We need to deal with OTBS */
1190 iowrite32be(vmeOut
->bcastSelect2esst
, tsi148_bridge
->base
+
1191 TSI148_LCSR_OT
[i
] + TSI148_LCSR_OFFSET_OTBS
);
1194 /* Write ctl reg without enable */
1195 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1196 TSI148_LCSR_OFFSET_OTAT
);
1199 temp_ctl
|= TSI148_LCSR_OTAT_EN
;
1201 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1202 TSI148_LCSR_OFFSET_OTAT
);
1204 spin_unlock(&(image
->lock
));
1210 tsi148_free_resource(image
);
1218 * Set the attributes of an outbound window.
1220 * XXX Not parsing prefetch information.
1222 int __tsi148_master_get( struct vme_master_resource
*image
, int *enabled
,
1223 unsigned long long *vme_base
, unsigned long long *size
,
1224 vme_address_t
*aspace
, vme_cycle_t
*cycle
, vme_width_t
*dwidth
)
1226 unsigned int i
, ctl
;
1227 unsigned int pci_base_low
, pci_base_high
;
1228 unsigned int pci_bound_low
, pci_bound_high
;
1229 unsigned int vme_offset_low
, vme_offset_high
;
1231 unsigned long long pci_base
, pci_bound
, vme_offset
;
1235 ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1236 TSI148_LCSR_OFFSET_OTAT
);
1238 pci_base_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1239 TSI148_LCSR_OFFSET_OTSAU
);
1240 pci_base_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1241 TSI148_LCSR_OFFSET_OTSAL
);
1242 pci_bound_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1243 TSI148_LCSR_OFFSET_OTEAU
);
1244 pci_bound_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1245 TSI148_LCSR_OFFSET_OTEAL
);
1246 vme_offset_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1247 TSI148_LCSR_OFFSET_OTOFU
);
1248 vme_offset_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1249 TSI148_LCSR_OFFSET_OTOFL
);
1251 /* Convert 64-bit variables to 2x 32-bit variables */
1252 reg_join(pci_base_high
, pci_base_low
, &pci_base
);
1253 reg_join(pci_bound_high
, pci_bound_low
, &pci_bound
);
1254 reg_join(vme_offset_high
, vme_offset_low
, &vme_offset
);
1256 *vme_base
= pci_base
+ vme_offset
;
1257 *size
= (unsigned long long)(pci_bound
- pci_base
) + 0x10000;
1264 if (ctl
& TSI148_LCSR_OTAT_EN
)
1267 /* Setup address space */
1268 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_A16
)
1270 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_A24
)
1272 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_A32
)
1274 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_A64
)
1276 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_CRCSR
)
1277 *aspace
|= VME_CRCSR
;
1278 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_USER1
)
1279 *aspace
|= VME_USER1
;
1280 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_USER2
)
1281 *aspace
|= VME_USER2
;
1282 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_USER3
)
1283 *aspace
|= VME_USER3
;
1284 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_USER4
)
1285 *aspace
|= VME_USER4
;
1287 /* Setup 2eSST speeds */
1288 if ((ctl
& TSI148_LCSR_OTAT_2eSSTM_M
) == TSI148_LCSR_OTAT_2eSSTM_160
)
1289 *cycle
|= VME_2eSST160
;
1290 if ((ctl
& TSI148_LCSR_OTAT_2eSSTM_M
) == TSI148_LCSR_OTAT_2eSSTM_267
)
1291 *cycle
|= VME_2eSST267
;
1292 if ((ctl
& TSI148_LCSR_OTAT_2eSSTM_M
) == TSI148_LCSR_OTAT_2eSSTM_320
)
1293 *cycle
|= VME_2eSST320
;
1295 /* Setup cycle types */
1296 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_SCT
)
1298 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_BLT
)
1300 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_MBLT
)
1302 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_2eVME
)
1303 *cycle
|= VME_2eVME
;
1304 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_2eSST
)
1305 *cycle
|= VME_2eSST
;
1306 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_2eSSTB
)
1307 *cycle
|= VME_2eSSTB
;
1309 if (ctl
& TSI148_LCSR_OTAT_SUP
)
1310 *cycle
|= VME_SUPER
;
1314 if (ctl
& TSI148_LCSR_OTAT_PGM
)
1319 /* Setup data width */
1320 if ((ctl
& TSI148_LCSR_OTAT_DBW_M
) == TSI148_LCSR_OTAT_DBW_16
)
1322 if ((ctl
& TSI148_LCSR_OTAT_DBW_M
) == TSI148_LCSR_OTAT_DBW_32
)
1329 int tsi148_master_get( struct vme_master_resource
*image
, int *enabled
,
1330 unsigned long long *vme_base
, unsigned long long *size
,
1331 vme_address_t
*aspace
, vme_cycle_t
*cycle
, vme_width_t
*dwidth
)
1335 spin_lock(&(image
->lock
));
1337 retval
= __tsi148_master_get(image
, enabled
, vme_base
, size
, aspace
,
1340 spin_unlock(&(image
->lock
));
1345 ssize_t
tsi148_master_read(struct vme_master_resource
*image
, void *buf
,
1346 size_t count
, loff_t offset
)
1348 int retval
, enabled
;
1349 unsigned long long vme_base
, size
;
1350 vme_address_t aspace
;
1353 struct vme_bus_error
*vme_err
= NULL
;
1355 spin_lock(&(image
->lock
));
1357 memcpy_fromio(buf
, image
->kern_base
+ offset
, (unsigned int)count
);
1363 __tsi148_master_get(image
, &enabled
, &vme_base
, &size
, &aspace
, &cycle
,
1366 vme_err
= tsi148_find_error(aspace
, vme_base
+ offset
, count
);
1367 if(vme_err
!= NULL
) {
1368 dev_err(image
->parent
->parent
, "First VME read error detected "
1369 "an at address 0x%llx\n", vme_err
->address
);
1370 retval
= vme_err
->address
- (vme_base
+ offset
);
1371 /* Clear down save errors in this address range */
1372 tsi148_clear_errors(aspace
, vme_base
+ offset
, count
);
1376 spin_unlock(&(image
->lock
));
1382 /* XXX We need to change vme_master_resource->sem to a spinlock so that read
1383 * and write functions can be used in an interrupt context
1385 ssize_t
tsi148_master_write(struct vme_master_resource
*image
, void *buf
,
1386 size_t count
, loff_t offset
)
1388 int retval
= 0, enabled
;
1389 unsigned long long vme_base
, size
;
1390 vme_address_t aspace
;
1394 struct vme_bus_error
*vme_err
= NULL
;
1396 spin_lock(&(image
->lock
));
1398 memcpy_toio(image
->kern_base
+ offset
, buf
, (unsigned int)count
);
1402 * Writes are posted. We need to do a read on the VME bus to flush out
1403 * all of the writes before we check for errors. We can't guarentee
1404 * that reading the data we have just written is safe. It is believed
1405 * that there isn't any read, write re-ordering, so we can read any
1406 * location in VME space, so lets read the Device ID from the tsi148's
1407 * own registers as mapped into CR/CSR space.
1409 * We check for saved errors in the written address range/space.
1416 * Get window info first, to maximise the time that the buffers may
1417 * fluch on their own
1419 __tsi148_master_get(image
, &enabled
, &vme_base
, &size
, &aspace
, &cycle
,
1422 ioread16(flush_image
->kern_base
+ 0x7F000);
1424 vme_err
= tsi148_find_error(aspace
, vme_base
+ offset
, count
);
1425 if(vme_err
!= NULL
) {
1426 printk("First VME write error detected an at address 0x%llx\n",
1428 retval
= vme_err
->address
- (vme_base
+ offset
);
1429 /* Clear down save errors in this address range */
1430 tsi148_clear_errors(aspace
, vme_base
+ offset
, count
);
1434 spin_unlock(&(image
->lock
));
1440 * Perform an RMW cycle on the VME bus.
1442 * Requires a previously configured master window, returns final value.
1444 unsigned int tsi148_master_rmw(struct vme_master_resource
*image
,
1445 unsigned int mask
, unsigned int compare
, unsigned int swap
,
1448 unsigned long long pci_addr
;
1449 unsigned int pci_addr_high
, pci_addr_low
;
1454 /* Find the PCI address that maps to the desired VME address */
1457 /* Locking as we can only do one of these at a time */
1461 spin_lock(&(image
->lock
));
1463 pci_addr_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1464 TSI148_LCSR_OFFSET_OTSAU
);
1465 pci_addr_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1466 TSI148_LCSR_OFFSET_OTSAL
);
1468 reg_join(pci_addr_high
, pci_addr_low
, &pci_addr
);
1469 reg_split(pci_addr
+ offset
, &pci_addr_high
, &pci_addr_low
);
1471 /* Configure registers */
1472 iowrite32be(mask
, tsi148_bridge
->base
+ TSI148_LCSR_RMWEN
);
1473 iowrite32be(compare
, tsi148_bridge
->base
+ TSI148_LCSR_RMWC
);
1474 iowrite32be(swap
, tsi148_bridge
->base
+ TSI148_LCSR_RMWS
);
1475 iowrite32be(pci_addr_high
, tsi148_bridge
->base
+ TSI148_LCSR_RMWAU
);
1476 iowrite32be(pci_addr_low
, tsi148_bridge
->base
+ TSI148_LCSR_RMWAL
);
1479 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
1480 tmp
|= TSI148_LCSR_VMCTRL_RMWEN
;
1481 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
1483 /* Kick process off with a read to the required address. */
1484 result
= ioread32be(image
->kern_base
+ offset
);
1487 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
1488 tmp
&= ~TSI148_LCSR_VMCTRL_RMWEN
;
1489 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
1491 spin_unlock(&(image
->lock
));
1498 static int tsi148_dma_set_vme_src_attributes (u32
*attr
, vme_address_t aspace
,
1499 vme_cycle_t cycle
, vme_width_t dwidth
)
1501 /* Setup 2eSST speeds */
1502 switch (cycle
& (VME_2eSST160
| VME_2eSST267
| VME_2eSST320
)) {
1504 *attr
|= TSI148_LCSR_DSAT_2eSSTM_160
;
1507 *attr
|= TSI148_LCSR_DSAT_2eSSTM_267
;
1510 *attr
|= TSI148_LCSR_DSAT_2eSSTM_320
;
1514 /* Setup cycle types */
1515 if (cycle
& VME_SCT
) {
1516 *attr
|= TSI148_LCSR_DSAT_TM_SCT
;
1518 if (cycle
& VME_BLT
) {
1519 *attr
|= TSI148_LCSR_DSAT_TM_BLT
;
1521 if (cycle
& VME_MBLT
) {
1522 *attr
|= TSI148_LCSR_DSAT_TM_MBLT
;
1524 if (cycle
& VME_2eVME
) {
1525 *attr
|= TSI148_LCSR_DSAT_TM_2eVME
;
1527 if (cycle
& VME_2eSST
) {
1528 *attr
|= TSI148_LCSR_DSAT_TM_2eSST
;
1530 if (cycle
& VME_2eSSTB
) {
1531 printk("Currently not setting Broadcast Select Registers\n");
1532 *attr
|= TSI148_LCSR_DSAT_TM_2eSSTB
;
1535 /* Setup data width */
1538 *attr
|= TSI148_LCSR_DSAT_DBW_16
;
1541 *attr
|= TSI148_LCSR_DSAT_DBW_32
;
1544 printk("Invalid data width\n");
1548 /* Setup address space */
1551 *attr
|= TSI148_LCSR_DSAT_AMODE_A16
;
1554 *attr
|= TSI148_LCSR_DSAT_AMODE_A24
;
1557 *attr
|= TSI148_LCSR_DSAT_AMODE_A32
;
1560 *attr
|= TSI148_LCSR_DSAT_AMODE_A64
;
1563 *attr
|= TSI148_LCSR_DSAT_AMODE_CRCSR
;
1566 *attr
|= TSI148_LCSR_DSAT_AMODE_USER1
;
1569 *attr
|= TSI148_LCSR_DSAT_AMODE_USER2
;
1572 *attr
|= TSI148_LCSR_DSAT_AMODE_USER3
;
1575 *attr
|= TSI148_LCSR_DSAT_AMODE_USER4
;
1578 printk("Invalid address space\n");
1583 if (cycle
& VME_SUPER
)
1584 *attr
|= TSI148_LCSR_DSAT_SUP
;
1585 if (cycle
& VME_PROG
)
1586 *attr
|= TSI148_LCSR_DSAT_PGM
;
1591 static int tsi148_dma_set_vme_dest_attributes(u32
*attr
, vme_address_t aspace
,
1592 vme_cycle_t cycle
, vme_width_t dwidth
)
1594 /* Setup 2eSST speeds */
1595 switch (cycle
& (VME_2eSST160
| VME_2eSST267
| VME_2eSST320
)) {
1597 *attr
|= TSI148_LCSR_DDAT_2eSSTM_160
;
1600 *attr
|= TSI148_LCSR_DDAT_2eSSTM_267
;
1603 *attr
|= TSI148_LCSR_DDAT_2eSSTM_320
;
1607 /* Setup cycle types */
1608 if (cycle
& VME_SCT
) {
1609 *attr
|= TSI148_LCSR_DDAT_TM_SCT
;
1611 if (cycle
& VME_BLT
) {
1612 *attr
|= TSI148_LCSR_DDAT_TM_BLT
;
1614 if (cycle
& VME_MBLT
) {
1615 *attr
|= TSI148_LCSR_DDAT_TM_MBLT
;
1617 if (cycle
& VME_2eVME
) {
1618 *attr
|= TSI148_LCSR_DDAT_TM_2eVME
;
1620 if (cycle
& VME_2eSST
) {
1621 *attr
|= TSI148_LCSR_DDAT_TM_2eSST
;
1623 if (cycle
& VME_2eSSTB
) {
1624 printk("Currently not setting Broadcast Select Registers\n");
1625 *attr
|= TSI148_LCSR_DDAT_TM_2eSSTB
;
1628 /* Setup data width */
1631 *attr
|= TSI148_LCSR_DDAT_DBW_16
;
1634 *attr
|= TSI148_LCSR_DDAT_DBW_32
;
1637 printk("Invalid data width\n");
1641 /* Setup address space */
1644 *attr
|= TSI148_LCSR_DDAT_AMODE_A16
;
1647 *attr
|= TSI148_LCSR_DDAT_AMODE_A24
;
1650 *attr
|= TSI148_LCSR_DDAT_AMODE_A32
;
1653 *attr
|= TSI148_LCSR_DDAT_AMODE_A64
;
1656 *attr
|= TSI148_LCSR_DDAT_AMODE_CRCSR
;
1659 *attr
|= TSI148_LCSR_DDAT_AMODE_USER1
;
1662 *attr
|= TSI148_LCSR_DDAT_AMODE_USER2
;
1665 *attr
|= TSI148_LCSR_DDAT_AMODE_USER3
;
1668 *attr
|= TSI148_LCSR_DDAT_AMODE_USER4
;
1671 printk("Invalid address space\n");
1676 if (cycle
& VME_SUPER
)
1677 *attr
|= TSI148_LCSR_DDAT_SUP
;
1678 if (cycle
& VME_PROG
)
1679 *attr
|= TSI148_LCSR_DDAT_PGM
;
1685 * Add a link list descriptor to the list
1687 * XXX Need to handle 2eSST Broadcast select bits
1689 int tsi148_dma_list_add (struct vme_dma_list
*list
, struct vme_dma_attr
*src
,
1690 struct vme_dma_attr
*dest
, size_t count
)
1692 struct tsi148_dma_entry
*entry
, *prev
;
1693 u32 address_high
, address_low
;
1694 struct vme_dma_pattern
*pattern_attr
;
1695 struct vme_dma_pci
*pci_attr
;
1696 struct vme_dma_vme
*vme_attr
;
1697 dma_addr_t desc_ptr
;
1700 /* XXX descriptor must be aligned on 64-bit boundaries */
1701 entry
= (struct tsi148_dma_entry
*)kmalloc(
1702 sizeof(struct tsi148_dma_entry
), GFP_KERNEL
);
1703 if (entry
== NULL
) {
1704 printk("Failed to allocate memory for dma resource "
1710 /* Test descriptor alignment */
1711 if ((unsigned long)&(entry
->descriptor
) & 0x7) {
1712 printk("Descriptor not aligned to 8 byte boundary as "
1713 "required: %p\n", &(entry
->descriptor
));
1718 /* Given we are going to fill out the structure, we probably don't
1719 * need to zero it, but better safe than sorry for now.
1721 memset(&(entry
->descriptor
), 0, sizeof(struct tsi148_dma_descriptor
));
1723 /* Fill out source part */
1724 switch (src
->type
) {
1725 case VME_DMA_PATTERN
:
1726 pattern_attr
= (struct vme_dma_pattern
*)src
->private;
1728 entry
->descriptor
.dsal
= pattern_attr
->pattern
;
1729 entry
->descriptor
.dsat
= TSI148_LCSR_DSAT_TYP_PAT
;
1730 /* Default behaviour is 32 bit pattern */
1731 if (pattern_attr
->type
& VME_DMA_PATTERN_BYTE
) {
1732 entry
->descriptor
.dsat
|= TSI148_LCSR_DSAT_PSZ
;
1734 /* It seems that the default behaviour is to increment */
1735 if ((pattern_attr
->type
& VME_DMA_PATTERN_INCREMENT
) == 0) {
1736 entry
->descriptor
.dsat
|= TSI148_LCSR_DSAT_NIN
;
1740 pci_attr
= (struct vme_dma_pci
*)src
->private;
1742 reg_split((unsigned long long)pci_attr
->address
, &address_high
,
1744 entry
->descriptor
.dsau
= address_high
;
1745 entry
->descriptor
.dsal
= address_low
;
1746 entry
->descriptor
.dsat
= TSI148_LCSR_DSAT_TYP_PCI
;
1749 vme_attr
= (struct vme_dma_vme
*)src
->private;
1751 reg_split((unsigned long long)vme_attr
->address
, &address_high
,
1753 entry
->descriptor
.dsau
= address_high
;
1754 entry
->descriptor
.dsal
= address_low
;
1755 entry
->descriptor
.dsat
= TSI148_LCSR_DSAT_TYP_VME
;
1757 retval
= tsi148_dma_set_vme_src_attributes(
1758 &(entry
->descriptor
.dsat
), vme_attr
->aspace
,
1759 vme_attr
->cycle
, vme_attr
->dwidth
);
1764 printk("Invalid source type\n");
1770 /* Assume last link - this will be over-written by adding another */
1771 entry
->descriptor
.dnlau
= 0;
1772 entry
->descriptor
.dnlal
= TSI148_LCSR_DNLAL_LLA
;
1775 /* Fill out destination part */
1776 switch (dest
->type
) {
1778 pci_attr
= (struct vme_dma_pci
*)dest
->private;
1780 reg_split((unsigned long long)pci_attr
->address
, &address_high
,
1782 entry
->descriptor
.ddau
= address_high
;
1783 entry
->descriptor
.ddal
= address_low
;
1784 entry
->descriptor
.ddat
= TSI148_LCSR_DDAT_TYP_PCI
;
1787 vme_attr
= (struct vme_dma_vme
*)dest
->private;
1789 reg_split((unsigned long long)vme_attr
->address
, &address_high
,
1791 entry
->descriptor
.ddau
= address_high
;
1792 entry
->descriptor
.ddal
= address_low
;
1793 entry
->descriptor
.ddat
= TSI148_LCSR_DDAT_TYP_VME
;
1795 retval
= tsi148_dma_set_vme_dest_attributes(
1796 &(entry
->descriptor
.ddat
), vme_attr
->aspace
,
1797 vme_attr
->cycle
, vme_attr
->dwidth
);
1802 printk("Invalid destination type\n");
1808 /* Fill out count */
1809 entry
->descriptor
.dcnt
= (u32
)count
;
1812 list_add_tail(&(entry
->list
), &(list
->entries
));
1814 /* Fill out previous descriptors "Next Address" */
1815 if(entry
->list
.prev
!= &(list
->entries
)){
1816 prev
= list_entry(entry
->list
.prev
, struct tsi148_dma_entry
,
1818 /* We need the bus address for the pointer */
1819 desc_ptr
= virt_to_bus(&(entry
->descriptor
));
1820 reg_split(desc_ptr
, &(prev
->descriptor
.dnlau
),
1821 &(prev
->descriptor
.dnlal
));
1835 * Check to see if the provided DMA channel is busy.
1837 static int tsi148_dma_busy(int channel
)
1841 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_DMA
[channel
] +
1842 TSI148_LCSR_OFFSET_DSTA
);
1844 if (tmp
& TSI148_LCSR_DSTA_BSY
)
1852 * Execute a previously generated link list
1854 * XXX Need to provide control register configuration.
1856 int tsi148_dma_list_exec(struct vme_dma_list
*list
)
1858 struct vme_dma_resource
*ctrlr
;
1859 int channel
, retval
= 0;
1860 struct tsi148_dma_entry
*entry
;
1861 dma_addr_t bus_addr
;
1862 u32 bus_addr_high
, bus_addr_low
;
1863 u32 val
, dctlreg
= 0;
1868 ctrlr
= list
->parent
;
1870 down(&(ctrlr
->sem
));
1872 channel
= ctrlr
->number
;
1874 if (! list_empty(&(ctrlr
->running
))) {
1876 * XXX We have an active DMA transfer and currently haven't
1877 * sorted out the mechanism for "pending" DMA transfers.
1880 /* Need to add to pending here */
1884 list_add(&(list
->list
), &(ctrlr
->running
));
1887 /* XXX Still todo */
1888 for (x
= 0; x
< 8; x
++) { /* vme block size */
1889 if ((32 << x
) >= vmeDma
->maxVmeBlockSize
) {
1895 dctlreg
|= (x
<< 12);
1897 for (x
= 0; x
< 8; x
++) { /* pci block size */
1898 if ((32 << x
) >= vmeDma
->maxPciBlockSize
) {
1904 dctlreg
|= (x
<< 4);
1906 if (vmeDma
->vmeBackOffTimer
) {
1907 for (x
= 1; x
< 8; x
++) { /* vme timer */
1908 if ((1 << (x
- 1)) >= vmeDma
->vmeBackOffTimer
) {
1914 dctlreg
|= (x
<< 8);
1917 if (vmeDma
->pciBackOffTimer
) {
1918 for (x
= 1; x
< 8; x
++) { /* pci timer */
1919 if ((1 << (x
- 1)) >= vmeDma
->pciBackOffTimer
) {
1925 dctlreg
|= (x
<< 0);
1929 /* Get first bus address and write into registers */
1930 entry
= list_first_entry(&(list
->entries
), struct tsi148_dma_entry
,
1933 bus_addr
= virt_to_bus(&(entry
->descriptor
));
1937 reg_split(bus_addr
, &bus_addr_high
, &bus_addr_low
);
1939 iowrite32be(bus_addr_high
, tsi148_bridge
->base
+
1940 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DNLAU
);
1941 iowrite32be(bus_addr_low
, tsi148_bridge
->base
+
1942 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DNLAL
);
1944 /* Start the operation */
1945 iowrite32be(dctlreg
| TSI148_LCSR_DCTL_DGO
, tsi148_bridge
->base
+
1946 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DCTL
);
1948 wait_event_interruptible(dma_queue
[channel
], tsi148_dma_busy(channel
));
1950 * Read status register, this register is valid until we kick off a
1953 val
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_DMA
[channel
] +
1954 TSI148_LCSR_OFFSET_DSTA
);
1956 if (val
& TSI148_LCSR_DSTA_VBE
) {
1957 printk(KERN_ERR
"tsi148: DMA Error. DSTA=%08X\n", val
);
1961 /* Remove list from running list */
1962 down(&(ctrlr
->sem
));
1963 list_del(&(list
->list
));
1970 * Clean up a previously generated link list
1972 * We have a separate function, don't assume that the chain can't be reused.
1974 int tsi148_dma_list_empty(struct vme_dma_list
*list
)
1976 struct list_head
*pos
, *temp
;
1977 struct tsi148_dma_entry
*entry
;
1979 /* detach and free each entry */
1980 list_for_each_safe(pos
, temp
, &(list
->entries
)) {
1982 entry
= list_entry(pos
, struct tsi148_dma_entry
, list
);
1990 * All 4 location monitors reside at the same base - this is therefore a
1991 * system wide configuration.
1993 * This does not enable the LM monitor - that should be done when the first
1994 * callback is attached and disabled when the last callback is removed.
1996 int tsi148_lm_set(unsigned long long lm_base
, vme_address_t aspace
,
1999 u32 lm_base_high
, lm_base_low
, lm_ctl
= 0;
2005 /* If we already have a callback attached, we can't move it! */
2006 for (i
= 0; i
< 4; i
++) {
2007 if(lm_callback
[i
] != NULL
) {
2009 printk("Location monitor callback attached, can't "
2017 lm_ctl
|= TSI148_LCSR_LMAT_AS_A16
;
2020 lm_ctl
|= TSI148_LCSR_LMAT_AS_A24
;
2023 lm_ctl
|= TSI148_LCSR_LMAT_AS_A32
;
2026 lm_ctl
|= TSI148_LCSR_LMAT_AS_A64
;
2030 printk("Invalid address space\n");
2035 if (cycle
& VME_SUPER
)
2036 lm_ctl
|= TSI148_LCSR_LMAT_SUPR
;
2037 if (cycle
& VME_USER
)
2038 lm_ctl
|= TSI148_LCSR_LMAT_NPRIV
;
2039 if (cycle
& VME_PROG
)
2040 lm_ctl
|= TSI148_LCSR_LMAT_PGM
;
2041 if (cycle
& VME_DATA
)
2042 lm_ctl
|= TSI148_LCSR_LMAT_DATA
;
2044 reg_split(lm_base
, &lm_base_high
, &lm_base_low
);
2046 iowrite32be(lm_base_high
, tsi148_bridge
->base
+ TSI148_LCSR_LMBAU
);
2047 iowrite32be(lm_base_low
, tsi148_bridge
->base
+ TSI148_LCSR_LMBAL
);
2048 iowrite32be(lm_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2055 /* Get configuration of the callback monitor and return whether it is enabled
2058 int tsi148_lm_get(unsigned long long *lm_base
, vme_address_t
*aspace
,
2061 u32 lm_base_high
, lm_base_low
, lm_ctl
, enabled
= 0;
2066 lm_base_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_LMBAU
);
2067 lm_base_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_LMBAL
);
2068 lm_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2070 reg_join(lm_base_high
, lm_base_low
, lm_base
);
2072 if (lm_ctl
& TSI148_LCSR_LMAT_EN
)
2075 if ((lm_ctl
& TSI148_LCSR_LMAT_AS_M
) == TSI148_LCSR_LMAT_AS_A16
) {
2078 if ((lm_ctl
& TSI148_LCSR_LMAT_AS_M
) == TSI148_LCSR_LMAT_AS_A24
) {
2081 if ((lm_ctl
& TSI148_LCSR_LMAT_AS_M
) == TSI148_LCSR_LMAT_AS_A32
) {
2084 if ((lm_ctl
& TSI148_LCSR_LMAT_AS_M
) == TSI148_LCSR_LMAT_AS_A64
) {
2088 if (lm_ctl
& TSI148_LCSR_LMAT_SUPR
)
2089 *cycle
|= VME_SUPER
;
2090 if (lm_ctl
& TSI148_LCSR_LMAT_NPRIV
)
2092 if (lm_ctl
& TSI148_LCSR_LMAT_PGM
)
2094 if (lm_ctl
& TSI148_LCSR_LMAT_DATA
)
2103 * Attach a callback to a specific location monitor.
2105 * Callback will be passed the monitor triggered.
2107 int tsi148_lm_attach(int monitor
, void (*callback
)(int))
2114 /* Ensure that the location monitor is configured - need PGM or DATA */
2115 lm_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2116 if ((lm_ctl
& (TSI148_LCSR_LMAT_PGM
| TSI148_LCSR_LMAT_DATA
)) == 0) {
2118 printk("Location monitor not properly configured\n");
2122 /* Check that a callback isn't already attached */
2123 if (lm_callback
[monitor
] != NULL
) {
2125 printk("Existing callback attached\n");
2129 /* Attach callback */
2130 lm_callback
[monitor
] = callback
;
2132 /* Enable Location Monitor interrupt */
2133 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
2134 tmp
|= TSI148_LCSR_INTEN_LMEN
[monitor
];
2135 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
2137 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
2138 tmp
|= TSI148_LCSR_INTEO_LMEO
[monitor
];
2139 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
2141 /* Ensure that global Location Monitor Enable set */
2142 if ((lm_ctl
& TSI148_LCSR_LMAT_EN
) == 0) {
2143 lm_ctl
|= TSI148_LCSR_LMAT_EN
;
2144 iowrite32be(lm_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2153 * Detach a callback function forn a specific location monitor.
2155 int tsi148_lm_detach(int monitor
)
2162 /* Disable Location Monitor and ensure previous interrupts are clear */
2163 lm_en
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
2164 lm_en
&= ~TSI148_LCSR_INTEN_LMEN
[monitor
];
2165 iowrite32be(lm_en
, tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
2167 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
2168 tmp
&= ~TSI148_LCSR_INTEO_LMEO
[monitor
];
2169 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
2171 iowrite32be(TSI148_LCSR_INTC_LMC
[monitor
],
2172 tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
2174 /* Detach callback */
2175 lm_callback
[monitor
] = NULL
;
2177 /* If all location monitors disabled, disable global Location Monitor */
2178 if ((lm_en
& (TSI148_LCSR_INTS_LM0S
| TSI148_LCSR_INTS_LM1S
|
2179 TSI148_LCSR_INTS_LM2S
| TSI148_LCSR_INTS_LM3S
)) == 0) {
2180 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2181 tmp
&= ~TSI148_LCSR_LMAT_EN
;
2182 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2191 * Determine Geographical Addressing
2193 int tsi148_slot_get(void)
2197 slot
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VSTAT
);
2198 slot
= slot
& TSI148_LCSR_VSTAT_GA_M
;
2202 static int __init
tsi148_init(void)
2204 return pci_register_driver(&tsi148_driver
);
2208 * Configure CR/CSR space
2210 * Access to the CR/CSR can be configured at power-up. The location of the
2211 * CR/CSR registers in the CR/CSR address space is determined by the boards
2212 * Auto-ID or Geographic address. This function ensures that the window is
2213 * enabled at an offset consistent with the boards geopgraphic address.
2215 * Each board has a 512kB window, with the highest 4kB being used for the
2216 * boards registers, this means there is a fix length 508kB window which must
2217 * be mapped onto PCI memory.
2219 static int tsi148_crcsr_init(struct pci_dev
*pdev
)
2221 u32 cbar
, crat
, vstat
;
2222 u32 crcsr_bus_high
, crcsr_bus_low
;
2225 /* Allocate mem for CR/CSR image */
2226 crcsr_kernel
= pci_alloc_consistent(pdev
, VME_CRCSR_BUF_SIZE
,
2228 if (crcsr_kernel
== NULL
) {
2229 dev_err(&pdev
->dev
, "Failed to allocate memory for CR/CSR "
2234 memset(crcsr_kernel
, 0, VME_CRCSR_BUF_SIZE
);
2236 reg_split(crcsr_bus
, &crcsr_bus_high
, &crcsr_bus_low
);
2238 iowrite32be(crcsr_bus_high
, tsi148_bridge
->base
+ TSI148_LCSR_CROU
);
2239 iowrite32be(crcsr_bus_low
, tsi148_bridge
->base
+ TSI148_LCSR_CROL
);
2241 /* Ensure that the CR/CSR is configured at the correct offset */
2242 cbar
= ioread32be(tsi148_bridge
->base
+ TSI148_CBAR
);
2243 cbar
= (cbar
& TSI148_CRCSR_CBAR_M
)>>3;
2245 vstat
= tsi148_slot_get();
2247 if (cbar
!= vstat
) {
2248 dev_info(&pdev
->dev
, "Setting CR/CSR offset\n");
2249 iowrite32be(cbar
<<3, tsi148_bridge
->base
+ TSI148_CBAR
);
2251 dev_info(&pdev
->dev
, "CR/CSR Offset: %d\n", cbar
);
2253 crat
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_CRAT
);
2254 if (crat
& TSI148_LCSR_CRAT_EN
) {
2255 dev_info(&pdev
->dev
, "Enabling CR/CSR space\n");
2256 iowrite32be(crat
| TSI148_LCSR_CRAT_EN
,
2257 tsi148_bridge
->base
+ TSI148_LCSR_CRAT
);
2259 dev_info(&pdev
->dev
, "CR/CSR already enabled\n");
2261 /* If we want flushed, error-checked writes, set up a window
2262 * over the CR/CSR registers. We read from here to safely flush
2263 * through VME writes.
2266 retval
= tsi148_master_set(flush_image
, 1, (vstat
* 0x80000),
2267 0x80000, VME_CRCSR
, VME_SCT
, VME_D16
);
2269 dev_err(&pdev
->dev
, "Configuring flush image failed\n");
2276 static void tsi148_crcsr_exit(struct pci_dev
*pdev
)
2280 /* Turn off CR/CSR space */
2281 crat
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_CRAT
);
2282 iowrite32be(crat
& ~TSI148_LCSR_CRAT_EN
,
2283 tsi148_bridge
->base
+ TSI148_LCSR_CRAT
);
2286 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_CROU
);
2287 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_CROL
);
2289 pci_free_consistent(pdev
, VME_CRCSR_BUF_SIZE
, crcsr_kernel
, crcsr_bus
);
2292 static int tsi148_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
2294 int retval
, i
, master_num
;
2296 struct list_head
*pos
= NULL
;
2297 struct vme_master_resource
*master_image
;
2298 struct vme_slave_resource
*slave_image
;
2299 struct vme_dma_resource
*dma_ctrlr
;
2301 /* If we want to support more than one of each bridge, we need to
2302 * dynamically generate this so we get one per device
2304 tsi148_bridge
= (struct vme_bridge
*)kmalloc(sizeof(struct vme_bridge
),
2306 if (tsi148_bridge
== NULL
) {
2307 dev_err(&pdev
->dev
, "Failed to allocate memory for device "
2313 memset(tsi148_bridge
, 0, sizeof(struct vme_bridge
));
2315 /* Enable the device */
2316 retval
= pci_enable_device(pdev
);
2318 dev_err(&pdev
->dev
, "Unable to enable device\n");
2323 retval
= pci_request_regions(pdev
, driver_name
);
2325 dev_err(&pdev
->dev
, "Unable to reserve resources\n");
2329 /* map registers in BAR 0 */
2330 tsi148_bridge
->base
= ioremap_nocache(pci_resource_start(pdev
, 0), 4096);
2331 if (!tsi148_bridge
->base
) {
2332 dev_err(&pdev
->dev
, "Unable to remap CRG region\n");
2337 /* Check to see if the mapping worked out */
2338 data
= ioread32(tsi148_bridge
->base
+ TSI148_PCFS_ID
) & 0x0000FFFF;
2339 if (data
!= PCI_VENDOR_ID_TUNDRA
) {
2340 dev_err(&pdev
->dev
, "CRG region check failed\n");
2345 /* Initialize wait queues & mutual exclusion flags */
2346 /* XXX These need to be moved to the vme_bridge structure */
2347 init_waitqueue_head(&dma_queue
[0]);
2348 init_waitqueue_head(&dma_queue
[1]);
2349 init_waitqueue_head(&iack_queue
);
2350 init_MUTEX(&(vme_int
));
2351 init_MUTEX(&(vme_irq
));
2352 init_MUTEX(&(vme_rmw
));
2353 init_MUTEX(&(vme_lm
));
2355 tsi148_bridge
->parent
= &(pdev
->dev
);
2356 strcpy(tsi148_bridge
->name
, driver_name
);
2359 retval
= tsi148_irq_init(tsi148_bridge
);
2361 dev_err(&pdev
->dev
, "Chip Initialization failed.\n");
2365 /* If we are going to flush writes, we need to read from the VME bus.
2366 * We need to do this safely, thus we read the devices own CR/CSR
2367 * register. To do this we must set up a window in CR/CSR space and
2368 * hence have one less master window resource available.
2370 master_num
= TSI148_MAX_MASTER
;
2374 flush_image
= (struct vme_master_resource
*)kmalloc(
2375 sizeof(struct vme_master_resource
), GFP_KERNEL
);
2376 if (flush_image
== NULL
) {
2377 dev_err(&pdev
->dev
, "Failed to allocate memory for "
2378 "flush resource structure\n");
2382 flush_image
->parent
= tsi148_bridge
;
2383 spin_lock_init(&(flush_image
->lock
));
2384 flush_image
->locked
= 1;
2385 flush_image
->number
= master_num
;
2386 flush_image
->address_attr
= VME_A16
| VME_A24
| VME_A32
|
2388 flush_image
->cycle_attr
= VME_SCT
| VME_BLT
| VME_MBLT
|
2389 VME_2eVME
| VME_2eSST
| VME_2eSSTB
| VME_2eSST160
|
2390 VME_2eSST267
| VME_2eSST320
| VME_SUPER
| VME_USER
|
2391 VME_PROG
| VME_DATA
;
2392 flush_image
->width_attr
= VME_D16
| VME_D32
;
2393 memset(&(flush_image
->pci_resource
), 0,
2394 sizeof(struct resource
));
2395 flush_image
->kern_base
= NULL
;
2398 /* Add master windows to list */
2399 INIT_LIST_HEAD(&(tsi148_bridge
->master_resources
));
2400 for (i
= 0; i
< master_num
; i
++) {
2401 master_image
= (struct vme_master_resource
*)kmalloc(
2402 sizeof(struct vme_master_resource
), GFP_KERNEL
);
2403 if (master_image
== NULL
) {
2404 dev_err(&pdev
->dev
, "Failed to allocate memory for "
2405 "master resource structure\n");
2409 master_image
->parent
= tsi148_bridge
;
2410 spin_lock_init(&(master_image
->lock
));
2411 master_image
->locked
= 0;
2412 master_image
->number
= i
;
2413 master_image
->address_attr
= VME_A16
| VME_A24
| VME_A32
|
2415 master_image
->cycle_attr
= VME_SCT
| VME_BLT
| VME_MBLT
|
2416 VME_2eVME
| VME_2eSST
| VME_2eSSTB
| VME_2eSST160
|
2417 VME_2eSST267
| VME_2eSST320
| VME_SUPER
| VME_USER
|
2418 VME_PROG
| VME_DATA
;
2419 master_image
->width_attr
= VME_D16
| VME_D32
;
2420 memset(&(master_image
->pci_resource
), 0,
2421 sizeof(struct resource
));
2422 master_image
->kern_base
= NULL
;
2423 list_add_tail(&(master_image
->list
),
2424 &(tsi148_bridge
->master_resources
));
2427 /* Add slave windows to list */
2428 INIT_LIST_HEAD(&(tsi148_bridge
->slave_resources
));
2429 for (i
= 0; i
< TSI148_MAX_SLAVE
; i
++) {
2430 slave_image
= (struct vme_slave_resource
*)kmalloc(
2431 sizeof(struct vme_slave_resource
), GFP_KERNEL
);
2432 if (slave_image
== NULL
) {
2433 dev_err(&pdev
->dev
, "Failed to allocate memory for "
2434 "slave resource structure\n");
2438 slave_image
->parent
= tsi148_bridge
;
2439 init_MUTEX(&(slave_image
->sem
));
2440 slave_image
->locked
= 0;
2441 slave_image
->number
= i
;
2442 slave_image
->address_attr
= VME_A16
| VME_A24
| VME_A32
|
2443 VME_A64
| VME_CRCSR
| VME_USER1
| VME_USER2
|
2444 VME_USER3
| VME_USER4
;
2445 slave_image
->cycle_attr
= VME_SCT
| VME_BLT
| VME_MBLT
|
2446 VME_2eVME
| VME_2eSST
| VME_2eSSTB
| VME_2eSST160
|
2447 VME_2eSST267
| VME_2eSST320
| VME_SUPER
| VME_USER
|
2448 VME_PROG
| VME_DATA
;
2449 list_add_tail(&(slave_image
->list
),
2450 &(tsi148_bridge
->slave_resources
));
2453 /* Add dma engines to list */
2454 INIT_LIST_HEAD(&(tsi148_bridge
->dma_resources
));
2455 for (i
= 0; i
< TSI148_MAX_DMA
; i
++) {
2456 dma_ctrlr
= (struct vme_dma_resource
*)kmalloc(
2457 sizeof(struct vme_dma_resource
), GFP_KERNEL
);
2458 if (dma_ctrlr
== NULL
) {
2459 dev_err(&pdev
->dev
, "Failed to allocate memory for "
2460 "dma resource structure\n");
2464 dma_ctrlr
->parent
= tsi148_bridge
;
2465 init_MUTEX(&(dma_ctrlr
->sem
));
2466 dma_ctrlr
->locked
= 0;
2467 dma_ctrlr
->number
= i
;
2468 INIT_LIST_HEAD(&(dma_ctrlr
->pending
));
2469 INIT_LIST_HEAD(&(dma_ctrlr
->running
));
2470 list_add_tail(&(dma_ctrlr
->list
),
2471 &(tsi148_bridge
->dma_resources
));
2474 tsi148_bridge
->slave_get
= tsi148_slave_get
;
2475 tsi148_bridge
->slave_set
= tsi148_slave_set
;
2476 tsi148_bridge
->master_get
= tsi148_master_get
;
2477 tsi148_bridge
->master_set
= tsi148_master_set
;
2478 tsi148_bridge
->master_read
= tsi148_master_read
;
2479 tsi148_bridge
->master_write
= tsi148_master_write
;
2480 tsi148_bridge
->master_rmw
= tsi148_master_rmw
;
2481 tsi148_bridge
->dma_list_add
= tsi148_dma_list_add
;
2482 tsi148_bridge
->dma_list_exec
= tsi148_dma_list_exec
;
2483 tsi148_bridge
->dma_list_empty
= tsi148_dma_list_empty
;
2484 tsi148_bridge
->request_irq
= tsi148_request_irq
;
2485 tsi148_bridge
->free_irq
= tsi148_free_irq
;
2486 tsi148_bridge
->generate_irq
= tsi148_generate_irq
;
2487 tsi148_bridge
->lm_set
= tsi148_lm_set
;
2488 tsi148_bridge
->lm_get
= tsi148_lm_get
;
2489 tsi148_bridge
->lm_attach
= tsi148_lm_attach
;
2490 tsi148_bridge
->lm_detach
= tsi148_lm_detach
;
2491 tsi148_bridge
->slot_get
= tsi148_slot_get
;
2493 data
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VSTAT
);
2494 dev_info(&pdev
->dev
, "Board is%s the VME system controller\n",
2495 (data
& TSI148_LCSR_VSTAT_SCONS
)? "" : " not");
2496 dev_info(&pdev
->dev
, "VME geographical address is %d\n",
2497 data
& TSI148_LCSR_VSTAT_GA_M
);
2498 dev_info(&pdev
->dev
, "VME Write and flush and error check is %s\n",
2499 err_chk
? "enabled" : "disabled");
2501 if(tsi148_crcsr_init(pdev
)) {
2502 dev_err(&pdev
->dev
, "CR/CSR configuration failed.\n");
2507 /* Need to save tsi148_bridge pointer locally in link list for use in
2510 retval
= vme_register_bridge(tsi148_bridge
);
2512 dev_err(&pdev
->dev
, "Chip Registration failed.\n");
2516 /* Clear VME bus "board fail", and "power-up reset" lines */
2517 data
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VSTAT
);
2518 data
&= ~TSI148_LCSR_VSTAT_BRDFL
;
2519 data
|= TSI148_LCSR_VSTAT_CPURST
;
2520 iowrite32be(data
, tsi148_bridge
->base
+ TSI148_LCSR_VSTAT
);
2524 vme_unregister_bridge(tsi148_bridge
);
2526 tsi148_crcsr_exit(pdev
);
2529 /* resources are stored in link list */
2530 list_for_each(pos
, &(tsi148_bridge
->dma_resources
)) {
2531 dma_ctrlr
= list_entry(pos
, struct vme_dma_resource
, list
);
2536 /* resources are stored in link list */
2537 list_for_each(pos
, &(tsi148_bridge
->slave_resources
)) {
2538 slave_image
= list_entry(pos
, struct vme_slave_resource
, list
);
2543 /* resources are stored in link list */
2544 list_for_each(pos
, &(tsi148_bridge
->master_resources
)) {
2545 master_image
= list_entry(pos
, struct vme_master_resource
, list
);
2547 kfree(master_image
);
2550 tsi148_irq_exit(pdev
);
2553 iounmap(tsi148_bridge
->base
);
2555 pci_release_regions(pdev
);
2557 pci_disable_device(pdev
);
2559 kfree(tsi148_bridge
);
2565 static void tsi148_remove(struct pci_dev
*pdev
)
2567 struct list_head
*pos
= NULL
;
2568 struct vme_master_resource
*master_image
;
2569 struct vme_slave_resource
*slave_image
;
2570 struct vme_dma_resource
*dma_ctrlr
;
2573 dev_dbg(&pdev
->dev
, "Driver is being unloaded.\n");
2575 /* XXX We need to find the pdev->dev in the list of vme_bridge->dev's */
2578 * Shutdown all inbound and outbound windows.
2580 for (i
= 0; i
< 8; i
++) {
2581 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
2582 TSI148_LCSR_OFFSET_ITAT
);
2583 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
2584 TSI148_LCSR_OFFSET_OTAT
);
2588 * Shutdown Location monitor.
2590 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2595 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_CSRAT
);
2598 * Clear error status.
2600 iowrite32be(0xFFFFFFFF, tsi148_bridge
->base
+ TSI148_LCSR_EDPAT
);
2601 iowrite32be(0xFFFFFFFF, tsi148_bridge
->base
+ TSI148_LCSR_VEAT
);
2602 iowrite32be(0x07000700, tsi148_bridge
->base
+ TSI148_LCSR_PSTAT
);
2605 * Remove VIRQ interrupt (if any)
2607 if (ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VICR
) & 0x800) {
2608 iowrite32be(0x8000, tsi148_bridge
->base
+ TSI148_LCSR_VICR
);
2612 * Disable and clear all interrupts.
2614 iowrite32be(0x0, tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
2615 iowrite32be(0xFFFFFFFF, tsi148_bridge
->base
+ TSI148_LCSR_INTC
);
2616 iowrite32be(0xFFFFFFFF, tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
2619 * Map all Interrupts to PCI INTA
2621 iowrite32be(0x0, tsi148_bridge
->base
+ TSI148_LCSR_INTM1
);
2622 iowrite32be(0x0, tsi148_bridge
->base
+ TSI148_LCSR_INTM2
);
2624 tsi148_irq_exit(pdev
);
2626 vme_unregister_bridge(tsi148_bridge
);
2628 tsi148_crcsr_exit(pdev
);
2630 /* resources are stored in link list */
2631 list_for_each(pos
, &(tsi148_bridge
->dma_resources
)) {
2632 dma_ctrlr
= list_entry(pos
, struct vme_dma_resource
, list
);
2637 /* resources are stored in link list */
2638 list_for_each(pos
, &(tsi148_bridge
->slave_resources
)) {
2639 slave_image
= list_entry(pos
, struct vme_slave_resource
, list
);
2644 /* resources are stored in link list */
2645 list_for_each(pos
, &(tsi148_bridge
->master_resources
)) {
2646 master_image
= list_entry(pos
, struct vme_master_resource
, list
);
2648 kfree(master_image
);
2651 tsi148_irq_exit(pdev
);
2653 iounmap(tsi148_bridge
->base
);
2655 pci_release_regions(pdev
);
2657 pci_disable_device(pdev
);
2659 kfree(tsi148_bridge
);
/*
 * Module exit: unregister the PCI driver, which causes the PCI core to call
 * tsi148_remove() for every bound device.
 */
static void __exit tsi148_exit(void)
{
	pci_unregister_driver(&tsi148_driver);

	printk(KERN_DEBUG "Driver removed.\n");
}
/* When set, VME reads/writes are followed by an error-check cycle (slower,
 * but errors are reported synchronously) — see the dev_info() in probe. */
MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
module_param(err_chk, bool, 0);		/* perm 0: not visible in sysfs */

MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
MODULE_LICENSE("GPL");

module_init(tsi148_init);
module_exit(tsi148_exit);
/*----------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/*
 * Direct Mode DMA transfer
 *
 * XXX Not looking at direct mode for now, we can always use link list mode
 * with a single entry.
 *
 * NOTE(review): several lines of this function were lost when this copy was
 * extracted — the local declarations (x, dctlreg, channel, vmeDma, tmp, val),
 * the `break` statements that terminate the encoding-search loops, the
 * printk() wrapping the error string near the bottom, and the final return.
 * Restore them from the upstream source before building; the NOTE(review)
 * markers below show where material is missing.
 */
int tsi148_dma_run(struct vme_dma_resource *resource, struct vme_dma_attr src,
	struct vme_dma_attr dest, size_t count)
{
	struct vmeDmaPacket *cur_dma;
	struct tsi148_dma_descriptor *dmaLL;

	/* Find the smallest power-of-two VME block size (32 << x) covering
	 * the requested maximum; x is then encoded into DCTL bits 14:12. */
	for (x = 0; x < 8; x++) {	/* vme block size */
		if ((32 << x) >= vmeDma->maxVmeBlockSize) {
			/* NOTE(review): loop-exit (break) lost in extraction */
		}
	}
	dctlreg |= (x << 12);

	/* Same search for the PCI-side block size, encoded into DCTL 6:4. */
	for (x = 0; x < 8; x++) {	/* pci block size */
		if ((32 << x) >= vmeDma->maxPciBlockSize) {
			/* NOTE(review): loop-exit (break) lost in extraction */
		}
	}
	dctlreg |= (x << 4);

	/* Optional VME bus back-off timer, encoded into DCTL 10:8. */
	if (vmeDma->vmeBackOffTimer) {
		for (x = 1; x < 8; x++) {	/* vme timer */
			if ((1 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
				/* NOTE(review): break lost in extraction */
			}
		}
		dctlreg |= (x << 8);
	}

	/* Optional PCI bus back-off timer, encoded into DCTL 2:0. */
	if (vmeDma->pciBackOffTimer) {
		for (x = 1; x < 8; x++) {	/* pci timer */
			if ((1 << (x - 1)) >= vmeDma->pciBackOffTimer) {
				/* NOTE(review): break lost in extraction */
			}
		}
		dctlreg |= (x << 0);
	}

	/* Program registers for DMA transfer */
	iowrite32be(dmaLL->dsau, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAU);
	iowrite32be(dmaLL->dsal, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAL);
	iowrite32be(dmaLL->ddau, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAU);
	iowrite32be(dmaLL->ddal, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAL);
	iowrite32be(dmaLL->dsat, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAT);
	iowrite32be(dmaLL->ddat, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAT);
	iowrite32be(dmaLL->dcnt, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCNT);
	iowrite32be(dmaLL->ddbs, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDBS);

	/* Start the operation */
	iowrite32be(dctlreg | 0x2000000, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);

	/* Sleep until the busy bit (0x1000000) in DSTA clears.
	 * NOTE(review): tmp is read once, before sleeping, and the wait
	 * condition never re-reads the register — presumably the interrupt
	 * handler re-reads DSTA and wakes dma_queue[channel]; confirm. */
	tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);
	wait_event_interruptible(dma_queue[channel], (tmp & 0x1000000) == 0);

	/*
	 * Read status register, we should probably do this in some error
	 * handler rather than here so that we can be sure we haven't kicked off
	 * another DMA transfer.
	 */
	val = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);

	vmeDma->vmeDmaStatus = 0;
	if (val & 0x10000000) {
		/* NOTE(review): the printk() call wrapping this format string
		 * was lost in extraction — restore it from upstream. */
			"DMA Error in DMA_tempe_irqhandler DSTA=%08X\n",
		vmeDma->vmeDmaStatus = val;
	}
	/* NOTE(review): final return statement lost in extraction. */
}
/* Global VME controller information */
/* NOTE(review): file-scope global, apparently set at probe time; it is not
 * referenced anywhere in this chunk — confirm usage against the rest of the
 * file before removing. */
struct pci_dev *vme_pci_dev;
/*
 * Set the VME bus arbiter with the requested attributes
 *
 * NOTE(review): several lines of this function were lost when this copy was
 * extracted — the local declarations (temp_ctl, gto), most branch bodies of
 * the timeout conversion, the bit-setting statements for arbiter mode and
 * arbiter timeout, and the final return.  Restore them from the upstream
 * source; the NOTE(review) markers show where material is missing.
 */
int tempe_set_arbiter(vmeArbiterCfg_t * vmeArb)
{
	temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VCTRL);
	temp_ctl &= 0xFFEFFF00;	/* clear early-release, mode/timeout and GTO */

	/* Map the requested global timeout onto the 4-bit GTO encoding,
	 * where each step represents 16 * 2^(gto-1) time units. */
	if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
		/* NOTE(review): branch body lost in extraction */
	} else if (vmeArb->globalTimeoutTimer > 2048) {
		/* NOTE(review): branch body lost (likely a range error) */
	} else if (vmeArb->globalTimeoutTimer == 0) {
		/* NOTE(review): branch body lost in extraction */
	} else {
		/* NOTE(review): gto initialisation lost in extraction */
		while ((16 * (1 << (gto - 1))) < vmeArb->globalTimeoutTimer) {
			/* NOTE(review): loop body (gto++) lost in extraction */
		}
	}

	/* Arbitration scheme: anything other than priority mode presumably
	 * selects round-robin — the bit-set statement was lost. */
	if (vmeArb->arbiterMode != VME_PRIORITY_MODE) {
		/* NOTE(review): branch body lost in extraction */
	}

	/* Arbiter timeout enable — bit-set statement lost. */
	if (vmeArb->arbiterTimeoutFlag) {
		/* NOTE(review): branch body lost in extraction */
	}

	/* Bit 20: disable early release of the VME bus. */
	if (vmeArb->noEarlyReleaseFlag) {
		temp_ctl |= 1 << 20;
	}

	iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_VCTRL);
	/* NOTE(review): final return statement lost in extraction. */
}
/*
 * Return the attributes of the VME bus arbiter.
 *
 * NOTE(review): lines lost in extraction — the local declarations (temp_ctl,
 * gto), an apparent guard around the timeout conversion, zero-initialisation
 * of the output flags, and the final return.  As written, gto == 0 makes the
 * conversion left-shift by -1, which is undefined behaviour; upstream almost
 * certainly guarded this with `if (gto != 0)` — confirm and restore.
 */
int tempe_get_arbiter(vmeArbiterCfg_t * vmeArb)
{
	temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VCTRL);

	/* Decode the 4-bit global timeout field (16 * 2^(gto-1)). */
	gto = temp_ctl & 0xF;
	vmeArb->globalTimeoutTimer = (16 * (1 << (gto - 1)));

	/* Bit 6: round-robin arbitration when set, priority otherwise. */
	if (temp_ctl & (1 << 6)) {
		vmeArb->arbiterMode = VME_R_ROBIN_MODE;
	} else {
		vmeArb->arbiterMode = VME_PRIORITY_MODE;
	}

	/* Bit 7: arbiter timeout enabled. */
	if (temp_ctl & (1 << 7)) {
		vmeArb->arbiterTimeoutFlag = 1;
	}

	/* Bit 20: early release of the bus disabled. */
	if (temp_ctl & (1 << 20)) {
		vmeArb->noEarlyReleaseFlag = 1;
	}
	/* NOTE(review): final return statement lost in extraction. */
}
/*
 * Set the VME bus requestor with the requested attributes
 *
 * NOTE(review): the local declaration of temp_ctl and the final return were
 * lost when this copy was extracted — restore from the upstream source.
 * Also note the asymmetry with tempe_get_requestor(): set writes only bit 3
 * for release mode while get tests 0x18 (bits 3-4), and requestLevel is OR'd
 * in unmasked here but read back with `& 3` — verify against the TSI148
 * VMCTRL register definition.
 */
int tempe_set_requestor(vmeRequesterCfg_t * vmeReq)
{
	temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
	temp_ctl &= 0xFFFF0000;	/* clear the low 16 requestor control bits */

	/* Bit 3: release mode (release-when-done vs release-on-request). */
	if (vmeReq->releaseMode == 1) {
		temp_ctl |= (1 << 3);
	}

	/* Bit 2: fair request mode. */
	if (vmeReq->fairMode == 1) {
		temp_ctl |= (1 << 2);
	}

	/* 3-bit time-on and time-off timers, and the bus request level. */
	temp_ctl |= (vmeReq->timeonTimeoutTimer & 7) << 8;
	temp_ctl |= (vmeReq->timeoffTimeoutTimer & 7) << 12;
	temp_ctl |= vmeReq->requestLevel;

	iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
	/* NOTE(review): final return statement lost in extraction. */
}
2894 * Return the attributes of the VME bus requestor
2896 int tempe_get_requestor(vmeRequesterCfg_t
* vmeReq
)
2900 temp_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
2902 if (temp_ctl
& 0x18) {
2903 vmeReq
->releaseMode
= 1;
2906 if (temp_ctl
& (1 << 2)) {
2907 vmeReq
->fairMode
= 1;
2910 vmeReq
->requestLevel
= temp_ctl
& 3;
2911 vmeReq
->timeonTimeoutTimer
= (temp_ctl
>> 8) & 7;
2912 vmeReq
->timeoffTimeoutTimer
= (temp_ctl
>> 12) & 7;