4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
16 #include <linux/init.h>
17 #include <linux/export.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/highmem.h>
25 #include <linux/interrupt.h>
26 #include <linux/pagemap.h>
27 #include <linux/device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/syscalls.h>
30 #include <linux/mutex.h>
31 #include <linux/spinlock.h>
32 #include <linux/slab.h>
33 #include <linux/vme.h>
35 #include "vme_bridge.h"
37 /* Bitmask and list of registered buses both protected by common mutex */
38 static unsigned int vme_bus_numbers
;
39 static LIST_HEAD(vme_bus_list
);
40 static DEFINE_MUTEX(vme_buses_lock
);
42 static int __init
vme_init(void);
44 static struct vme_dev
*dev_to_vme_dev(struct device
*dev
)
46 return container_of(dev
, struct vme_dev
, dev
);
50 * Find the bridge that the resource is associated with.
52 static struct vme_bridge
*find_bridge(struct vme_resource
*resource
)
54 /* Get list to search */
55 switch (resource
->type
) {
57 return list_entry(resource
->entry
, struct vme_master_resource
,
61 return list_entry(resource
->entry
, struct vme_slave_resource
,
65 return list_entry(resource
->entry
, struct vme_dma_resource
,
69 return list_entry(resource
->entry
, struct vme_lm_resource
,
73 printk(KERN_ERR
"Unknown resource type\n");
80 * vme_free_consistent - Allocate contiguous memory.
81 * @resource: Pointer to VME resource.
82 * @size: Size of allocation required.
83 * @dma: Pointer to variable to store physical address of allocation.
85 * Allocate a contiguous block of memory for use by the driver. This is used to
86 * create the buffers for the slave windows.
88 * Return: Virtual address of allocation on success, NULL on failure.
90 void *vme_alloc_consistent(struct vme_resource
*resource
, size_t size
,
93 struct vme_bridge
*bridge
;
95 if (resource
== NULL
) {
96 printk(KERN_ERR
"No resource\n");
100 bridge
= find_bridge(resource
);
101 if (bridge
== NULL
) {
102 printk(KERN_ERR
"Can't find bridge\n");
106 if (bridge
->parent
== NULL
) {
107 printk(KERN_ERR
"Dev entry NULL for bridge %s\n", bridge
->name
);
111 if (bridge
->alloc_consistent
== NULL
) {
112 printk(KERN_ERR
"alloc_consistent not supported by bridge %s\n",
117 return bridge
->alloc_consistent(bridge
->parent
, size
, dma
);
119 EXPORT_SYMBOL(vme_alloc_consistent
);
122 * vme_free_consistent - Free previously allocated memory.
123 * @resource: Pointer to VME resource.
124 * @size: Size of allocation to free.
125 * @vaddr: Virtual address of allocation.
126 * @dma: Physical address of allocation.
128 * Free previously allocated block of contiguous memory.
130 void vme_free_consistent(struct vme_resource
*resource
, size_t size
,
131 void *vaddr
, dma_addr_t dma
)
133 struct vme_bridge
*bridge
;
135 if (resource
== NULL
) {
136 printk(KERN_ERR
"No resource\n");
140 bridge
= find_bridge(resource
);
141 if (bridge
== NULL
) {
142 printk(KERN_ERR
"Can't find bridge\n");
146 if (bridge
->parent
== NULL
) {
147 printk(KERN_ERR
"Dev entry NULL for bridge %s\n", bridge
->name
);
151 if (bridge
->free_consistent
== NULL
) {
152 printk(KERN_ERR
"free_consistent not supported by bridge %s\n",
157 bridge
->free_consistent(bridge
->parent
, size
, vaddr
, dma
);
159 EXPORT_SYMBOL(vme_free_consistent
);
162 * vme_get_size - Helper function returning size of a VME window
163 * @resource: Pointer to VME slave or master resource.
165 * Determine the size of the VME window provided. This is a helper
166 * function, wrappering the call to vme_master_get or vme_slave_get
167 * depending on the type of window resource handed to it.
169 * Return: Size of the window on success, zero on failure.
171 size_t vme_get_size(struct vme_resource
*resource
)
174 unsigned long long base
, size
;
176 u32 aspace
, cycle
, dwidth
;
178 switch (resource
->type
) {
180 retval
= vme_master_get(resource
, &enabled
, &base
, &size
,
181 &aspace
, &cycle
, &dwidth
);
188 retval
= vme_slave_get(resource
, &enabled
, &base
, &size
,
189 &buf_base
, &aspace
, &cycle
);
199 printk(KERN_ERR
"Unknown resource type\n");
204 EXPORT_SYMBOL(vme_get_size
);
206 int vme_check_window(u32 aspace
, unsigned long long vme_base
,
207 unsigned long long size
)
213 if (((vme_base
+ size
) > VME_A16_MAX
) ||
214 (vme_base
> VME_A16_MAX
))
218 if (((vme_base
+ size
) > VME_A24_MAX
) ||
219 (vme_base
> VME_A24_MAX
))
223 if (((vme_base
+ size
) > VME_A32_MAX
) ||
224 (vme_base
> VME_A32_MAX
))
228 if ((size
!= 0) && (vme_base
> U64_MAX
+ 1 - size
))
232 if (((vme_base
+ size
) > VME_CRCSR_MAX
) ||
233 (vme_base
> VME_CRCSR_MAX
))
243 printk(KERN_ERR
"Invalid address space\n");
250 EXPORT_SYMBOL(vme_check_window
);
252 static u32
vme_get_aspace(int am
)
286 * vme_slave_request - Request a VME slave window resource.
287 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
288 * @address: Required VME address space.
289 * @cycle: Required VME data transfer cycle type.
291 * Request use of a VME window resource capable of being set for the requested
292 * address space and data transfer cycle.
294 * Return: Pointer to VME resource on success, NULL on failure.
296 struct vme_resource
*vme_slave_request(struct vme_dev
*vdev
, u32 address
,
299 struct vme_bridge
*bridge
;
300 struct list_head
*slave_pos
= NULL
;
301 struct vme_slave_resource
*allocated_image
= NULL
;
302 struct vme_slave_resource
*slave_image
= NULL
;
303 struct vme_resource
*resource
= NULL
;
305 bridge
= vdev
->bridge
;
306 if (bridge
== NULL
) {
307 printk(KERN_ERR
"Can't find VME bus\n");
311 /* Loop through slave resources */
312 list_for_each(slave_pos
, &bridge
->slave_resources
) {
313 slave_image
= list_entry(slave_pos
,
314 struct vme_slave_resource
, list
);
316 if (slave_image
== NULL
) {
317 printk(KERN_ERR
"Registered NULL Slave resource\n");
321 /* Find an unlocked and compatible image */
322 mutex_lock(&slave_image
->mtx
);
323 if (((slave_image
->address_attr
& address
) == address
) &&
324 ((slave_image
->cycle_attr
& cycle
) == cycle
) &&
325 (slave_image
->locked
== 0)) {
327 slave_image
->locked
= 1;
328 mutex_unlock(&slave_image
->mtx
);
329 allocated_image
= slave_image
;
332 mutex_unlock(&slave_image
->mtx
);
336 if (allocated_image
== NULL
)
339 resource
= kmalloc(sizeof(*resource
), GFP_KERNEL
);
343 resource
->type
= VME_SLAVE
;
344 resource
->entry
= &allocated_image
->list
;
350 mutex_lock(&slave_image
->mtx
);
351 slave_image
->locked
= 0;
352 mutex_unlock(&slave_image
->mtx
);
357 EXPORT_SYMBOL(vme_slave_request
);
360 * vme_slave_set - Set VME slave window configuration.
361 * @resource: Pointer to VME slave resource.
362 * @enabled: State to which the window should be configured.
363 * @vme_base: Base address for the window.
364 * @size: Size of the VME window.
365 * @buf_base: Based address of buffer used to provide VME slave window storage.
366 * @aspace: VME address space for the VME window.
367 * @cycle: VME data transfer cycle type for the VME window.
369 * Set configuration for provided VME slave window.
371 * Return: Zero on success, -EINVAL if operation is not supported on this
372 * device, if an invalid resource has been provided or invalid
373 * attributes are provided. Hardware specific errors may also be
376 int vme_slave_set(struct vme_resource
*resource
, int enabled
,
377 unsigned long long vme_base
, unsigned long long size
,
378 dma_addr_t buf_base
, u32 aspace
, u32 cycle
)
380 struct vme_bridge
*bridge
= find_bridge(resource
);
381 struct vme_slave_resource
*image
;
384 if (resource
->type
!= VME_SLAVE
) {
385 printk(KERN_ERR
"Not a slave resource\n");
389 image
= list_entry(resource
->entry
, struct vme_slave_resource
, list
);
391 if (bridge
->slave_set
== NULL
) {
392 printk(KERN_ERR
"Function not supported\n");
396 if (!(((image
->address_attr
& aspace
) == aspace
) &&
397 ((image
->cycle_attr
& cycle
) == cycle
))) {
398 printk(KERN_ERR
"Invalid attributes\n");
402 retval
= vme_check_window(aspace
, vme_base
, size
);
406 return bridge
->slave_set(image
, enabled
, vme_base
, size
, buf_base
,
409 EXPORT_SYMBOL(vme_slave_set
);
412 * vme_slave_get - Retrieve VME slave window configuration.
413 * @resource: Pointer to VME slave resource.
414 * @enabled: Pointer to variable for storing state.
415 * @vme_base: Pointer to variable for storing window base address.
416 * @size: Pointer to variable for storing window size.
417 * @buf_base: Pointer to variable for storing slave buffer base address.
418 * @aspace: Pointer to variable for storing VME address space.
419 * @cycle: Pointer to variable for storing VME data transfer cycle type.
421 * Return configuration for provided VME slave window.
423 * Return: Zero on success, -EINVAL if operation is not supported on this
424 * device or if an invalid resource has been provided.
426 int vme_slave_get(struct vme_resource
*resource
, int *enabled
,
427 unsigned long long *vme_base
, unsigned long long *size
,
428 dma_addr_t
*buf_base
, u32
*aspace
, u32
*cycle
)
430 struct vme_bridge
*bridge
= find_bridge(resource
);
431 struct vme_slave_resource
*image
;
433 if (resource
->type
!= VME_SLAVE
) {
434 printk(KERN_ERR
"Not a slave resource\n");
438 image
= list_entry(resource
->entry
, struct vme_slave_resource
, list
);
440 if (bridge
->slave_get
== NULL
) {
441 printk(KERN_ERR
"vme_slave_get not supported\n");
445 return bridge
->slave_get(image
, enabled
, vme_base
, size
, buf_base
,
448 EXPORT_SYMBOL(vme_slave_get
);
451 * vme_slave_free - Free VME slave window
452 * @resource: Pointer to VME slave resource.
454 * Free the provided slave resource so that it may be reallocated.
456 void vme_slave_free(struct vme_resource
*resource
)
458 struct vme_slave_resource
*slave_image
;
460 if (resource
->type
!= VME_SLAVE
) {
461 printk(KERN_ERR
"Not a slave resource\n");
465 slave_image
= list_entry(resource
->entry
, struct vme_slave_resource
,
467 if (slave_image
== NULL
) {
468 printk(KERN_ERR
"Can't find slave resource\n");
473 mutex_lock(&slave_image
->mtx
);
474 if (slave_image
->locked
== 0)
475 printk(KERN_ERR
"Image is already free\n");
477 slave_image
->locked
= 0;
478 mutex_unlock(&slave_image
->mtx
);
480 /* Free up resource memory */
483 EXPORT_SYMBOL(vme_slave_free
);
486 * vme_master_request - Request a VME master window resource.
487 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
488 * @address: Required VME address space.
489 * @cycle: Required VME data transfer cycle type.
490 * @dwidth: Required VME data transfer width.
492 * Request use of a VME window resource capable of being set for the requested
493 * address space, data transfer cycle and width.
495 * Return: Pointer to VME resource on success, NULL on failure.
497 struct vme_resource
*vme_master_request(struct vme_dev
*vdev
, u32 address
,
498 u32 cycle
, u32 dwidth
)
500 struct vme_bridge
*bridge
;
501 struct list_head
*master_pos
= NULL
;
502 struct vme_master_resource
*allocated_image
= NULL
;
503 struct vme_master_resource
*master_image
= NULL
;
504 struct vme_resource
*resource
= NULL
;
506 bridge
= vdev
->bridge
;
507 if (bridge
== NULL
) {
508 printk(KERN_ERR
"Can't find VME bus\n");
512 /* Loop through master resources */
513 list_for_each(master_pos
, &bridge
->master_resources
) {
514 master_image
= list_entry(master_pos
,
515 struct vme_master_resource
, list
);
517 if (master_image
== NULL
) {
518 printk(KERN_WARNING
"Registered NULL master resource\n");
522 /* Find an unlocked and compatible image */
523 spin_lock(&master_image
->lock
);
524 if (((master_image
->address_attr
& address
) == address
) &&
525 ((master_image
->cycle_attr
& cycle
) == cycle
) &&
526 ((master_image
->width_attr
& dwidth
) == dwidth
) &&
527 (master_image
->locked
== 0)) {
529 master_image
->locked
= 1;
530 spin_unlock(&master_image
->lock
);
531 allocated_image
= master_image
;
534 spin_unlock(&master_image
->lock
);
537 /* Check to see if we found a resource */
538 if (allocated_image
== NULL
) {
539 printk(KERN_ERR
"Can't find a suitable resource\n");
543 resource
= kmalloc(sizeof(*resource
), GFP_KERNEL
);
547 resource
->type
= VME_MASTER
;
548 resource
->entry
= &allocated_image
->list
;
554 spin_lock(&master_image
->lock
);
555 master_image
->locked
= 0;
556 spin_unlock(&master_image
->lock
);
561 EXPORT_SYMBOL(vme_master_request
);
564 * vme_master_set - Set VME master window configuration.
565 * @resource: Pointer to VME master resource.
566 * @enabled: State to which the window should be configured.
567 * @vme_base: Base address for the window.
568 * @size: Size of the VME window.
569 * @aspace: VME address space for the VME window.
570 * @cycle: VME data transfer cycle type for the VME window.
571 * @dwidth: VME data transfer width for the VME window.
573 * Set configuration for provided VME master window.
575 * Return: Zero on success, -EINVAL if operation is not supported on this
576 * device, if an invalid resource has been provided or invalid
577 * attributes are provided. Hardware specific errors may also be
580 int vme_master_set(struct vme_resource
*resource
, int enabled
,
581 unsigned long long vme_base
, unsigned long long size
, u32 aspace
,
582 u32 cycle
, u32 dwidth
)
584 struct vme_bridge
*bridge
= find_bridge(resource
);
585 struct vme_master_resource
*image
;
588 if (resource
->type
!= VME_MASTER
) {
589 printk(KERN_ERR
"Not a master resource\n");
593 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
595 if (bridge
->master_set
== NULL
) {
596 printk(KERN_WARNING
"vme_master_set not supported\n");
600 if (!(((image
->address_attr
& aspace
) == aspace
) &&
601 ((image
->cycle_attr
& cycle
) == cycle
) &&
602 ((image
->width_attr
& dwidth
) == dwidth
))) {
603 printk(KERN_WARNING
"Invalid attributes\n");
607 retval
= vme_check_window(aspace
, vme_base
, size
);
611 return bridge
->master_set(image
, enabled
, vme_base
, size
, aspace
,
614 EXPORT_SYMBOL(vme_master_set
);
617 * vme_master_get - Retrieve VME master window configuration.
618 * @resource: Pointer to VME master resource.
619 * @enabled: Pointer to variable for storing state.
620 * @vme_base: Pointer to variable for storing window base address.
621 * @size: Pointer to variable for storing window size.
622 * @aspace: Pointer to variable for storing VME address space.
623 * @cycle: Pointer to variable for storing VME data transfer cycle type.
624 * @dwidth: Pointer to variable for storing VME data transfer width.
626 * Return configuration for provided VME master window.
628 * Return: Zero on success, -EINVAL if operation is not supported on this
629 * device or if an invalid resource has been provided.
631 int vme_master_get(struct vme_resource
*resource
, int *enabled
,
632 unsigned long long *vme_base
, unsigned long long *size
, u32
*aspace
,
633 u32
*cycle
, u32
*dwidth
)
635 struct vme_bridge
*bridge
= find_bridge(resource
);
636 struct vme_master_resource
*image
;
638 if (resource
->type
!= VME_MASTER
) {
639 printk(KERN_ERR
"Not a master resource\n");
643 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
645 if (bridge
->master_get
== NULL
) {
646 printk(KERN_WARNING
"%s not supported\n", __func__
);
650 return bridge
->master_get(image
, enabled
, vme_base
, size
, aspace
,
653 EXPORT_SYMBOL(vme_master_get
);
656 * vme_master_write - Read data from VME space into a buffer.
657 * @resource: Pointer to VME master resource.
658 * @buf: Pointer to buffer where data should be transferred.
659 * @count: Number of bytes to transfer.
660 * @offset: Offset into VME master window at which to start transfer.
662 * Perform read of count bytes of data from location on VME bus which maps into
663 * the VME master window at offset to buf.
665 * Return: Number of bytes read, -EINVAL if resource is not a VME master
666 * resource or read operation is not supported. -EFAULT returned if
667 * invalid offset is provided. Hardware specific errors may also be
670 ssize_t
vme_master_read(struct vme_resource
*resource
, void *buf
, size_t count
,
673 struct vme_bridge
*bridge
= find_bridge(resource
);
674 struct vme_master_resource
*image
;
677 if (bridge
->master_read
== NULL
) {
678 printk(KERN_WARNING
"Reading from resource not supported\n");
682 if (resource
->type
!= VME_MASTER
) {
683 printk(KERN_ERR
"Not a master resource\n");
687 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
689 length
= vme_get_size(resource
);
691 if (offset
> length
) {
692 printk(KERN_WARNING
"Invalid Offset\n");
696 if ((offset
+ count
) > length
)
697 count
= length
- offset
;
699 return bridge
->master_read(image
, buf
, count
, offset
);
702 EXPORT_SYMBOL(vme_master_read
);
705 * vme_master_write - Write data out to VME space from a buffer.
706 * @resource: Pointer to VME master resource.
707 * @buf: Pointer to buffer holding data to transfer.
708 * @count: Number of bytes to transfer.
709 * @offset: Offset into VME master window at which to start transfer.
711 * Perform write of count bytes of data from buf to location on VME bus which
712 * maps into the VME master window at offset.
714 * Return: Number of bytes written, -EINVAL if resource is not a VME master
715 * resource or write operation is not supported. -EFAULT returned if
716 * invalid offset is provided. Hardware specific errors may also be
719 ssize_t
vme_master_write(struct vme_resource
*resource
, void *buf
,
720 size_t count
, loff_t offset
)
722 struct vme_bridge
*bridge
= find_bridge(resource
);
723 struct vme_master_resource
*image
;
726 if (bridge
->master_write
== NULL
) {
727 printk(KERN_WARNING
"Writing to resource not supported\n");
731 if (resource
->type
!= VME_MASTER
) {
732 printk(KERN_ERR
"Not a master resource\n");
736 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
738 length
= vme_get_size(resource
);
740 if (offset
> length
) {
741 printk(KERN_WARNING
"Invalid Offset\n");
745 if ((offset
+ count
) > length
)
746 count
= length
- offset
;
748 return bridge
->master_write(image
, buf
, count
, offset
);
750 EXPORT_SYMBOL(vme_master_write
);
753 * vme_master_rmw - Perform read-modify-write cycle.
754 * @resource: Pointer to VME master resource.
755 * @mask: Bits to be compared and swapped in operation.
756 * @compare: Bits to be compared with data read from offset.
757 * @swap: Bits to be swapped in data read from offset.
758 * @offset: Offset into VME master window at which to perform operation.
760 * Perform read-modify-write cycle on provided location:
761 * - Location on VME bus is read.
762 * - Bits selected by mask are compared with compare.
763 * - Where a selected bit matches that in compare and are selected in swap,
764 * the bit is swapped.
765 * - Result written back to location on VME bus.
767 * Return: Bytes written on success, -EINVAL if resource is not a VME master
768 * resource or RMW operation is not supported. Hardware specific
769 * errors may also be returned.
771 unsigned int vme_master_rmw(struct vme_resource
*resource
, unsigned int mask
,
772 unsigned int compare
, unsigned int swap
, loff_t offset
)
774 struct vme_bridge
*bridge
= find_bridge(resource
);
775 struct vme_master_resource
*image
;
777 if (bridge
->master_rmw
== NULL
) {
778 printk(KERN_WARNING
"Writing to resource not supported\n");
782 if (resource
->type
!= VME_MASTER
) {
783 printk(KERN_ERR
"Not a master resource\n");
787 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
789 return bridge
->master_rmw(image
, mask
, compare
, swap
, offset
);
791 EXPORT_SYMBOL(vme_master_rmw
);
794 * vme_master_mmap - Mmap region of VME master window.
795 * @resource: Pointer to VME master resource.
796 * @vma: Pointer to definition of user mapping.
798 * Memory map a region of the VME master window into user space.
800 * Return: Zero on success, -EINVAL if resource is not a VME master
801 * resource or -EFAULT if map exceeds window size. Other generic mmap
802 * errors may also be returned.
804 int vme_master_mmap(struct vme_resource
*resource
, struct vm_area_struct
*vma
)
806 struct vme_master_resource
*image
;
807 phys_addr_t phys_addr
;
808 unsigned long vma_size
;
810 if (resource
->type
!= VME_MASTER
) {
811 pr_err("Not a master resource\n");
815 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
816 phys_addr
= image
->bus_resource
.start
+ (vma
->vm_pgoff
<< PAGE_SHIFT
);
817 vma_size
= vma
->vm_end
- vma
->vm_start
;
819 if (phys_addr
+ vma_size
> image
->bus_resource
.end
+ 1) {
820 pr_err("Map size cannot exceed the window size\n");
824 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
826 return vm_iomap_memory(vma
, phys_addr
, vma
->vm_end
- vma
->vm_start
);
828 EXPORT_SYMBOL(vme_master_mmap
);
831 * vme_master_free - Free VME master window
832 * @resource: Pointer to VME master resource.
834 * Free the provided master resource so that it may be reallocated.
836 void vme_master_free(struct vme_resource
*resource
)
838 struct vme_master_resource
*master_image
;
840 if (resource
->type
!= VME_MASTER
) {
841 printk(KERN_ERR
"Not a master resource\n");
845 master_image
= list_entry(resource
->entry
, struct vme_master_resource
,
847 if (master_image
== NULL
) {
848 printk(KERN_ERR
"Can't find master resource\n");
853 spin_lock(&master_image
->lock
);
854 if (master_image
->locked
== 0)
855 printk(KERN_ERR
"Image is already free\n");
857 master_image
->locked
= 0;
858 spin_unlock(&master_image
->lock
);
860 /* Free up resource memory */
863 EXPORT_SYMBOL(vme_master_free
);
866 * vme_dma_request - Request a DMA controller.
867 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
868 * @route: Required src/destination combination.
870 * Request a VME DMA controller with capability to perform transfers bewteen
871 * requested source/destination combination.
873 * Return: Pointer to VME DMA resource on success, NULL on failure.
875 struct vme_resource
*vme_dma_request(struct vme_dev
*vdev
, u32 route
)
877 struct vme_bridge
*bridge
;
878 struct list_head
*dma_pos
= NULL
;
879 struct vme_dma_resource
*allocated_ctrlr
= NULL
;
880 struct vme_dma_resource
*dma_ctrlr
= NULL
;
881 struct vme_resource
*resource
= NULL
;
883 /* XXX Not checking resource attributes */
884 printk(KERN_ERR
"No VME resource Attribute tests done\n");
886 bridge
= vdev
->bridge
;
887 if (bridge
== NULL
) {
888 printk(KERN_ERR
"Can't find VME bus\n");
892 /* Loop through DMA resources */
893 list_for_each(dma_pos
, &bridge
->dma_resources
) {
894 dma_ctrlr
= list_entry(dma_pos
,
895 struct vme_dma_resource
, list
);
897 if (dma_ctrlr
== NULL
) {
898 printk(KERN_ERR
"Registered NULL DMA resource\n");
902 /* Find an unlocked and compatible controller */
903 mutex_lock(&dma_ctrlr
->mtx
);
904 if (((dma_ctrlr
->route_attr
& route
) == route
) &&
905 (dma_ctrlr
->locked
== 0)) {
907 dma_ctrlr
->locked
= 1;
908 mutex_unlock(&dma_ctrlr
->mtx
);
909 allocated_ctrlr
= dma_ctrlr
;
912 mutex_unlock(&dma_ctrlr
->mtx
);
915 /* Check to see if we found a resource */
916 if (allocated_ctrlr
== NULL
)
919 resource
= kmalloc(sizeof(*resource
), GFP_KERNEL
);
923 resource
->type
= VME_DMA
;
924 resource
->entry
= &allocated_ctrlr
->list
;
930 mutex_lock(&dma_ctrlr
->mtx
);
931 dma_ctrlr
->locked
= 0;
932 mutex_unlock(&dma_ctrlr
->mtx
);
937 EXPORT_SYMBOL(vme_dma_request
);
940 * vme_new_dma_list - Create new VME DMA list.
941 * @resource: Pointer to VME DMA resource.
943 * Create a new VME DMA list. It is the responsibility of the user to free
944 * the list once it is no longer required with vme_dma_list_free().
946 * Return: Pointer to new VME DMA list, NULL on allocation failure or invalid
949 struct vme_dma_list
*vme_new_dma_list(struct vme_resource
*resource
)
951 struct vme_dma_resource
*ctrlr
;
952 struct vme_dma_list
*dma_list
;
954 if (resource
->type
!= VME_DMA
) {
955 printk(KERN_ERR
"Not a DMA resource\n");
959 ctrlr
= list_entry(resource
->entry
, struct vme_dma_resource
, list
);
961 dma_list
= kmalloc(sizeof(*dma_list
), GFP_KERNEL
);
965 INIT_LIST_HEAD(&dma_list
->entries
);
966 dma_list
->parent
= ctrlr
;
967 mutex_init(&dma_list
->mtx
);
971 EXPORT_SYMBOL(vme_new_dma_list
);
974 * vme_dma_pattern_attribute - Create "Pattern" type VME DMA list attribute.
975 * @pattern: Value to use used as pattern
976 * @type: Type of pattern to be written.
978 * Create VME DMA list attribute for pattern generation. It is the
979 * responsibility of the user to free used attributes using
980 * vme_dma_free_attribute().
982 * Return: Pointer to VME DMA attribute, NULL on failure.
984 struct vme_dma_attr
*vme_dma_pattern_attribute(u32 pattern
, u32 type
)
986 struct vme_dma_attr
*attributes
;
987 struct vme_dma_pattern
*pattern_attr
;
989 attributes
= kmalloc(sizeof(*attributes
), GFP_KERNEL
);
993 pattern_attr
= kmalloc(sizeof(*pattern_attr
), GFP_KERNEL
);
997 attributes
->type
= VME_DMA_PATTERN
;
998 attributes
->private = (void *)pattern_attr
;
1000 pattern_attr
->pattern
= pattern
;
1001 pattern_attr
->type
= type
;
1010 EXPORT_SYMBOL(vme_dma_pattern_attribute
);
1013 * vme_dma_pci_attribute - Create "PCI" type VME DMA list attribute.
1014 * @address: PCI base address for DMA transfer.
1016 * Create VME DMA list attribute pointing to a location on PCI for DMA
1017 * transfers. It is the responsibility of the user to free used attributes
1018 * using vme_dma_free_attribute().
1020 * Return: Pointer to VME DMA attribute, NULL on failure.
1022 struct vme_dma_attr
*vme_dma_pci_attribute(dma_addr_t address
)
1024 struct vme_dma_attr
*attributes
;
1025 struct vme_dma_pci
*pci_attr
;
1027 /* XXX Run some sanity checks here */
1029 attributes
= kmalloc(sizeof(*attributes
), GFP_KERNEL
);
1033 pci_attr
= kmalloc(sizeof(*pci_attr
), GFP_KERNEL
);
1037 attributes
->type
= VME_DMA_PCI
;
1038 attributes
->private = (void *)pci_attr
;
1040 pci_attr
->address
= address
;
1049 EXPORT_SYMBOL(vme_dma_pci_attribute
);
1052 * vme_dma_vme_attribute - Create "VME" type VME DMA list attribute.
1053 * @address: VME base address for DMA transfer.
1054 * @aspace: VME address space to use for DMA transfer.
1055 * @cycle: VME bus cycle to use for DMA transfer.
1056 * @dwidth: VME data width to use for DMA transfer.
1058 * Create VME DMA list attribute pointing to a location on the VME bus for DMA
1059 * transfers. It is the responsibility of the user to free used attributes
1060 * using vme_dma_free_attribute().
1062 * Return: Pointer to VME DMA attribute, NULL on failure.
1064 struct vme_dma_attr
*vme_dma_vme_attribute(unsigned long long address
,
1065 u32 aspace
, u32 cycle
, u32 dwidth
)
1067 struct vme_dma_attr
*attributes
;
1068 struct vme_dma_vme
*vme_attr
;
1070 attributes
= kmalloc(sizeof(*attributes
), GFP_KERNEL
);
1074 vme_attr
= kmalloc(sizeof(*vme_attr
), GFP_KERNEL
);
1078 attributes
->type
= VME_DMA_VME
;
1079 attributes
->private = (void *)vme_attr
;
1081 vme_attr
->address
= address
;
1082 vme_attr
->aspace
= aspace
;
1083 vme_attr
->cycle
= cycle
;
1084 vme_attr
->dwidth
= dwidth
;
1093 EXPORT_SYMBOL(vme_dma_vme_attribute
);
1096 * vme_dma_free_attribute - Free DMA list attribute.
1097 * @attributes: Pointer to DMA list attribute.
1099 * Free VME DMA list attribute. VME DMA list attributes can be safely freed
1100 * once vme_dma_list_add() has returned.
1102 void vme_dma_free_attribute(struct vme_dma_attr
*attributes
)
1104 kfree(attributes
->private);
1107 EXPORT_SYMBOL(vme_dma_free_attribute
);
1110 * vme_dma_list_add - Add enty to a VME DMA list.
1111 * @list: Pointer to VME list.
1112 * @src: Pointer to DMA list attribute to use as source.
1113 * @dest: Pointer to DMA list attribute to use as destination.
1114 * @count: Number of bytes to transfer.
1116 * Add an entry to the provided VME DMA list. Entry requires pointers to source
1117 * and destination DMA attributes and a count.
1119 * Please note, the attributes supported as source and destinations for
1120 * transfers are hardware dependent.
1122 * Return: Zero on success, -EINVAL if operation is not supported on this
1123 * device or if the link list has already been submitted for execution.
1124 * Hardware specific errors also possible.
1126 int vme_dma_list_add(struct vme_dma_list
*list
, struct vme_dma_attr
*src
,
1127 struct vme_dma_attr
*dest
, size_t count
)
1129 struct vme_bridge
*bridge
= list
->parent
->parent
;
1132 if (bridge
->dma_list_add
== NULL
) {
1133 printk(KERN_WARNING
"Link List DMA generation not supported\n");
1137 if (!mutex_trylock(&list
->mtx
)) {
1138 printk(KERN_ERR
"Link List already submitted\n");
1142 retval
= bridge
->dma_list_add(list
, src
, dest
, count
);
1144 mutex_unlock(&list
->mtx
);
1148 EXPORT_SYMBOL(vme_dma_list_add
);
1151 * vme_dma_list_exec - Queue a VME DMA list for execution.
1152 * @list: Pointer to VME list.
1154 * Queue the provided VME DMA list for execution. The call will return once the
1155 * list has been executed.
1157 * Return: Zero on success, -EINVAL if operation is not supported on this
1158 * device. Hardware specific errors also possible.
1160 int vme_dma_list_exec(struct vme_dma_list
*list
)
1162 struct vme_bridge
*bridge
= list
->parent
->parent
;
1165 if (bridge
->dma_list_exec
== NULL
) {
1166 printk(KERN_ERR
"Link List DMA execution not supported\n");
1170 mutex_lock(&list
->mtx
);
1172 retval
= bridge
->dma_list_exec(list
);
1174 mutex_unlock(&list
->mtx
);
1178 EXPORT_SYMBOL(vme_dma_list_exec
);
1181 * vme_dma_list_free - Free a VME DMA list.
1182 * @list: Pointer to VME list.
1184 * Free the provided DMA list and all its entries.
1186 * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
1187 * is still in use. Hardware specific errors also possible.
1189 int vme_dma_list_free(struct vme_dma_list
*list
)
1191 struct vme_bridge
*bridge
= list
->parent
->parent
;
1194 if (bridge
->dma_list_empty
== NULL
) {
1195 printk(KERN_WARNING
"Emptying of Link Lists not supported\n");
1199 if (!mutex_trylock(&list
->mtx
)) {
1200 printk(KERN_ERR
"Link List in use\n");
1205 * Empty out all of the entries from the DMA list. We need to go to the
1206 * low level driver as DMA entries are driver specific.
1208 retval
= bridge
->dma_list_empty(list
);
1210 printk(KERN_ERR
"Unable to empty link-list entries\n");
1211 mutex_unlock(&list
->mtx
);
1214 mutex_unlock(&list
->mtx
);
1219 EXPORT_SYMBOL(vme_dma_list_free
);
1222 * vme_dma_free - Free a VME DMA resource.
1223 * @resource: Pointer to VME DMA resource.
1225 * Free the provided DMA resource so that it may be reallocated.
1227 * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
1230 int vme_dma_free(struct vme_resource
*resource
)
1232 struct vme_dma_resource
*ctrlr
;
1234 if (resource
->type
!= VME_DMA
) {
1235 printk(KERN_ERR
"Not a DMA resource\n");
1239 ctrlr
= list_entry(resource
->entry
, struct vme_dma_resource
, list
);
1241 if (!mutex_trylock(&ctrlr
->mtx
)) {
1242 printk(KERN_ERR
"Resource busy, can't free\n");
1246 if (!(list_empty(&ctrlr
->pending
) && list_empty(&ctrlr
->running
))) {
1247 printk(KERN_WARNING
"Resource still processing transfers\n");
1248 mutex_unlock(&ctrlr
->mtx
);
1254 mutex_unlock(&ctrlr
->mtx
);
1260 EXPORT_SYMBOL(vme_dma_free
);
1262 void vme_bus_error_handler(struct vme_bridge
*bridge
,
1263 unsigned long long address
, int am
)
1265 struct list_head
*handler_pos
= NULL
;
1266 struct vme_error_handler
*handler
;
1267 int handler_triggered
= 0;
1268 u32 aspace
= vme_get_aspace(am
);
1270 list_for_each(handler_pos
, &bridge
->vme_error_handlers
) {
1271 handler
= list_entry(handler_pos
, struct vme_error_handler
,
1273 if ((aspace
== handler
->aspace
) &&
1274 (address
>= handler
->start
) &&
1275 (address
< handler
->end
)) {
1276 if (!handler
->num_errors
)
1277 handler
->first_error
= address
;
1278 if (handler
->num_errors
!= UINT_MAX
)
1279 handler
->num_errors
++;
1280 handler_triggered
= 1;
1284 if (!handler_triggered
)
1285 dev_err(bridge
->parent
,
1286 "Unhandled VME access error at address 0x%llx\n",
1289 EXPORT_SYMBOL(vme_bus_error_handler
);
1291 struct vme_error_handler
*vme_register_error_handler(
1292 struct vme_bridge
*bridge
, u32 aspace
,
1293 unsigned long long address
, size_t len
)
1295 struct vme_error_handler
*handler
;
1297 handler
= kmalloc(sizeof(*handler
), GFP_KERNEL
);
1301 handler
->aspace
= aspace
;
1302 handler
->start
= address
;
1303 handler
->end
= address
+ len
;
1304 handler
->num_errors
= 0;
1305 handler
->first_error
= 0;
1306 list_add_tail(&handler
->list
, &bridge
->vme_error_handlers
);
1310 EXPORT_SYMBOL(vme_register_error_handler
);
1312 void vme_unregister_error_handler(struct vme_error_handler
*handler
)
1314 list_del(&handler
->list
);
1317 EXPORT_SYMBOL(vme_unregister_error_handler
);
1319 void vme_irq_handler(struct vme_bridge
*bridge
, int level
, int statid
)
1321 void (*call
)(int, int, void *);
1324 call
= bridge
->irq
[level
- 1].callback
[statid
].func
;
1325 priv_data
= bridge
->irq
[level
- 1].callback
[statid
].priv_data
;
1328 call(level
, statid
, priv_data
);
1330 printk(KERN_WARNING
"Spurious VME interrupt, level:%x, vector:%x\n",
1333 EXPORT_SYMBOL(vme_irq_handler
);
1336 * vme_irq_request - Request a specific VME interrupt.
1337 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
1338 * @level: Interrupt priority being requested.
1339 * @statid: Interrupt vector being requested.
1340 * @callback: Pointer to callback function called when VME interrupt/vector
1342 * @priv_data: Generic pointer that will be passed to the callback function.
1344 * Request callback to be attached as a handler for VME interrupts with provided
1347 * Return: Zero on success, -EINVAL on invalid vme device, level or if the
1348 * function is not supported, -EBUSY if the level/statid combination is
1349 * already in use. Hardware specific errors also possible.
1351 int vme_irq_request(struct vme_dev
*vdev
, int level
, int statid
,
1352 void (*callback
)(int, int, void *),
1355 struct vme_bridge
*bridge
;
1357 bridge
= vdev
->bridge
;
1358 if (bridge
== NULL
) {
1359 printk(KERN_ERR
"Can't find VME bus\n");
1363 if ((level
< 1) || (level
> 7)) {
1364 printk(KERN_ERR
"Invalid interrupt level\n");
1368 if (bridge
->irq_set
== NULL
) {
1369 printk(KERN_ERR
"Configuring interrupts not supported\n");
1373 mutex_lock(&bridge
->irq_mtx
);
1375 if (bridge
->irq
[level
- 1].callback
[statid
].func
) {
1376 mutex_unlock(&bridge
->irq_mtx
);
1377 printk(KERN_WARNING
"VME Interrupt already taken\n");
1381 bridge
->irq
[level
- 1].count
++;
1382 bridge
->irq
[level
- 1].callback
[statid
].priv_data
= priv_data
;
1383 bridge
->irq
[level
- 1].callback
[statid
].func
= callback
;
1385 /* Enable IRQ level */
1386 bridge
->irq_set(bridge
, level
, 1, 1);
1388 mutex_unlock(&bridge
->irq_mtx
);
1392 EXPORT_SYMBOL(vme_irq_request
);
1395 * vme_irq_free - Free a VME interrupt.
1396 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
1397 * @level: Interrupt priority of interrupt being freed.
1398 * @statid: Interrupt vector of interrupt being freed.
1400 * Remove previously attached callback from VME interrupt priority/vector.
1402 void vme_irq_free(struct vme_dev
*vdev
, int level
, int statid
)
1404 struct vme_bridge
*bridge
;
1406 bridge
= vdev
->bridge
;
1407 if (bridge
== NULL
) {
1408 printk(KERN_ERR
"Can't find VME bus\n");
1412 if ((level
< 1) || (level
> 7)) {
1413 printk(KERN_ERR
"Invalid interrupt level\n");
1417 if (bridge
->irq_set
== NULL
) {
1418 printk(KERN_ERR
"Configuring interrupts not supported\n");
1422 mutex_lock(&bridge
->irq_mtx
);
1424 bridge
->irq
[level
- 1].count
--;
1426 /* Disable IRQ level if no more interrupts attached at this level*/
1427 if (bridge
->irq
[level
- 1].count
== 0)
1428 bridge
->irq_set(bridge
, level
, 0, 1);
1430 bridge
->irq
[level
- 1].callback
[statid
].func
= NULL
;
1431 bridge
->irq
[level
- 1].callback
[statid
].priv_data
= NULL
;
1433 mutex_unlock(&bridge
->irq_mtx
);
1435 EXPORT_SYMBOL(vme_irq_free
);
1438 * vme_irq_generate - Generate VME interrupt.
1439 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
1440 * @level: Interrupt priority at which to assert the interrupt.
1441 * @statid: Interrupt vector to associate with the interrupt.
1443 * Generate a VME interrupt of the provided level and with the provided
1446 * Return: Zero on success, -EINVAL on invalid vme device, level or if the
1447 * function is not supported. Hardware specific errors also possible.
1449 int vme_irq_generate(struct vme_dev
*vdev
, int level
, int statid
)
1451 struct vme_bridge
*bridge
;
1453 bridge
= vdev
->bridge
;
1454 if (bridge
== NULL
) {
1455 printk(KERN_ERR
"Can't find VME bus\n");
1459 if ((level
< 1) || (level
> 7)) {
1460 printk(KERN_WARNING
"Invalid interrupt level\n");
1464 if (bridge
->irq_generate
== NULL
) {
1465 printk(KERN_WARNING
"Interrupt generation not supported\n");
1469 return bridge
->irq_generate(bridge
, level
, statid
);
1471 EXPORT_SYMBOL(vme_irq_generate
);
1474 * vme_lm_request - Request a VME location monitor
1475 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
1477 * Allocate a location monitor resource to the driver. A location monitor
1478 * allows the driver to monitor accesses to a contiguous number of
1479 * addresses on the VME bus.
1481 * Return: Pointer to a VME resource on success or NULL on failure.
1483 struct vme_resource
*vme_lm_request(struct vme_dev
*vdev
)
1485 struct vme_bridge
*bridge
;
1486 struct list_head
*lm_pos
= NULL
;
1487 struct vme_lm_resource
*allocated_lm
= NULL
;
1488 struct vme_lm_resource
*lm
= NULL
;
1489 struct vme_resource
*resource
= NULL
;
1491 bridge
= vdev
->bridge
;
1492 if (bridge
== NULL
) {
1493 printk(KERN_ERR
"Can't find VME bus\n");
1497 /* Loop through LM resources */
1498 list_for_each(lm_pos
, &bridge
->lm_resources
) {
1499 lm
= list_entry(lm_pos
,
1500 struct vme_lm_resource
, list
);
1503 printk(KERN_ERR
"Registered NULL Location Monitor resource\n");
1507 /* Find an unlocked controller */
1508 mutex_lock(&lm
->mtx
);
1509 if (lm
->locked
== 0) {
1511 mutex_unlock(&lm
->mtx
);
1515 mutex_unlock(&lm
->mtx
);
1518 /* Check to see if we found a resource */
1519 if (allocated_lm
== NULL
)
1522 resource
= kmalloc(sizeof(*resource
), GFP_KERNEL
);
1526 resource
->type
= VME_LM
;
1527 resource
->entry
= &allocated_lm
->list
;
1533 mutex_lock(&lm
->mtx
);
1535 mutex_unlock(&lm
->mtx
);
1540 EXPORT_SYMBOL(vme_lm_request
);
1543 * vme_lm_count - Determine number of VME Addresses monitored
1544 * @resource: Pointer to VME location monitor resource.
1546 * The number of contiguous addresses monitored is hardware dependent.
1547 * Return the number of contiguous addresses monitored by the
1550 * Return: Count of addresses monitored or -EINVAL when provided with an
1551 * invalid location monitor resource.
1553 int vme_lm_count(struct vme_resource
*resource
)
1555 struct vme_lm_resource
*lm
;
1557 if (resource
->type
!= VME_LM
) {
1558 printk(KERN_ERR
"Not a Location Monitor resource\n");
1562 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1564 return lm
->monitors
;
1566 EXPORT_SYMBOL(vme_lm_count
);
1569 * vme_lm_set - Configure location monitor
1570 * @resource: Pointer to VME location monitor resource.
1571 * @lm_base: Base address to monitor.
1572 * @aspace: VME address space to monitor.
1573 * @cycle: VME bus cycle type to monitor.
1575 * Set the base address, address space and cycle type of accesses to be
1576 * monitored by the location monitor.
1578 * Return: Zero on success, -EINVAL when provided with an invalid location
1579 * monitor resource or function is not supported. Hardware specific
1580 * errors may also be returned.
1582 int vme_lm_set(struct vme_resource
*resource
, unsigned long long lm_base
,
1583 u32 aspace
, u32 cycle
)
1585 struct vme_bridge
*bridge
= find_bridge(resource
);
1586 struct vme_lm_resource
*lm
;
1588 if (resource
->type
!= VME_LM
) {
1589 printk(KERN_ERR
"Not a Location Monitor resource\n");
1593 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1595 if (bridge
->lm_set
== NULL
) {
1596 printk(KERN_ERR
"vme_lm_set not supported\n");
1600 return bridge
->lm_set(lm
, lm_base
, aspace
, cycle
);
1602 EXPORT_SYMBOL(vme_lm_set
);
1605 * vme_lm_get - Retrieve location monitor settings
1606 * @resource: Pointer to VME location monitor resource.
1607 * @lm_base: Pointer used to output the base address monitored.
1608 * @aspace: Pointer used to output the address space monitored.
1609 * @cycle: Pointer used to output the VME bus cycle type monitored.
1611 * Retrieve the base address, address space and cycle type of accesses to
1612 * be monitored by the location monitor.
1614 * Return: Zero on success, -EINVAL when provided with an invalid location
1615 * monitor resource or function is not supported. Hardware specific
1616 * errors may also be returned.
1618 int vme_lm_get(struct vme_resource
*resource
, unsigned long long *lm_base
,
1619 u32
*aspace
, u32
*cycle
)
1621 struct vme_bridge
*bridge
= find_bridge(resource
);
1622 struct vme_lm_resource
*lm
;
1624 if (resource
->type
!= VME_LM
) {
1625 printk(KERN_ERR
"Not a Location Monitor resource\n");
1629 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1631 if (bridge
->lm_get
== NULL
) {
1632 printk(KERN_ERR
"vme_lm_get not supported\n");
1636 return bridge
->lm_get(lm
, lm_base
, aspace
, cycle
);
1638 EXPORT_SYMBOL(vme_lm_get
);
1641 * vme_lm_attach - Provide callback for location monitor address
1642 * @resource: Pointer to VME location monitor resource.
1643 * @monitor: Offset to which callback should be attached.
1644 * @callback: Pointer to callback function called when triggered.
1645 * @data: Generic pointer that will be passed to the callback function.
1647 * Attach a callback to the specificed offset into the location monitors
1648 * monitored addresses. A generic pointer is provided to allow data to be
1649 * passed to the callback when called.
1651 * Return: Zero on success, -EINVAL when provided with an invalid location
1652 * monitor resource or function is not supported. Hardware specific
1653 * errors may also be returned.
1655 int vme_lm_attach(struct vme_resource
*resource
, int monitor
,
1656 void (*callback
)(void *), void *data
)
1658 struct vme_bridge
*bridge
= find_bridge(resource
);
1659 struct vme_lm_resource
*lm
;
1661 if (resource
->type
!= VME_LM
) {
1662 printk(KERN_ERR
"Not a Location Monitor resource\n");
1666 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1668 if (bridge
->lm_attach
== NULL
) {
1669 printk(KERN_ERR
"vme_lm_attach not supported\n");
1673 return bridge
->lm_attach(lm
, monitor
, callback
, data
);
1675 EXPORT_SYMBOL(vme_lm_attach
);
1678 * vme_lm_detach - Remove callback for location monitor address
1679 * @resource: Pointer to VME location monitor resource.
1680 * @monitor: Offset to which callback should be removed.
1682 * Remove the callback associated with the specificed offset into the
1683 * location monitors monitored addresses.
1685 * Return: Zero on success, -EINVAL when provided with an invalid location
1686 * monitor resource or function is not supported. Hardware specific
1687 * errors may also be returned.
1689 int vme_lm_detach(struct vme_resource
*resource
, int monitor
)
1691 struct vme_bridge
*bridge
= find_bridge(resource
);
1692 struct vme_lm_resource
*lm
;
1694 if (resource
->type
!= VME_LM
) {
1695 printk(KERN_ERR
"Not a Location Monitor resource\n");
1699 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1701 if (bridge
->lm_detach
== NULL
) {
1702 printk(KERN_ERR
"vme_lm_detach not supported\n");
1706 return bridge
->lm_detach(lm
, monitor
);
1708 EXPORT_SYMBOL(vme_lm_detach
);
1711 * vme_lm_free - Free allocated VME location monitor
1712 * @resource: Pointer to VME location monitor resource.
1714 * Free allocation of a VME location monitor.
1716 * WARNING: This function currently expects that any callbacks that have
1717 * been attached to the location monitor have been removed.
1719 * Return: Zero on success, -EINVAL when provided with an invalid location
1722 void vme_lm_free(struct vme_resource
*resource
)
1724 struct vme_lm_resource
*lm
;
1726 if (resource
->type
!= VME_LM
) {
1727 printk(KERN_ERR
"Not a Location Monitor resource\n");
1731 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1733 mutex_lock(&lm
->mtx
);
1736 * Check to see that there aren't any callbacks still attached, if
1737 * there are we should probably be detaching them!
1742 mutex_unlock(&lm
->mtx
);
1746 EXPORT_SYMBOL(vme_lm_free
);
1749 * vme_slot_num - Retrieve slot ID
1750 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
1752 * Retrieve the slot ID associated with the provided VME device.
1754 * Return: The slot ID on success, -EINVAL if VME bridge cannot be determined
1755 * or the function is not supported. Hardware specific errors may also
1758 int vme_slot_num(struct vme_dev
*vdev
)
1760 struct vme_bridge
*bridge
;
1762 bridge
= vdev
->bridge
;
1763 if (bridge
== NULL
) {
1764 printk(KERN_ERR
"Can't find VME bus\n");
1768 if (bridge
->slot_get
== NULL
) {
1769 printk(KERN_WARNING
"vme_slot_num not supported\n");
1773 return bridge
->slot_get(bridge
);
1775 EXPORT_SYMBOL(vme_slot_num
);
1778 * vme_bus_num - Retrieve bus number
1779 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
1781 * Retrieve the bus enumeration associated with the provided VME device.
1783 * Return: The bus number on success, -EINVAL if VME bridge cannot be
1786 int vme_bus_num(struct vme_dev
*vdev
)
1788 struct vme_bridge
*bridge
;
1790 bridge
= vdev
->bridge
;
1791 if (bridge
== NULL
) {
1792 pr_err("Can't find VME bus\n");
1798 EXPORT_SYMBOL(vme_bus_num
);
1800 /* - Bridge Registration --------------------------------------------------- */
/* Device-model release callback: frees the containing struct vme_dev. */
static void vme_dev_release(struct device *dev)
{
	kfree(dev_to_vme_dev(dev));
}
1807 /* Common bridge initialization */
1808 struct vme_bridge
*vme_init_bridge(struct vme_bridge
*bridge
)
1810 INIT_LIST_HEAD(&bridge
->vme_error_handlers
);
1811 INIT_LIST_HEAD(&bridge
->master_resources
);
1812 INIT_LIST_HEAD(&bridge
->slave_resources
);
1813 INIT_LIST_HEAD(&bridge
->dma_resources
);
1814 INIT_LIST_HEAD(&bridge
->lm_resources
);
1815 mutex_init(&bridge
->irq_mtx
);
1819 EXPORT_SYMBOL(vme_init_bridge
);
1821 int vme_register_bridge(struct vme_bridge
*bridge
)
1826 mutex_lock(&vme_buses_lock
);
1827 for (i
= 0; i
< sizeof(vme_bus_numbers
) * 8; i
++) {
1828 if ((vme_bus_numbers
& (1 << i
)) == 0) {
1829 vme_bus_numbers
|= (1 << i
);
1831 INIT_LIST_HEAD(&bridge
->devices
);
1832 list_add_tail(&bridge
->bus_list
, &vme_bus_list
);
1837 mutex_unlock(&vme_buses_lock
);
1841 EXPORT_SYMBOL(vme_register_bridge
);
1843 void vme_unregister_bridge(struct vme_bridge
*bridge
)
1845 struct vme_dev
*vdev
;
1846 struct vme_dev
*tmp
;
1848 mutex_lock(&vme_buses_lock
);
1849 vme_bus_numbers
&= ~(1 << bridge
->num
);
1850 list_for_each_entry_safe(vdev
, tmp
, &bridge
->devices
, bridge_list
) {
1851 list_del(&vdev
->drv_list
);
1852 list_del(&vdev
->bridge_list
);
1853 device_unregister(&vdev
->dev
);
1855 list_del(&bridge
->bus_list
);
1856 mutex_unlock(&vme_buses_lock
);
1858 EXPORT_SYMBOL(vme_unregister_bridge
);
1860 /* - Driver Registration --------------------------------------------------- */
1862 static int __vme_register_driver_bus(struct vme_driver
*drv
,
1863 struct vme_bridge
*bridge
, unsigned int ndevs
)
1867 struct vme_dev
*vdev
;
1868 struct vme_dev
*tmp
;
1870 for (i
= 0; i
< ndevs
; i
++) {
1871 vdev
= kzalloc(sizeof(*vdev
), GFP_KERNEL
);
1877 vdev
->bridge
= bridge
;
1878 vdev
->dev
.platform_data
= drv
;
1879 vdev
->dev
.release
= vme_dev_release
;
1880 vdev
->dev
.parent
= bridge
->parent
;
1881 vdev
->dev
.bus
= &vme_bus_type
;
1882 dev_set_name(&vdev
->dev
, "%s.%u-%u", drv
->name
, bridge
->num
,
1885 err
= device_register(&vdev
->dev
);
1889 if (vdev
->dev
.platform_data
) {
1890 list_add_tail(&vdev
->drv_list
, &drv
->devices
);
1891 list_add_tail(&vdev
->bridge_list
, &bridge
->devices
);
1893 device_unregister(&vdev
->dev
);
1898 put_device(&vdev
->dev
);
1901 list_for_each_entry_safe(vdev
, tmp
, &drv
->devices
, drv_list
) {
1902 list_del(&vdev
->drv_list
);
1903 list_del(&vdev
->bridge_list
);
1904 device_unregister(&vdev
->dev
);
1909 static int __vme_register_driver(struct vme_driver
*drv
, unsigned int ndevs
)
1911 struct vme_bridge
*bridge
;
1914 mutex_lock(&vme_buses_lock
);
1915 list_for_each_entry(bridge
, &vme_bus_list
, bus_list
) {
1917 * This cannot cause trouble as we already have vme_buses_lock
1918 * and if the bridge is removed, it will have to go through
1919 * vme_unregister_bridge() to do it (which calls remove() on
1920 * the bridge which in turn tries to acquire vme_buses_lock and
1921 * will have to wait).
1923 err
= __vme_register_driver_bus(drv
, bridge
, ndevs
);
1927 mutex_unlock(&vme_buses_lock
);
1932 * vme_register_driver - Register a VME driver
1933 * @drv: Pointer to VME driver structure to register.
1934 * @ndevs: Maximum number of devices to allow to be enumerated.
1936 * Register a VME device driver with the VME subsystem.
1938 * Return: Zero on success, error value on registration failure.
1940 int vme_register_driver(struct vme_driver
*drv
, unsigned int ndevs
)
1944 drv
->driver
.name
= drv
->name
;
1945 drv
->driver
.bus
= &vme_bus_type
;
1946 INIT_LIST_HEAD(&drv
->devices
);
1948 err
= driver_register(&drv
->driver
);
1952 err
= __vme_register_driver(drv
, ndevs
);
1954 driver_unregister(&drv
->driver
);
1958 EXPORT_SYMBOL(vme_register_driver
);
1961 * vme_unregister_driver - Unregister a VME driver
1962 * @drv: Pointer to VME driver structure to unregister.
1964 * Unregister a VME device driver from the VME subsystem.
1966 void vme_unregister_driver(struct vme_driver
*drv
)
1968 struct vme_dev
*dev
, *dev_tmp
;
1970 mutex_lock(&vme_buses_lock
);
1971 list_for_each_entry_safe(dev
, dev_tmp
, &drv
->devices
, drv_list
) {
1972 list_del(&dev
->drv_list
);
1973 list_del(&dev
->bridge_list
);
1974 device_unregister(&dev
->dev
);
1976 mutex_unlock(&vme_buses_lock
);
1978 driver_unregister(&drv
->driver
);
1980 EXPORT_SYMBOL(vme_unregister_driver
);
1982 /* - Bus Registration ------------------------------------------------------ */
1984 static int vme_bus_match(struct device
*dev
, struct device_driver
*drv
)
1986 struct vme_driver
*vme_drv
;
1988 vme_drv
= container_of(drv
, struct vme_driver
, driver
);
1990 if (dev
->platform_data
== vme_drv
) {
1991 struct vme_dev
*vdev
= dev_to_vme_dev(dev
);
1993 if (vme_drv
->match
&& vme_drv
->match(vdev
))
1996 dev
->platform_data
= NULL
;
2001 static int vme_bus_probe(struct device
*dev
)
2003 int retval
= -ENODEV
;
2004 struct vme_driver
*driver
;
2005 struct vme_dev
*vdev
= dev_to_vme_dev(dev
);
2007 driver
= dev
->platform_data
;
2009 if (driver
->probe
!= NULL
)
2010 retval
= driver
->probe(vdev
);
2015 static int vme_bus_remove(struct device
*dev
)
2017 int retval
= -ENODEV
;
2018 struct vme_driver
*driver
;
2019 struct vme_dev
*vdev
= dev_to_vme_dev(dev
);
2021 driver
= dev
->platform_data
;
2023 if (driver
->remove
!= NULL
)
2024 retval
= driver
->remove(vdev
);
2029 struct bus_type vme_bus_type
= {
2031 .match
= vme_bus_match
,
2032 .probe
= vme_bus_probe
,
2033 .remove
= vme_bus_remove
,
2035 EXPORT_SYMBOL(vme_bus_type
);
2037 static int __init
vme_init(void)
2039 return bus_register(&vme_bus_type
);
2041 subsys_initcall(vme_init
);