drivers/staging/vme/devices/vme_user.c
1 /*
2 * VMEbus User access driver
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by:
8 * Tom Armistead and Ajit Prem
9 * Copyright 2004 Motorola Inc.
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/atomic.h>
21 #include <linux/cdev.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/errno.h>
26 #include <linux/init.h>
27 #include <linux/ioctl.h>
28 #include <linux/kernel.h>
29 #include <linux/mm.h>
30 #include <linux/module.h>
31 #include <linux/pagemap.h>
32 #include <linux/pci.h>
33 #include <linux/mutex.h>
34 #include <linux/slab.h>
35 #include <linux/spinlock.h>
36 #include <linux/syscalls.h>
37 #include <linux/types.h>
38
39 #include <linux/io.h>
40 #include <linux/uaccess.h>
41 #include <linux/vme.h>
42
43 #include "vme_user.h"
44
45 static const char driver_name[] = "vme_user";
46
47 static int bus[VME_USER_BUS_MAX];
48 static unsigned int bus_num;
49
50 /* Currently Documentation/admin-guide/devices.rst defines the following for VME:
51 *
52 * 221 char VME bus
53 * 0 = /dev/bus/vme/m0 First master image
54 * 1 = /dev/bus/vme/m1 Second master image
55 * 2 = /dev/bus/vme/m2 Third master image
56 * 3 = /dev/bus/vme/m3 Fourth master image
57 * 4 = /dev/bus/vme/s0 First slave image
58 * 5 = /dev/bus/vme/s1 Second slave image
59 * 6 = /dev/bus/vme/s2 Third slave image
60 * 7 = /dev/bus/vme/s3 Fourth slave image
61 * 8 = /dev/bus/vme/ctl Control
62 *
63 * It is expected that all VME bus drivers will use the
64 * same interface. For interface documentation see
65 * http://www.vmelinux.org/.
66 *
67 * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't
68 * even support the tsi148 chipset (which has 8 master and 8 slave windows).
69 * We'll run with this for now as far as possible, however it probably makes
70 * sense to get rid of the old mappings and just do everything dynamically.
71 *
72 * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as
73 * defined above and try to support at least some of the interface from
74  *      http://www.vmelinux.org/. As an alternative, the driver can later be
75  *      rewritten to provide a saner interface.
76  *
77  *      The vmelinux.org driver never supported slave images; the devices reserved
78  *      for slaves were repurposed to support all 8 master images on the UniverseII!
79  *      We shall support 4 masters and 4 slaves with this driver.
80 */
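/*
 * Illustrative sketch (not part of the original driver): with the device
 * nodes listed above in place, the images are reached through ordinary
 * character-device operations. The paths below follow the table above and
 * are otherwise an assumption about how the nodes were created.
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      int m0  = open("/dev/bus/vme/m0", O_RDWR);   // first master image
 *      int s0  = open("/dev/bus/vme/s0", O_RDWR);   // first slave image
 *      int ctl = open("/dev/bus/vme/ctl", O_RDWR);  // control device (ioctl only)
 */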
81 #define VME_MAJOR 221 /* VME Major Device Number */
82 #define VME_DEVS 9 /* Number of dev entries */
83
84 #define MASTER_MINOR 0
85 #define MASTER_MAX 3
86 #define SLAVE_MINOR 4
87 #define SLAVE_MAX 7
88 #define CONTROL_MINOR 8
89
90 #define PCI_BUF_SIZE 0x20000 /* Size of one slave image buffer */
91
92 /*
93 * Structure to handle image related parameters.
94 */
95 struct image_desc {
96 void *kern_buf; /* Buffer address in kernel space */
97 dma_addr_t pci_buf; /* Buffer address in PCI address space */
98 unsigned long long size_buf; /* Buffer size */
99 struct mutex mutex; /* Mutex for locking image */
100 struct device *device; /* Sysfs device */
101 struct vme_resource *resource; /* VME resource */
102 int mmap_count; /* Number of current mmap's */
103 };
104
105 static struct image_desc image[VME_DEVS];
106
107 static struct cdev *vme_user_cdev; /* Character device */
108 static struct class *vme_user_sysfs_class; /* Sysfs class */
109 static struct vme_dev *vme_user_bridge; /* Pointer to user device */
110
111 static const int type[VME_DEVS] = { MASTER_MINOR, MASTER_MINOR,
112 MASTER_MINOR, MASTER_MINOR,
113 SLAVE_MINOR, SLAVE_MINOR,
114 SLAVE_MINOR, SLAVE_MINOR,
115 CONTROL_MINOR
116 };
117
118 struct vme_user_vma_priv {
119 unsigned int minor;
120 atomic_t refcnt;
121 };
122
123 static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
124 loff_t *ppos)
125 {
126 ssize_t copied = 0;
127
128 if (count > image[minor].size_buf)
129 count = image[minor].size_buf;
130
131 copied = vme_master_read(image[minor].resource, image[minor].kern_buf,
132 count, *ppos);
133 if (copied < 0)
134 return (int)copied;
135
136 if (__copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
137 return -EFAULT;
138
139 return copied;
140 }
141
142 static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
143 size_t count, loff_t *ppos)
144 {
145 if (count > image[minor].size_buf)
146 count = image[minor].size_buf;
147
148 if (__copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
149 return -EFAULT;
150
151 return vme_master_write(image[minor].resource, image[minor].kern_buf,
152 count, *ppos);
153 }
154
155 static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
156 size_t count, loff_t *ppos)
157 {
158 void *image_ptr;
159
160 image_ptr = image[minor].kern_buf + *ppos;
161 if (__copy_to_user(buf, image_ptr, (unsigned long)count))
162 return -EFAULT;
163
164 return count;
165 }
166
167 static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
168 size_t count, loff_t *ppos)
169 {
170 void *image_ptr;
171
172 image_ptr = image[minor].kern_buf + *ppos;
173 if (__copy_from_user(image_ptr, buf, (unsigned long)count))
174 return -EFAULT;
175
176 return count;
177 }
178
179 static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
180 loff_t *ppos)
181 {
182 unsigned int minor = MINOR(file_inode(file)->i_rdev);
183 ssize_t retval;
184 size_t image_size;
185
186 if (minor == CONTROL_MINOR)
187 return 0;
188
189 mutex_lock(&image[minor].mutex);
190
191 /* XXX Do we *really* want this helper - we can use vme_*_get ? */
192 image_size = vme_get_size(image[minor].resource);
193
194 /* Ensure we are starting at a valid location */
195 if ((*ppos < 0) || (*ppos > (image_size - 1))) {
196 mutex_unlock(&image[minor].mutex);
197 return 0;
198 }
199
200 /* Ensure not reading past end of the image */
201 if (*ppos + count > image_size)
202 count = image_size - *ppos;
203
204 switch (type[minor]) {
205 case MASTER_MINOR:
206 retval = resource_to_user(minor, buf, count, ppos);
207 break;
208 case SLAVE_MINOR:
209 retval = buffer_to_user(minor, buf, count, ppos);
210 break;
211 default:
212 retval = -EINVAL;
213 }
214
215 mutex_unlock(&image[minor].mutex);
216 if (retval > 0)
217 *ppos += retval;
218
219 return retval;
220 }
221
222 static ssize_t vme_user_write(struct file *file, const char __user *buf,
223 size_t count, loff_t *ppos)
224 {
225 unsigned int minor = MINOR(file_inode(file)->i_rdev);
226 ssize_t retval;
227 size_t image_size;
228
229 if (minor == CONTROL_MINOR)
230 return 0;
231
232 mutex_lock(&image[minor].mutex);
233
234 image_size = vme_get_size(image[minor].resource);
235
236 /* Ensure we are starting at a valid location */
237 if ((*ppos < 0) || (*ppos > (image_size - 1))) {
238 mutex_unlock(&image[minor].mutex);
239 return 0;
240 }
241
242         /* Ensure not writing past end of the image */
243 if (*ppos + count > image_size)
244 count = image_size - *ppos;
245
246 switch (type[minor]) {
247 case MASTER_MINOR:
248 retval = resource_from_user(minor, buf, count, ppos);
249 break;
250 case SLAVE_MINOR:
251 retval = buffer_from_user(minor, buf, count, ppos);
252 break;
253 default:
254 retval = -EINVAL;
255 }
256
257 mutex_unlock(&image[minor].mutex);
258
259 if (retval > 0)
260 *ppos += retval;
261
262 return retval;
263 }
264
265 static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
266 {
267 unsigned int minor = MINOR(file_inode(file)->i_rdev);
268 size_t image_size;
269 loff_t res;
270
271 switch (type[minor]) {
272 case MASTER_MINOR:
273 case SLAVE_MINOR:
274 mutex_lock(&image[minor].mutex);
275 image_size = vme_get_size(image[minor].resource);
276 res = fixed_size_llseek(file, off, whence, image_size);
277 mutex_unlock(&image[minor].mutex);
278 return res;
279 }
280
281 return -EINVAL;
282 }
283
284 /*
285 * The ioctls provided by the old VME access method (the one at vmelinux.org)
286  * are most certainly wrong, as they effectively push the register layout
287  * through to user space. Given that the VME core can handle multiple bridges
288  * with different register layouts, this is most certainly not the way to go.
289 *
290 * We aren't using the structures defined in the Motorola driver either - these
291 * are also quite low level, however we should use the definitions that have
292 * already been defined.
293 */
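/*
 * Illustrative sketch of the resulting userspace flow (an example, not a
 * definitive API description): a master window is configured through
 * VME_SET_MASTER and then accessed with read()/write() or mmap(). This
 * assumes vme_user.h and the VME_* address-space/cycle/width constants are
 * visible to userspace; the window parameters below are arbitrary examples.
 *
 *      struct vme_master cfg = {
 *              .enable   = 1,
 *              .vme_addr = 0x10000,    // base address on the VME bus
 *              .size     = 0x10000,    // window size in bytes
 *              .aspace   = VME_A32,    // address space
 *              .cycle    = VME_SCT,    // single cycle transfers
 *              .dwidth   = VME_D32,    // data width
 *      };
 *      char buf[64];
 *
 *      int fd = open("/dev/bus/vme/m0", O_RDWR);
 *      if (ioctl(fd, VME_SET_MASTER, &cfg) == 0) {
 *              lseek(fd, 0, SEEK_SET);         // offset within the window
 *              read(fd, buf, sizeof(buf));     // data fetched over the VME bus
 *      }
 */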
294 static int vme_user_ioctl(struct inode *inode, struct file *file,
295 unsigned int cmd, unsigned long arg)
296 {
297 struct vme_master master;
298 struct vme_slave slave;
299 struct vme_irq_id irq_req;
300 unsigned long copied;
301 unsigned int minor = MINOR(inode->i_rdev);
302 int retval;
303 dma_addr_t pci_addr;
304 void __user *argp = (void __user *)arg;
305
306 switch (type[minor]) {
307 case CONTROL_MINOR:
308 switch (cmd) {
309 case VME_IRQ_GEN:
310 copied = copy_from_user(&irq_req, argp,
311 sizeof(irq_req));
312 if (copied) {
313 pr_warn("Partial copy from userspace\n");
314 return -EFAULT;
315 }
316
317 return vme_irq_generate(vme_user_bridge,
318 irq_req.level,
319 irq_req.statid);
320 }
321 break;
322 case MASTER_MINOR:
323 switch (cmd) {
324 case VME_GET_MASTER:
325 memset(&master, 0, sizeof(master));
326
327 /* XXX We do not want to push aspace, cycle and width
328 * to userspace as they are
329 */
330 retval = vme_master_get(image[minor].resource,
331 &master.enable,
332 &master.vme_addr,
333 &master.size, &master.aspace,
334 &master.cycle, &master.dwidth);
335
336 copied = copy_to_user(argp, &master,
337 sizeof(master));
338 if (copied) {
339 pr_warn("Partial copy to userspace\n");
340 return -EFAULT;
341 }
342
343 return retval;
344
345 case VME_SET_MASTER:
346
347 if (image[minor].mmap_count != 0) {
348 pr_warn("Can't adjust mapped window\n");
349 return -EPERM;
350 }
351
352 copied = copy_from_user(&master, argp, sizeof(master));
353 if (copied) {
354 pr_warn("Partial copy from userspace\n");
355 return -EFAULT;
356 }
357
358 /* XXX We do not want to push aspace, cycle and width
359 * to userspace as they are
360 */
361 return vme_master_set(image[minor].resource,
362 master.enable, master.vme_addr, master.size,
363 master.aspace, master.cycle, master.dwidth);
364
365 break;
366 }
367 break;
368 case SLAVE_MINOR:
369 switch (cmd) {
370 case VME_GET_SLAVE:
371 memset(&slave, 0, sizeof(slave));
372
373 /* XXX We do not want to push aspace, cycle and width
374 * to userspace as they are
375 */
376 retval = vme_slave_get(image[minor].resource,
377 &slave.enable, &slave.vme_addr,
378 &slave.size, &pci_addr,
379 &slave.aspace, &slave.cycle);
380
381 copied = copy_to_user(argp, &slave,
382 sizeof(slave));
383 if (copied) {
384 pr_warn("Partial copy to userspace\n");
385 return -EFAULT;
386 }
387
388 return retval;
389
390 case VME_SET_SLAVE:
391
392 copied = copy_from_user(&slave, argp, sizeof(slave));
393 if (copied) {
394 pr_warn("Partial copy from userspace\n");
395 return -EFAULT;
396 }
397
398 /* XXX We do not want to push aspace, cycle and width
399 * to userspace as they are
400 */
401 return vme_slave_set(image[minor].resource,
402 slave.enable, slave.vme_addr, slave.size,
403 image[minor].pci_buf, slave.aspace,
404 slave.cycle);
405
406 break;
407 }
408 break;
409 }
410
411 return -EINVAL;
412 }
413
414 static long
415 vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
416 {
417 int ret;
418 struct inode *inode = file_inode(file);
419 unsigned int minor = MINOR(inode->i_rdev);
420
421 mutex_lock(&image[minor].mutex);
422 ret = vme_user_ioctl(inode, file, cmd, arg);
423 mutex_unlock(&image[minor].mutex);
424
425 return ret;
426 }
427
428 static void vme_user_vm_open(struct vm_area_struct *vma)
429 {
430 struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
431
432 atomic_inc(&vma_priv->refcnt);
433 }
434
435 static void vme_user_vm_close(struct vm_area_struct *vma)
436 {
437 struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
438 unsigned int minor = vma_priv->minor;
439
440 if (!atomic_dec_and_test(&vma_priv->refcnt))
441 return;
442
443 mutex_lock(&image[minor].mutex);
444 image[minor].mmap_count--;
445 mutex_unlock(&image[minor].mutex);
446
447 kfree(vma_priv);
448 }
449
450 static const struct vm_operations_struct vme_user_vm_ops = {
451 .open = vme_user_vm_open,
452 .close = vme_user_vm_close,
453 };
454
455 static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma)
456 {
457 int err;
458 struct vme_user_vma_priv *vma_priv;
459
460 mutex_lock(&image[minor].mutex);
461
462 err = vme_master_mmap(image[minor].resource, vma);
463 if (err) {
464 mutex_unlock(&image[minor].mutex);
465 return err;
466 }
467
468 vma_priv = kmalloc(sizeof(*vma_priv), GFP_KERNEL);
469 if (!vma_priv) {
470 mutex_unlock(&image[minor].mutex);
471 return -ENOMEM;
472 }
473
474 vma_priv->minor = minor;
475 atomic_set(&vma_priv->refcnt, 1);
476 vma->vm_ops = &vme_user_vm_ops;
477 vma->vm_private_data = vma_priv;
478
479 image[minor].mmap_count++;
480
481 mutex_unlock(&image[minor].mutex);
482
483 return 0;
484 }
485
486 static int vme_user_mmap(struct file *file, struct vm_area_struct *vma)
487 {
488 unsigned int minor = MINOR(file_inode(file)->i_rdev);
489
490 if (type[minor] == MASTER_MINOR)
491 return vme_user_master_mmap(minor, vma);
492
493 return -ENODEV;
494 }
495
496 static const struct file_operations vme_user_fops = {
497 .read = vme_user_read,
498 .write = vme_user_write,
499 .llseek = vme_user_llseek,
500 .unlocked_ioctl = vme_user_unlocked_ioctl,
501 .compat_ioctl = vme_user_unlocked_ioctl,
502 .mmap = vme_user_mmap,
503 };
504
505 static int vme_user_match(struct vme_dev *vdev)
506 {
507 int i;
508
509 int cur_bus = vme_bus_num(vdev);
510 int cur_slot = vme_slot_num(vdev);
511
512 for (i = 0; i < bus_num; i++)
513 if ((cur_bus == bus[i]) && (cur_slot == vdev->num))
514 return 1;
515
516 return 0;
517 }
518
519 /*
520 * In this simple access driver, the old behaviour is being preserved as much
521 * as practical. We will therefore reserve the buffers and request the images
522 * here so that we don't have to do it later.
523 */
524 static int vme_user_probe(struct vme_dev *vdev)
525 {
526 int i, err;
527 char *name;
528
529 /* Save pointer to the bridge device */
530 if (vme_user_bridge) {
531 dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n");
532 err = -EINVAL;
533 goto err_dev;
534 }
535 vme_user_bridge = vdev;
536
537 /* Initialise descriptors */
538 for (i = 0; i < VME_DEVS; i++) {
539 image[i].kern_buf = NULL;
540 image[i].pci_buf = 0;
541 mutex_init(&image[i].mutex);
542 image[i].device = NULL;
543 image[i].resource = NULL;
544 }
545
546 /* Assign major and minor numbers for the driver */
547 err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
548 driver_name);
549 if (err) {
550 dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n",
551 VME_MAJOR);
552 goto err_region;
553 }
554
555 /* Register the driver as a char device */
556 vme_user_cdev = cdev_alloc();
557 if (!vme_user_cdev) {
558 err = -ENOMEM;
559 goto err_char;
560 }
561 vme_user_cdev->ops = &vme_user_fops;
562 vme_user_cdev->owner = THIS_MODULE;
563 err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
564 if (err)
565 goto err_char;
566
567         /* Request slave resources and allocate buffers (128kB each) */
568 for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
569 /* XXX Need to properly request attributes */
570                 /* For the ca91cx42 bridge there are only two slave windows
571                  * supporting A16 addressing, so we request A24, which is
572                  * supported by all windows.
573 */
574 image[i].resource = vme_slave_request(vme_user_bridge,
575 VME_A24, VME_SCT);
576 if (!image[i].resource) {
577 dev_warn(&vdev->dev,
578 "Unable to allocate slave resource\n");
579 err = -ENOMEM;
580 goto err_slave;
581 }
582 image[i].size_buf = PCI_BUF_SIZE;
583 image[i].kern_buf = vme_alloc_consistent(image[i].resource,
584 image[i].size_buf, &image[i].pci_buf);
585 if (!image[i].kern_buf) {
586 dev_warn(&vdev->dev,
587 "Unable to allocate memory for buffer\n");
588 image[i].pci_buf = 0;
589 vme_slave_free(image[i].resource);
590 err = -ENOMEM;
591 goto err_slave;
592 }
593 }
594
595 /*
596          * Request master resources and allocate buffers (PCI_BUF_SIZE each)
597          * for small reads and writes
598 */
599 for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
600 /* XXX Need to properly request attributes */
601 image[i].resource = vme_master_request(vme_user_bridge,
602 VME_A32, VME_SCT, VME_D32);
603 if (!image[i].resource) {
604 dev_warn(&vdev->dev,
605 "Unable to allocate master resource\n");
606 err = -ENOMEM;
607 goto err_master;
608 }
609 image[i].size_buf = PCI_BUF_SIZE;
610 image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
611 if (!image[i].kern_buf) {
612 err = -ENOMEM;
613 vme_master_free(image[i].resource);
614 goto err_master;
615 }
616 }
617
618 /* Create sysfs entries - on udev systems this creates the dev files */
619 vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
620 if (IS_ERR(vme_user_sysfs_class)) {
621 dev_err(&vdev->dev, "Error creating vme_user class.\n");
622 err = PTR_ERR(vme_user_sysfs_class);
623 goto err_class;
624 }
625
626 /* Add sysfs Entries */
627 for (i = 0; i < VME_DEVS; i++) {
628 int num;
629
630 switch (type[i]) {
631 case MASTER_MINOR:
632 name = "bus/vme/m%d";
633 break;
634 case CONTROL_MINOR:
635 name = "bus/vme/ctl";
636 break;
637 case SLAVE_MINOR:
638 name = "bus/vme/s%d";
639 break;
640 default:
641 err = -EINVAL;
642 goto err_sysfs;
643 }
644
645 num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
646 image[i].device = device_create(vme_user_sysfs_class, NULL,
647 MKDEV(VME_MAJOR, i), NULL, name, num);
648 if (IS_ERR(image[i].device)) {
649 dev_info(&vdev->dev, "Error creating sysfs device\n");
650 err = PTR_ERR(image[i].device);
651 goto err_sysfs;
652 }
653 }
654
655 return 0;
656
657 err_sysfs:
658 while (i > 0) {
659 i--;
660 device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
661 }
662 class_destroy(vme_user_sysfs_class);
663
664         /* Ensure counter is set correctly to free all master windows */
665 i = MASTER_MAX + 1;
666 err_master:
667 while (i > MASTER_MINOR) {
668 i--;
669 kfree(image[i].kern_buf);
670 vme_master_free(image[i].resource);
671 }
672
673 /*
674          * Ensure counter is set correctly to free all slave windows and buffers
675 */
676 i = SLAVE_MAX + 1;
677 err_slave:
678 while (i > SLAVE_MINOR) {
679 i--;
680 vme_free_consistent(image[i].resource, image[i].size_buf,
681 image[i].kern_buf, image[i].pci_buf);
682 vme_slave_free(image[i].resource);
683 }
684 err_class:
685 cdev_del(vme_user_cdev);
686 err_char:
687 unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
688 err_region:
689 err_dev:
690 return err;
691 }
692
693 static int vme_user_remove(struct vme_dev *dev)
694 {
695 int i;
696
697 /* Remove sysfs Entries */
698 for (i = 0; i < VME_DEVS; i++) {
699 mutex_destroy(&image[i].mutex);
700 device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
701 }
702 class_destroy(vme_user_sysfs_class);
703
704 for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
705 kfree(image[i].kern_buf);
706 vme_master_free(image[i].resource);
707 }
708
709 for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
710 vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
711 vme_free_consistent(image[i].resource, image[i].size_buf,
712 image[i].kern_buf, image[i].pci_buf);
713 vme_slave_free(image[i].resource);
714 }
715
716 /* Unregister device driver */
717 cdev_del(vme_user_cdev);
718
719         /* Unregister the major and minor device numbers */
720 unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
721
722 return 0;
723 }
724
725 static struct vme_driver vme_user_driver = {
726 .name = driver_name,
727 .match = vme_user_match,
728 .probe = vme_user_probe,
729 .remove = vme_user_remove,
730 };
731
732 static int __init vme_user_init(void)
733 {
734 int retval = 0;
735
736 pr_info("VME User Space Access Driver\n");
737
738 if (bus_num == 0) {
739 pr_err("No cards, skipping registration\n");
740 retval = -ENODEV;
741 goto err_nocard;
742 }
743
744         /* Let's start by supporting one bus; we can support more than one
745 * in future revisions if that ever becomes necessary.
746 */
747 if (bus_num > VME_USER_BUS_MAX) {
748 pr_err("Driver only able to handle %d buses\n",
749 VME_USER_BUS_MAX);
750 bus_num = VME_USER_BUS_MAX;
751 }
752
753 /*
754 * Here we just register the maximum number of devices we can and
755 * leave vme_user_match() to allow only 1 to go through to probe().
756 * This way, if we later want to allow multiple user access devices,
757 * we just change the code in vme_user_match().
758 */
759 retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS);
760 if (retval)
761 goto err_reg;
762
763 return retval;
764
765 err_reg:
766 err_nocard:
767 return retval;
768 }
769
770 static void __exit vme_user_exit(void)
771 {
772 vme_unregister_driver(&vme_user_driver);
773 }
774
775 MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
776 module_param_array(bus, int, &bus_num, 0000);
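/*
 * Example load-time usage (typical, not mandated by the driver): bind to the
 * first VME bridge by passing its bus number, e.g. "modprobe vme_user bus=0".
 * If no bus is given, bus_num remains 0 and vme_user_init() fails with
 * -ENODEV ("No cards, skipping registration").
 */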
777
778 MODULE_DESCRIPTION("VME User Space Access Driver");
779 MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
780 MODULE_LICENSE("GPL");
781
782 module_init(vme_user_init);
783 module_exit(vme_user_exit);