/*
 * VMEbus User access driver
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by:
 *   Tom Armistead and Ajit Prem
 *   Copyright 2004 Motorola Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/types.h>

#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/vme.h>

#include "vme_user.h"

static const char driver_name[] = "vme_user";

static int bus[VME_USER_BUS_MAX];
static unsigned int bus_num;

/* Currently Documentation/devices.txt defines the following for VME:
 *
 * 221 char	VME bus
 *		  0 = /dev/bus/vme/m0		First master image
 *		  1 = /dev/bus/vme/m1		Second master image
 *		  2 = /dev/bus/vme/m2		Third master image
 *		  3 = /dev/bus/vme/m3		Fourth master image
 *		  4 = /dev/bus/vme/s0		First slave image
 *		  5 = /dev/bus/vme/s1		Second slave image
 *		  6 = /dev/bus/vme/s2		Third slave image
 *		  7 = /dev/bus/vme/s3		Fourth slave image
 *		  8 = /dev/bus/vme/ctl		Control
 *
 *		It is expected that all VME bus drivers will use the
 *		same interface. For interface documentation see
 *		http://www.vmelinux.org/.
 *
 * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't
 * even support the tsi148 chipset (which has 8 master and 8 slave windows).
 * We'll run with this for now as far as possible, however it probably makes
 * sense to get rid of the old mappings and just do everything dynamically.
 *
 * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as
 * defined above and try to support at least some of the interface from
 * http://www.vmelinux.org/. As an alternative, the driver can be rewritten to
 * provide a saner interface later.
 *
 * The vmelinux.org driver never supported slave images; the devices reserved
 * for slaves were repurposed to support all 8 master images on the UniverseII!
 * We shall support 4 masters and 4 slaves with this driver.
 */
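
/*
 * To make the device layout above concrete, here is a minimal sketch of a
 * userspace session against this interface (illustrative only: the window
 * parameters are hypothetical, and vme_user.h is assumed to be available
 * to userspace):
 *
 *	int fd = open("/dev/bus/vme/m0", O_RDWR);
 *	struct vme_master master = {
 *		.enable = 1,
 *		.vme_addr = 0x10000,
 *		.size = 0x10000,
 *		.aspace = VME_A32,
 *		.cycle = VME_SCT,
 *		.dwidth = VME_D32,
 *	};
 *	ioctl(fd, VME_SET_MASTER, &master);
 *	lseek(fd, 0x100, SEEK_SET);
 *	read(fd, buf, 4);
 */
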
#define VME_MAJOR	221	/* VME Major Device Number */
#define VME_DEVS	9	/* Number of dev entries */

#define MASTER_MINOR	0
#define MASTER_MAX	3
#define SLAVE_MINOR	4
#define SLAVE_MAX	7
#define CONTROL_MINOR	8

#define PCI_BUF_SIZE	0x20000	/* Size of one slave image buffer */

/*
 * Structure to handle image related parameters.
 */
struct image_desc {
	void *kern_buf;			/* Buffer address in kernel space */
	dma_addr_t pci_buf;		/* Buffer address in PCI address space */
	unsigned long long size_buf;	/* Buffer size */
	struct mutex mutex;		/* Mutex for locking image */
	struct device *device;		/* Sysfs device */
	struct vme_resource *resource;	/* VME resource */
	int users;			/* Number of current users */
	int mmap_count;			/* Number of current mmap's */
};

static struct image_desc image[VME_DEVS];

struct driver_stats {
	unsigned long reads;
	unsigned long writes;
	unsigned long ioctls;
	unsigned long irqs;
	unsigned long berrs;
	unsigned long dmaerrors;
	unsigned long timeouts;
	unsigned long external;
};

static struct driver_stats statistics;

static struct cdev *vme_user_cdev;		/* Character device */
static struct class *vme_user_sysfs_class;	/* Sysfs class */
static struct vme_dev *vme_user_bridge;		/* Pointer to user device */

static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
					MASTER_MINOR,	MASTER_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					CONTROL_MINOR
				};

static int vme_user_open(struct inode *, struct file *);
static int vme_user_release(struct inode *, struct file *);
static ssize_t vme_user_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t vme_user_write(struct file *, const char __user *, size_t,
			      loff_t *);
static loff_t vme_user_llseek(struct file *, loff_t, int);
static long vme_user_unlocked_ioctl(struct file *, unsigned int,
				    unsigned long);
static int vme_user_mmap(struct file *file, struct vm_area_struct *vma);

static void vme_user_vm_open(struct vm_area_struct *vma);
static void vme_user_vm_close(struct vm_area_struct *vma);

static int vme_user_match(struct vme_dev *);
static int vme_user_probe(struct vme_dev *);
static int vme_user_remove(struct vme_dev *);

static const struct file_operations vme_user_fops = {
	.open = vme_user_open,
	.release = vme_user_release,
	.read = vme_user_read,
	.write = vme_user_write,
	.llseek = vme_user_llseek,
	.unlocked_ioctl = vme_user_unlocked_ioctl,
	.compat_ioctl = vme_user_unlocked_ioctl,
	.mmap = vme_user_mmap,
};

struct vme_user_vma_priv {
	unsigned int minor;
	atomic_t refcnt;
};

static const struct vm_operations_struct vme_user_vm_ops = {
	.open = vme_user_vm_open,
	.close = vme_user_vm_close,
};

/*
 * Reset all the statistic counters
 */
static void reset_counters(void)
{
	statistics.reads = 0;
	statistics.writes = 0;
	statistics.ioctls = 0;
	statistics.irqs = 0;
	statistics.berrs = 0;
	statistics.dmaerrors = 0;
	statistics.timeouts = 0;
}

static int vme_user_open(struct inode *inode, struct file *file)
{
	int err;
	unsigned int minor = MINOR(inode->i_rdev);

	mutex_lock(&image[minor].mutex);
	/* Allow device to be opened if a resource is needed and allocated. */
	if (minor < CONTROL_MINOR && image[minor].resource == NULL) {
		pr_err("No resources allocated for device\n");
		err = -EINVAL;
		goto err_res;
	}

	/* Increment user count */
	image[minor].users++;

	mutex_unlock(&image[minor].mutex);

	return 0;

err_res:
	mutex_unlock(&image[minor].mutex);

	return err;
}

static int vme_user_release(struct inode *inode, struct file *file)
{
	unsigned int minor = MINOR(inode->i_rdev);

	mutex_lock(&image[minor].mutex);

	/* Decrement user count */
	image[minor].users--;

	mutex_unlock(&image[minor].mutex);

	return 0;
}

/*
 * We are going to alloc a page during init per window for small transfers.
 * Small transfers will go VME -> buffer -> user space. Larger (more than a
 * page) transfers will lock the user space buffer into memory and then
 * transfer the data directly into the user space buffers.
 */
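
/*
 * Until the large-transfer path below is implemented, userspace can stay on
 * the supported path by splitting a large read into chunks no bigger than
 * the kernel bounce buffer. A sketch, assuming the default PCI_BUF_SIZE
 * buffer of 0x20000 bytes:
 *
 *	size_t chunk = 0x20000, done, n;
 *	for (done = 0; done < len; done += n) {
 *		n = (len - done < chunk) ? len - done : chunk;
 *		if (read(fd, buf + done, n) != (ssize_t)n)
 *			break;
 *	}
 */
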
static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
				loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		/* We copy to kernel buffer */
		copied = vme_master_read(image[minor].resource,
					 image[minor].kern_buf, count, *ppos);
		if (copied < 0)
			return (int)copied;

		retval = __copy_to_user(buf, image[minor].kern_buf,
					(unsigned long)copied);
		if (retval != 0) {
			copied = (copied - retval);
			pr_info("User copy failed\n");
			return -EINVAL;
		}

	} else {
		/* XXX Need to write this */
		pr_info("Currently don't support large transfers\n");
		/* Map in pages from userspace */

		/* Call vme_master_read to do the transfer */
		return -EINVAL;
	}

	return copied;
}

/*
 * We are going to alloc a page during init per window for small transfers.
 * Small transfers will go user space -> buffer -> VME. Larger (more than a
 * page) transfers will lock the user space buffer into memory and then
 * transfer the data directly from the user space buffers out to VME.
 */
static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		retval = __copy_from_user(image[minor].kern_buf, buf,
					  (unsigned long)count);
		if (retval != 0)
			copied = (copied - retval);
		else
			copied = count;

		copied = vme_master_write(image[minor].resource,
					  image[minor].kern_buf, copied, *ppos);
	} else {
		/* XXX Need to write this */
		pr_info("Currently don't support large transfers\n");
		/* Map in pages from userspace */

		/* Call vme_master_write to do the transfer */
		return -EINVAL;
	}

	return copied;
}

static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
			      size_t count, loff_t *ppos)
{
	void *image_ptr;
	ssize_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_to_user(buf, image_ptr, (unsigned long)count);
	if (retval != 0) {
		retval = (count - retval);
		pr_warn("Partial copy to userspace\n");
	} else {
		retval = count;
	}

	/* Return number of bytes successfully read */
	return retval;
}

static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
				size_t count, loff_t *ppos)
{
	void *image_ptr;
	size_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_from_user(image_ptr, buf, (unsigned long)count);
	if (retval != 0) {
		retval = (count - retval);
		pr_warn("Partial copy from userspace\n");
	} else {
		retval = count;
	}

	/* Return number of bytes successfully written */
	return retval;
}

static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
			     loff_t *ppos)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	/* XXX Do we *really* want this helper - we can use vme_*_get ? */
	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not reading past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_to_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_to_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);
	if (retval > 0)
		*ppos += retval;

	return retval;
}

static ssize_t vme_user_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not writing past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_from_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_from_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	size_t image_size;
	loff_t res;

	if (minor == CONTROL_MINOR)
		return -EINVAL;

	mutex_lock(&image[minor].mutex);
	image_size = vme_get_size(image[minor].resource);
	res = fixed_size_llseek(file, off, whence, image_size);
	mutex_unlock(&image[minor].mutex);

	return res;
}

/*
 * The ioctls provided by the old VME access method (the one at vmelinux.org)
 * are most certainly wrong as they effectively push the register layout
 * through to user space. Given that the VME core can handle multiple bridges,
 * with different register layouts, this is most certainly not the way to go.
 *
 * We aren't using the structures defined in the Motorola driver either - these
 * are also quite low level, however we should use the definitions that have
 * already been defined.
 */
static int vme_user_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct vme_master master;
	struct vme_slave slave;
	struct vme_irq_id irq_req;
	unsigned long copied;
	unsigned int minor = MINOR(inode->i_rdev);
	int retval;
	dma_addr_t pci_addr;
	void __user *argp = (void __user *)arg;

	statistics.ioctls++;

	switch (type[minor]) {
	case CONTROL_MINOR:
		switch (cmd) {
		case VME_IRQ_GEN:
			copied = copy_from_user(&irq_req, argp,
						sizeof(struct vme_irq_id));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			return vme_irq_generate(vme_user_bridge,
						irq_req.level,
						irq_req.statid);
		}
		break;
	case MASTER_MINOR:
		switch (cmd) {
		case VME_GET_MASTER:
			memset(&master, 0, sizeof(struct vme_master));

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			retval = vme_master_get(image[minor].resource,
						&master.enable,
						&master.vme_addr,
						&master.size, &master.aspace,
						&master.cycle, &master.dwidth);

			copied = copy_to_user(argp, &master,
					      sizeof(struct vme_master));
			if (copied != 0) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_MASTER:

			if (image[minor].mmap_count != 0) {
				pr_warn("Can't adjust mapped window\n");
				return -EPERM;
			}

			copied = copy_from_user(&master, argp, sizeof(master));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			return vme_master_set(image[minor].resource,
				master.enable, master.vme_addr, master.size,
				master.aspace, master.cycle, master.dwidth);
		}
		break;
	case SLAVE_MINOR:
		switch (cmd) {
		case VME_GET_SLAVE:
			memset(&slave, 0, sizeof(struct vme_slave));

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			retval = vme_slave_get(image[minor].resource,
					       &slave.enable, &slave.vme_addr,
					       &slave.size, &pci_addr,
					       &slave.aspace, &slave.cycle);

			copied = copy_to_user(argp, &slave,
					      sizeof(struct vme_slave));
			if (copied != 0) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_SLAVE:

			copied = copy_from_user(&slave, argp, sizeof(slave));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			return vme_slave_set(image[minor].resource,
				slave.enable, slave.vme_addr, slave.size,
				image[minor].pci_buf, slave.aspace,
				slave.cycle);
		}
		break;
	}

	return -EINVAL;
}

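/*
 * Example userspace use of the VME_IRQ_GEN ioctl on the control minor (a
 * sketch; the level and status/ID values are hypothetical):
 *
 *	struct vme_irq_id irq = { .level = 3, .statid = 0x20 };
 *	int ctl = open("/dev/bus/vme/ctl", O_RDWR);
 *	ioctl(ctl, VME_IRQ_GEN, &irq);
 */
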
static long
vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct inode *inode = file_inode(file);
	unsigned int minor = MINOR(inode->i_rdev);

	mutex_lock(&image[minor].mutex);
	ret = vme_user_ioctl(inode, file, cmd, arg);
	mutex_unlock(&image[minor].mutex);

	return ret;
}

static void vme_user_vm_open(struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *vma_priv = vma->vm_private_data;

	atomic_inc(&vma_priv->refcnt);
}

static void vme_user_vm_close(struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
	unsigned int minor = vma_priv->minor;

	if (!atomic_dec_and_test(&vma_priv->refcnt))
		return;

	mutex_lock(&image[minor].mutex);
	image[minor].mmap_count--;
	mutex_unlock(&image[minor].mutex);

	kfree(vma_priv);
}

static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma)
{
	int err;
	struct vme_user_vma_priv *vma_priv;

	mutex_lock(&image[minor].mutex);

	err = vme_master_mmap(image[minor].resource, vma);
	if (err) {
		mutex_unlock(&image[minor].mutex);
		return err;
	}

	vma_priv = kmalloc(sizeof(struct vme_user_vma_priv), GFP_KERNEL);
	if (vma_priv == NULL) {
		mutex_unlock(&image[minor].mutex);
		return -ENOMEM;
	}

	vma_priv->minor = minor;
	atomic_set(&vma_priv->refcnt, 1);
	vma->vm_ops = &vme_user_vm_ops;
	vma->vm_private_data = vma_priv;

	image[minor].mmap_count++;

	mutex_unlock(&image[minor].mutex);

	return 0;
}

static int vme_user_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);

	if (type[minor] == MASTER_MINOR)
		return vme_user_master_mmap(minor, vma);

	return -ENODEV;
}
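
/*
 * Userspace sketch of the mmap path above (illustrative; assumes a master
 * window minor that was already configured via VME_SET_MASTER):
 *
 *	void *p = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, 0);
 *	if (p != MAP_FAILED)
 *		val = *(volatile uint32_t *)p;
 *
 * Note that VME_SET_MASTER is refused with -EPERM while a mapping exists
 * (mmap_count is non-zero).
 */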

/*
 * Unallocate a previously allocated buffer
 */
static void buf_unalloc(int num)
{
	if (image[num].kern_buf) {
#ifdef VME_DEBUG
		pr_debug("UniverseII:Releasing buffer at %pad\n",
			 &image[num].pci_buf);
#endif

		vme_free_consistent(image[num].resource, image[num].size_buf,
				    image[num].kern_buf, image[num].pci_buf);

		image[num].kern_buf = NULL;
		image[num].pci_buf = 0;
		image[num].size_buf = 0;

#ifdef VME_DEBUG
	} else {
		pr_debug("UniverseII: Buffer not allocated\n");
#endif
	}
}

static struct vme_driver vme_user_driver = {
	.name = driver_name,
	.match = vme_user_match,
	.probe = vme_user_probe,
	.remove = vme_user_remove,
};

static int __init vme_user_init(void)
{
	int retval = 0;

	pr_info("VME User Space Access Driver\n");

	if (bus_num == 0) {
		pr_err("No cards, skipping registration\n");
		retval = -ENODEV;
		goto err_nocard;
	}

	/* Let's start by supporting one bus, we can support more than one
	 * in future revisions if that ever becomes necessary.
	 */
	if (bus_num > VME_USER_BUS_MAX) {
		pr_err("Driver only able to handle %d buses\n",
		       VME_USER_BUS_MAX);
		bus_num = VME_USER_BUS_MAX;
	}

	/*
	 * Here we just register the maximum number of devices we can and
	 * leave vme_user_match() to allow only 1 to go through to probe().
	 * This way, if we later want to allow multiple user access devices,
	 * we just change the code in vme_user_match().
	 */
	retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS);
	if (retval != 0)
		goto err_reg;

	return retval;

err_reg:
err_nocard:
	return retval;
}

static int vme_user_match(struct vme_dev *vdev)
{
	int i;

	int cur_bus = vme_bus_num(vdev);
	int cur_slot = vme_slot_num(vdev);

	for (i = 0; i < bus_num; i++)
		if ((cur_bus == bus[i]) && (cur_slot == vdev->num))
			return 1;

	return 0;
}

/*
 * In this simple access driver, the old behaviour is being preserved as much
 * as practical. We will therefore reserve the buffers and request the images
 * here so that we don't have to do it later.
 */
static int vme_user_probe(struct vme_dev *vdev)
{
	int i, err;
	char *name;

	/* Save pointer to the bridge device */
	if (vme_user_bridge != NULL) {
		dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n");
		err = -EINVAL;
		goto err_dev;
	}
	vme_user_bridge = vdev;

	/* Initialise descriptors */
	for (i = 0; i < VME_DEVS; i++) {
		image[i].kern_buf = NULL;
		image[i].pci_buf = 0;
		mutex_init(&image[i].mutex);
		image[i].device = NULL;
		image[i].resource = NULL;
		image[i].users = 0;
	}

	/* Initialise statistics counters */
	reset_counters();

	/* Assign major and minor numbers for the driver */
	err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
				     driver_name);
	if (err) {
		dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n",
			 VME_MAJOR);
		goto err_region;
	}

	/* Register the driver as a char device */
	vme_user_cdev = cdev_alloc();
	if (!vme_user_cdev) {
		err = -ENOMEM;
		goto err_char;
	}
	vme_user_cdev->ops = &vme_user_fops;
	vme_user_cdev->owner = THIS_MODULE;
	err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
	if (err) {
		dev_warn(&vdev->dev, "cdev_add failed\n");
		goto err_char;
	}

	/* Request slave resources and allocate buffers (128kB wide) */
	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		/* For ca91cx42 bridge there are only two slave windows
		 * supporting A16 addressing, so we request A24 supported
		 * by all windows.
		 */
		image[i].resource = vme_slave_request(vme_user_bridge,
						      VME_A24, VME_SCT);
		if (image[i].resource == NULL) {
			dev_warn(&vdev->dev,
				 "Unable to allocate slave resource\n");
			err = -ENOMEM;
			goto err_slave;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = vme_alloc_consistent(image[i].resource,
							 image[i].size_buf,
							 &image[i].pci_buf);
		if (image[i].kern_buf == NULL) {
			dev_warn(&vdev->dev,
				 "Unable to allocate memory for buffer\n");
			image[i].pci_buf = 0;
			vme_slave_free(image[i].resource);
			err = -ENOMEM;
			goto err_slave;
		}
	}

	/*
	 * Request master resources and allocate page-sized buffers for small
	 * reads and writes
	 */
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		image[i].resource = vme_master_request(vme_user_bridge,
						       VME_A32, VME_SCT,
						       VME_D32);
		if (image[i].resource == NULL) {
			dev_warn(&vdev->dev,
				 "Unable to allocate master resource\n");
			err = -ENOMEM;
			goto err_master;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
		if (image[i].kern_buf == NULL) {
			err = -ENOMEM;
			vme_master_free(image[i].resource);
			goto err_master;
		}
	}

	/* Create sysfs entries - on udev systems this creates the dev files */
	vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
	if (IS_ERR(vme_user_sysfs_class)) {
		dev_err(&vdev->dev, "Error creating vme_user class.\n");
		err = PTR_ERR(vme_user_sysfs_class);
		goto err_class;
	}

	/* Add sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		int num;

		switch (type[i]) {
		case MASTER_MINOR:
			name = "bus/vme/m%d";
			break;
		case CONTROL_MINOR:
			name = "bus/vme/ctl";
			break;
		case SLAVE_MINOR:
			name = "bus/vme/s%d";
			break;
		default:
			err = -EINVAL;
			goto err_sysfs;
		}

		num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
		image[i].device = device_create(vme_user_sysfs_class, NULL,
						MKDEV(VME_MAJOR, i), NULL,
						name, num);
		if (IS_ERR(image[i].device)) {
			dev_info(&vdev->dev, "Error creating sysfs device\n");
			err = PTR_ERR(image[i].device);
			goto err_sysfs;
		}
	}

	return 0;

err_sysfs:
	while (i > 0) {
		i--;
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	/* Ensure counter set correctly to unalloc all master windows */
	i = MASTER_MAX + 1;
err_master:
	while (i > MASTER_MINOR) {
		i--;
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	/*
	 * Ensure counter set correctly to unalloc all slave windows and
	 * buffers
	 */
	i = SLAVE_MAX + 1;
err_slave:
	while (i > SLAVE_MINOR) {
		i--;
		buf_unalloc(i);
		vme_slave_free(image[i].resource);
	}
err_class:
	cdev_del(vme_user_cdev);
err_char:
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
err_region:
err_dev:
	return err;
}

static int vme_user_remove(struct vme_dev *dev)
{
	int i;

	/* Remove sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		mutex_destroy(&image[i].mutex);
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
		buf_unalloc(i);
		vme_slave_free(image[i].resource);
	}

	/* Unregister device driver */
	cdev_del(vme_user_cdev);

	/* Unregister the major and minor device numbers */
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);

	return 0;
}

static void __exit vme_user_exit(void)
{
	vme_unregister_driver(&vme_user_driver);
}

MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0);
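
/*
 * Example (hypothetical bus number): bind the driver to the first VME bridge
 * with "modprobe vme_user bus=0".
 */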

MODULE_DESCRIPTION("VME User Space Access Driver");
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_LICENSE("GPL");

module_init(vme_user_init);
module_exit(vme_user_exit);