/*
 * drivers/uio/uio.c
 *
 * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2006, Hans J. Koch <hjk@hansjkoch.de>
 * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
 *
 * Userspace IO
 *
 * Base Functions
 *
 * Licensed under the GPLv2 only.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/cdev.h>
#include <linux/uio_driver.h>

#define UIO_MAX_DEVICES		(1U << MINORBITS)

static int uio_major;
static struct cdev *uio_cdev;
static DEFINE_IDR(uio_idr);
static const struct file_operations uio_fops;

/* Protect idr accesses */
static DEFINE_MUTEX(minor_lock);

/*
 * attributes
 */

struct uio_map {
	struct kobject kobj;
	struct uio_mem *mem;
};
#define to_map(map) container_of(map, struct uio_map, kobj)

static ssize_t map_name_show(struct uio_mem *mem, char *buf)
{
	if (unlikely(!mem->name))
		mem->name = "";

	return sprintf(buf, "%s\n", mem->name);
}

static ssize_t map_addr_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "%pa\n", &mem->addr);
}

static ssize_t map_size_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "%pa\n", &mem->size);
}

static ssize_t map_offset_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "0x%llx\n", (unsigned long long)mem->offs);
}

struct map_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct uio_mem *, char *);
	ssize_t (*store)(struct uio_mem *, const char *, size_t);
};

static struct map_sysfs_entry name_attribute =
	__ATTR(name, S_IRUGO, map_name_show, NULL);
static struct map_sysfs_entry addr_attribute =
	__ATTR(addr, S_IRUGO, map_addr_show, NULL);
static struct map_sysfs_entry size_attribute =
	__ATTR(size, S_IRUGO, map_size_show, NULL);
static struct map_sysfs_entry offset_attribute =
	__ATTR(offset, S_IRUGO, map_offset_show, NULL);

static struct attribute *attrs[] = {
	&name_attribute.attr,
	&addr_attribute.attr,
	&size_attribute.attr,
	&offset_attribute.attr,
	NULL,	/* need to NULL terminate the list of attributes */
};

static void map_release(struct kobject *kobj)
{
	struct uio_map *map = to_map(kobj);
	kfree(map);
}

static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct uio_map *map = to_map(kobj);
	struct uio_mem *mem = map->mem;
	struct map_sysfs_entry *entry;

	entry = container_of(attr, struct map_sysfs_entry, attr);

	if (!entry->show)
		return -EIO;

	return entry->show(mem, buf);
}

static const struct sysfs_ops map_sysfs_ops = {
	.show = map_type_show,
};

static struct kobj_type map_attr_type = {
	.release	= map_release,
	.sysfs_ops	= &map_sysfs_ops,
	.default_attrs	= attrs,
};

struct uio_portio {
	struct kobject kobj;
	struct uio_port *port;
};
#define to_portio(portio) container_of(portio, struct uio_portio, kobj)

static ssize_t portio_name_show(struct uio_port *port, char *buf)
{
	if (unlikely(!port->name))
		port->name = "";

	return sprintf(buf, "%s\n", port->name);
}

static ssize_t portio_start_show(struct uio_port *port, char *buf)
{
	return sprintf(buf, "0x%lx\n", port->start);
}

static ssize_t portio_size_show(struct uio_port *port, char *buf)
{
	return sprintf(buf, "0x%lx\n", port->size);
}

static ssize_t portio_porttype_show(struct uio_port *port, char *buf)
{
	const char *porttypes[] = {"none", "x86", "gpio", "other"};

	if ((port->porttype < 0) || (port->porttype > UIO_PORT_OTHER))
		return -EINVAL;

	return sprintf(buf, "port_%s\n", porttypes[port->porttype]);
}

struct portio_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct uio_port *, char *);
	ssize_t (*store)(struct uio_port *, const char *, size_t);
};

static struct portio_sysfs_entry portio_name_attribute =
	__ATTR(name, S_IRUGO, portio_name_show, NULL);
static struct portio_sysfs_entry portio_start_attribute =
	__ATTR(start, S_IRUGO, portio_start_show, NULL);
static struct portio_sysfs_entry portio_size_attribute =
	__ATTR(size, S_IRUGO, portio_size_show, NULL);
static struct portio_sysfs_entry portio_porttype_attribute =
	__ATTR(porttype, S_IRUGO, portio_porttype_show, NULL);

static struct attribute *portio_attrs[] = {
	&portio_name_attribute.attr,
	&portio_start_attribute.attr,
	&portio_size_attribute.attr,
	&portio_porttype_attribute.attr,
	NULL,
};

static void portio_release(struct kobject *kobj)
{
	struct uio_portio *portio = to_portio(kobj);
	kfree(portio);
}

static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	struct uio_portio *portio = to_portio(kobj);
	struct uio_port *port = portio->port;
	struct portio_sysfs_entry *entry;

	entry = container_of(attr, struct portio_sysfs_entry, attr);

	if (!entry->show)
		return -EIO;

	return entry->show(port, buf);
}

static const struct sysfs_ops portio_sysfs_ops = {
	.show = portio_type_show,
};

static struct kobj_type portio_attr_type = {
	.release	= portio_release,
	.sysfs_ops	= &portio_sysfs_ops,
	.default_attrs	= portio_attrs,
};

static ssize_t name_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);
	return sprintf(buf, "%s\n", idev->info->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);
	return sprintf(buf, "%s\n", idev->info->version);
}
static DEVICE_ATTR_RO(version);

static ssize_t event_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
}
static DEVICE_ATTR_RO(event);

static struct attribute *uio_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_version.attr,
	&dev_attr_event.attr,
	NULL,
};
ATTRIBUTE_GROUPS(uio);

/* UIO class infrastructure */
static struct class uio_class = {
	.name = "uio",
	.dev_groups = uio_groups,
};

/*
 * device functions
 */
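/*
 * Populate the per-device sysfs tree: a "maps/mapX" kobject for each
 * memory region and a "portio/portX" kobject for each port region
 * described in uio_info. On failure, everything created so far is
 * torn down again via the error paths below.
 */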
static int uio_dev_add_attributes(struct uio_device *idev)
{
	int ret;
	int mi, pi;
	int map_found = 0;
	int portio_found = 0;
	struct uio_mem *mem;
	struct uio_map *map;
	struct uio_port *port;
	struct uio_portio *portio;

	for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
		mem = &idev->info->mem[mi];
		if (mem->size == 0)
			break;
		if (!map_found) {
			map_found = 1;
			idev->map_dir = kobject_create_and_add("maps",
							       &idev->dev->kobj);
			if (!idev->map_dir) {
				ret = -ENOMEM;
				goto err_map;
			}
		}
		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto err_map;
		}
		kobject_init(&map->kobj, &map_attr_type);
		map->mem = mem;
		mem->map = map;
		ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi);
		if (ret)
			goto err_map_kobj;
		ret = kobject_uevent(&map->kobj, KOBJ_ADD);
		if (ret)
			goto err_map_kobj;
	}

	for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) {
		port = &idev->info->port[pi];
		if (port->size == 0)
			break;
		if (!portio_found) {
			portio_found = 1;
			idev->portio_dir = kobject_create_and_add("portio",
								  &idev->dev->kobj);
			if (!idev->portio_dir) {
				ret = -ENOMEM;
				goto err_portio;
			}
		}
		portio = kzalloc(sizeof(*portio), GFP_KERNEL);
		if (!portio) {
			ret = -ENOMEM;
			goto err_portio;
		}
		kobject_init(&portio->kobj, &portio_attr_type);
		portio->port = port;
		port->portio = portio;
		ret = kobject_add(&portio->kobj, idev->portio_dir,
				  "port%d", pi);
		if (ret)
			goto err_portio_kobj;
		ret = kobject_uevent(&portio->kobj, KOBJ_ADD);
		if (ret)
			goto err_portio_kobj;
	}

	return 0;

err_portio:
	pi--;
err_portio_kobj:
	for (; pi >= 0; pi--) {
		port = &idev->info->port[pi];
		portio = port->portio;
		kobject_put(&portio->kobj);
	}
	kobject_put(idev->portio_dir);
err_map:
	mi--;
err_map_kobj:
	for (; mi >= 0; mi--) {
		mem = &idev->info->mem[mi];
		map = mem->map;
		kobject_put(&map->kobj);
	}
	kobject_put(idev->map_dir);
	dev_err(idev->dev, "error creating sysfs files (%d)\n", ret);
	return ret;
}

static void uio_dev_del_attributes(struct uio_device *idev)
{
	int i;
	struct uio_mem *mem;
	struct uio_port *port;

	for (i = 0; i < MAX_UIO_MAPS; i++) {
		mem = &idev->info->mem[i];
		if (mem->size == 0)
			break;
		kobject_put(&mem->map->kobj);
	}
	kobject_put(idev->map_dir);

	for (i = 0; i < MAX_UIO_PORT_REGIONS; i++) {
		port = &idev->info->port[i];
		if (port->size == 0)
			break;
		kobject_put(&port->portio->kobj);
	}
	kobject_put(idev->portio_dir);
}

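/*
 * Minor numbers are handed out by an idr under minor_lock; uio_open()
 * uses the same idr to look a device up again by its minor.
 */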
static int uio_get_minor(struct uio_device *idev)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL);
	if (retval >= 0) {
		idev->minor = retval;
		retval = 0;
	} else if (retval == -ENOSPC) {
		dev_err(idev->dev, "too many uio devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval;
}

static void uio_free_minor(struct uio_device *idev)
{
	mutex_lock(&minor_lock);
	idr_remove(&uio_idr, idev->minor);
	mutex_unlock(&minor_lock);
}

/**
 * uio_event_notify - trigger an interrupt event
 * @info: UIO device capabilities
 */
void uio_event_notify(struct uio_info *info)
{
	struct uio_device *idev = info->uio_dev;

	atomic_inc(&idev->event);
	wake_up_interruptible(&idev->wait);
	kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
}
EXPORT_SYMBOL_GPL(uio_event_notify);

/**
 * uio_interrupt - hardware interrupt handler
 * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer
 * @dev_id: Pointer to the device's uio_device structure
 */
static irqreturn_t uio_interrupt(int irq, void *dev_id)
{
	struct uio_device *idev = (struct uio_device *)dev_id;
	irqreturn_t ret = idev->info->handler(irq, idev->info);

	if (ret == IRQ_HANDLED)
		uio_event_notify(idev->info);

	return ret;
}

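/*
 * One uio_listener is allocated per open file descriptor; event_count
 * remembers the event counter value this listener last consumed, so
 * read() and poll() can tell whether new interrupts have arrived.
 */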
struct uio_listener {
	struct uio_device *dev;
	s32 event_count;
};

static int uio_open(struct inode *inode, struct file *filep)
{
	struct uio_device *idev;
	struct uio_listener *listener;
	int ret = 0;

	mutex_lock(&minor_lock);
	idev = idr_find(&uio_idr, iminor(inode));
	mutex_unlock(&minor_lock);
	if (!idev) {
		ret = -ENODEV;
		goto out;
	}

	if (!try_module_get(idev->owner)) {
		ret = -ENODEV;
		goto out;
	}

	listener = kmalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener) {
		ret = -ENOMEM;
		goto err_alloc_listener;
	}

	listener->dev = idev;
	listener->event_count = atomic_read(&idev->event);
	filep->private_data = listener;

	if (idev->info->open) {
		ret = idev->info->open(idev->info, inode);
		if (ret)
			goto err_infoopen;
	}
	return 0;

err_infoopen:
	kfree(listener);

err_alloc_listener:
	module_put(idev->owner);

out:
	return ret;
}

static int uio_fasync(int fd, struct file *filep, int on)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	return fasync_helper(fd, filep, on, &idev->async_queue);
}

static int uio_release(struct inode *inode, struct file *filep)
{
	int ret = 0;
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	if (idev->info->release)
		ret = idev->info->release(idev->info, inode);

	module_put(idev->owner);
	kfree(listener);
	return ret;
}

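/*
 * The device is readable whenever the global event counter differs from
 * the count this listener has already seen; poll() reports that as
 * POLLIN without consuming the event.
 */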
static unsigned int uio_poll(struct file *filep, poll_table *wait)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	if (!idev->info->irq)
		return -EIO;

	poll_wait(filep, &idev->wait, wait);
	if (listener->event_count != atomic_read(&idev->event))
		return POLLIN | POLLRDNORM;
	return 0;
}

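/*
 * read() returns the current event counter as a single s32 and blocks
 * (unless O_NONBLOCK is set) until it differs from the value this
 * listener last saw. An illustrative userspace sketch, not part of
 * this driver:
 *
 *	s32 count;
 *	int fd = open("/dev/uio0", O_RDONLY);
 *	read(fd, &count, sizeof(count));	// waits for an interrupt
 */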
static ssize_t uio_read(struct file *filep, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t retval;
	s32 event_count;

	if (!idev->info->irq)
		return -EIO;

	if (count != sizeof(s32))
		return -EINVAL;

	add_wait_queue(&idev->wait, &wait);

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		event_count = atomic_read(&idev->event);
		if (event_count != listener->event_count) {
			__set_current_state(TASK_RUNNING);
			if (copy_to_user(buf, &event_count, count))
				retval = -EFAULT;
			else {
				listener->event_count = event_count;
				retval = count;
			}
			break;
		}

		if (filep->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		schedule();
	} while (1);

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&idev->wait, &wait);

	return retval;
}

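/*
 * Writing one s32 to the device passes its value to the driver's
 * irqcontrol() hook, which drivers typically implement as an
 * enable (1) / disable (0) switch for the interrupt.
 */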
static ssize_t uio_write(struct file *filep, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	ssize_t retval;
	s32 irq_on;

	if (!idev->info->irq)
		return -EIO;

	if (count != sizeof(s32))
		return -EINVAL;

	if (!idev->info->irqcontrol)
		return -ENOSYS;

	if (copy_from_user(&irq_on, buf, count))
		return -EFAULT;

	retval = idev->info->irqcontrol(idev->info, irq_on);

	return retval ? retval : sizeof(s32);
}

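/*
 * By convention, userspace selects memory region N by calling mmap()
 * with offset N * PAGE_SIZE, so vma->vm_pgoff carries the map index
 * rather than a real page offset.
 */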
static int uio_find_mem_index(struct vm_area_struct *vma)
{
	struct uio_device *idev = vma->vm_private_data;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (idev->info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}

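/*
 * Fault handler for UIO_MEM_LOGICAL and UIO_MEM_VIRTUAL regions: resolve
 * the faulting page on demand from the kernel's own mapping instead of
 * remapping the whole region at mmap() time.
 */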
static int uio_vma_fault(struct vm_fault *vmf)
{
	struct uio_device *idev = vmf->vma->vm_private_data;
	struct page *page;
	unsigned long offset;
	void *addr;

	int mi = uio_find_mem_index(vmf->vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	addr = (void *)(unsigned long)idev->info->mem[mi].addr + offset;
	if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
		page = virt_to_page(addr);
	else
		page = vmalloc_to_page(addr);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct uio_logical_vm_ops = {
	.fault = uio_vma_fault,
};

static int uio_mmap_logical(struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &uio_logical_vm_ops;
	return 0;
}

static const struct vm_operations_struct uio_physical_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};

static int uio_mmap_physical(struct vm_area_struct *vma)
{
	struct uio_device *idev = vma->vm_private_data;
	int mi = uio_find_mem_index(vma);
	struct uio_mem *mem;
	if (mi < 0)
		return -EINVAL;
	mem = idev->info->mem + mi;

	if (mem->addr & ~PAGE_MASK)
		return -ENODEV;
	if (vma->vm_end - vma->vm_start > mem->size)
		return -EINVAL;

	vma->vm_ops = &uio_physical_vm_ops;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/*
	 * We cannot use the vm_iomap_memory() helper here,
	 * because vma->vm_pgoff is the map index we looked
	 * up above in uio_find_mem_index(), rather than an
	 * actual page offset into the mmap.
	 *
	 * So we just do the physical mmap without a page
	 * offset.
	 */
	return remap_pfn_range(vma,
			       vma->vm_start,
			       mem->addr >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

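/*
 * mmap() entry point: validate the requested range against the selected
 * memory region, then hand off to the driver's own mmap() hook if it has
 * one, or to the physical/logical mapping helpers according to memtype.
 */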
static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	int mi;
	unsigned long requested_pages, actual_pages;
	int ret = 0;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;

	vma->vm_private_data = idev;

	mi = uio_find_mem_index(vma);
	if (mi < 0)
		return -EINVAL;

	requested_pages = vma_pages(vma);
	actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)
			+ idev->info->mem[mi].size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (requested_pages > actual_pages)
		return -EINVAL;

	if (idev->info->mmap) {
		ret = idev->info->mmap(idev->info, vma);
		return ret;
	}

	switch (idev->info->mem[mi].memtype) {
	case UIO_MEM_PHYS:
		return uio_mmap_physical(vma);
	case UIO_MEM_LOGICAL:
	case UIO_MEM_VIRTUAL:
		return uio_mmap_logical(vma);
	default:
		return -EINVAL;
	}
}

static const struct file_operations uio_fops = {
	.owner		= THIS_MODULE,
	.open		= uio_open,
	.release	= uio_release,
	.read		= uio_read,
	.write		= uio_write,
	.mmap		= uio_mmap,
	.poll		= uio_poll,
	.fasync		= uio_fasync,
	.llseek		= noop_llseek,
};

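/*
 * Reserve a dynamic major with room for UIO_MAX_DEVICES minors and hook
 * it up to uio_fops through a single cdev shared by all UIO devices.
 */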
static int uio_major_init(void)
{
	static const char name[] = "uio";
	struct cdev *cdev = NULL;
	dev_t uio_dev = 0;
	int result;

	result = alloc_chrdev_region(&uio_dev, 0, UIO_MAX_DEVICES, name);
	if (result)
		goto out;

	result = -ENOMEM;
	cdev = cdev_alloc();
	if (!cdev)
		goto out_unregister;

	cdev->owner = THIS_MODULE;
	cdev->ops = &uio_fops;
	kobject_set_name(&cdev->kobj, "%s", name);

	result = cdev_add(cdev, uio_dev, UIO_MAX_DEVICES);
	if (result)
		goto out_put;

	uio_major = MAJOR(uio_dev);
	uio_cdev = cdev;
	return 0;
out_put:
	kobject_put(&cdev->kobj);
out_unregister:
	unregister_chrdev_region(uio_dev, UIO_MAX_DEVICES);
out:
	return result;
}

static void uio_major_cleanup(void)
{
	unregister_chrdev_region(MKDEV(uio_major, 0), UIO_MAX_DEVICES);
	cdev_del(uio_cdev);
}

static int init_uio_class(void)
{
	int ret;

	/* This is the first time in here, set everything up properly */
	ret = uio_major_init();
	if (ret)
		goto exit;

	ret = class_register(&uio_class);
	if (ret) {
		printk(KERN_ERR "class_register failed for uio\n");
		goto err_class_register;
	}
	return 0;

err_class_register:
	uio_major_cleanup();
exit:
	return ret;
}

static void release_uio_class(void)
{
	class_unregister(&uio_class);
	uio_major_cleanup();
}

/**
 * uio_register_device - register a new userspace IO device
 * @owner:	module that creates the new device
 * @parent:	parent device
 * @info:	UIO device capabilities
 *
 * returns zero on success or a negative error code.
 */
int __uio_register_device(struct module *owner,
			  struct device *parent,
			  struct uio_info *info)
{
	struct uio_device *idev;
	int ret = 0;

	if (!parent || !info || !info->name || !info->version)
		return -EINVAL;

	info->uio_dev = NULL;

	idev = devm_kzalloc(parent, sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->owner = owner;
	idev->info = info;
	init_waitqueue_head(&idev->wait);
	atomic_set(&idev->event, 0);

	ret = uio_get_minor(idev);
	if (ret)
		return ret;

	idev->dev = device_create(&uio_class, parent,
				  MKDEV(uio_major, idev->minor), idev,
				  "uio%d", idev->minor);
	if (IS_ERR(idev->dev)) {
		printk(KERN_ERR "UIO: device register failed\n");
		ret = PTR_ERR(idev->dev);
		goto err_device_create;
	}

	ret = uio_dev_add_attributes(idev);
	if (ret)
		goto err_uio_dev_add_attributes;

	info->uio_dev = idev;

	if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
		/*
		 * Note that we deliberately don't use devm_request_irq
		 * here. The parent module can unregister the UIO device
		 * and call pci_disable_msi, which requires that this
		 * irq has been freed. However, the device may have open
		 * FDs at the time of unregister and therefore may not be
		 * freed until they are released.
		 */
		ret = request_irq(info->irq, uio_interrupt,
				  info->irq_flags, info->name, idev);
		if (ret)
			goto err_request_irq;
	}

	return 0;

err_request_irq:
	uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
	device_destroy(&uio_class, MKDEV(uio_major, idev->minor));
err_device_create:
	uio_free_minor(idev);
	return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);

/**
 * uio_unregister_device - unregister a UIO device
 * @info: UIO device capabilities
 *
 */
void uio_unregister_device(struct uio_info *info)
{
	struct uio_device *idev;

	if (!info || !info->uio_dev)
		return;

	idev = info->uio_dev;

	uio_free_minor(idev);

	uio_dev_del_attributes(idev);

	if (info->irq && info->irq != UIO_IRQ_CUSTOM)
		free_irq(info->irq, idev);

	device_destroy(&uio_class, MKDEV(uio_major, idev->minor));

	return;
}
EXPORT_SYMBOL_GPL(uio_unregister_device);

static int __init uio_init(void)
{
	return init_uio_class();
}

static void __exit uio_exit(void)
{
	release_uio_class();
	idr_destroy(&uio_idr);
}

module_init(uio_init)
module_exit(uio_exit)
MODULE_LICENSE("GPL v2");