/*
 * drivers/staging/iio/industrialio-core.c
 * (extracted from the mirror_ubuntu-eoan-kernel gitweb mirror;
 *  web navigation header removed)
 */
1 /* The industrial I/O core
2 *
3 * Copyright (c) 2008 Jonathan Cameron
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * Based on elements of hwmon and input subsystems.
10 */
11
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/idr.h>
15 #include <linux/kdev_t.h>
16 #include <linux/err.h>
17 #include <linux/device.h>
18 #include <linux/fs.h>
19 #include <linux/poll.h>
20 #include <linux/sched.h>
21 #include <linux/wait.h>
22 #include <linux/cdev.h>
23 #include <linux/slab.h>
24 #include <linux/anon_inodes.h>
25 #include "iio.h"
26 #include "iio_core.h"
27 #include "iio_core_trigger.h"
28 #include "chrdev.h"
29 #include "sysfs.h"
30
/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);

/* First dev_t of the char-device region allocated in iio_init() */
static dev_t iio_devt;

/* Size of the char-device region, i.e. max number of IIO devices */
#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);
41
/* sysfs name element for raw vs. processed channel values */
static const char * const iio_data_type_name[] = {
	[IIO_RAW] = "raw",
	[IIO_PROCESSED] = "input",
};

/* sysfs name prefix: indexed by chan->output (0 = input, 1 = output) */
static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};

/* sysfs name element for each channel type, indexed by chan->type */
static const char * const iio_chan_type_name_spec[] = {
	[IIO_VOLTAGE] = "voltage",
	[IIO_CURRENT] = "current",
	[IIO_POWER] = "power",
	[IIO_ACCEL] = "accel",
	[IIO_ANGL_VEL] = "anglvel",
	[IIO_MAGN] = "magn",
	[IIO_LIGHT] = "illuminance",
	[IIO_INTENSITY] = "intensity",
	[IIO_PROXIMITY] = "proximity",
	[IIO_TEMP] = "temp",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_ANGL] = "angl",
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_CAPACITANCE] = "capacitance",
};

/* sysfs name element for channel modifiers, indexed by chan->channel2 */
static const char * const iio_modifier_names[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
};

/*
 * Postfix for each info_mask element. The mask enums come in
 * shared/separate pairs, hence the division by two when indexing.
 */
static const char * const iio_chan_info_postfix[] = {
	[IIO_CHAN_INFO_SCALE_SHARED/2] = "scale",
	[IIO_CHAN_INFO_OFFSET_SHARED/2] = "offset",
	[IIO_CHAN_INFO_CALIBSCALE_SHARED/2] = "calibscale",
	[IIO_CHAN_INFO_CALIBBIAS_SHARED/2] = "calibbias",
	[IIO_CHAN_INFO_PEAK_SHARED/2] = "peak_raw",
	[IIO_CHAN_INFO_PEAK_SCALE_SHARED/2] = "peak_scale",
	[IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SHARED/2]
	= "quadrature_correction_raw",
	[IIO_CHAN_INFO_AVERAGE_RAW_SHARED/2] = "mean_raw",
};
90
/**
 * struct iio_detected_event_list - list element for events that have occurred
 * @list: linked list header
 * @ev: the event itself
 */
struct iio_detected_event_list {
	struct list_head list;
	struct iio_event_data ev;
};

/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait: wait queue to allow blocking reads of events
 * @event_list_lock: mutex to protect the list of detected events
 * @det_events: list of detected events
 * @max_events: maximum number of events before new ones are dropped
 * @current_events: number of events in detected list
 * @dev_attr_list: list of dynamically created event sysfs attributes
 * @flags: file operations related flags including busy flag
 * @group: sysfs attribute group holding the event attributes
 */
struct iio_event_interface {
	wait_queue_head_t wait;
	struct mutex event_list_lock;
	struct list_head det_events;
	int max_events;
	int current_events;
	struct list_head dev_attr_list;
	unsigned long flags;
	struct attribute_group group;
};
121
/**
 * iio_push_event() - queue an event for userspace reading
 * @indio_dev: IIO device the event occurred on
 * @ev_code: packed code identifying the event
 * @timestamp: timestamp of the event
 *
 * Returns 0 on success or if the event was dropped (no reader has the
 * event chrdev open, or the queue is full), -ENOMEM on allocation failure.
 */
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	struct iio_detected_event_list *ev;
	int ret = 0;

	/* Does anyone care? */
	mutex_lock(&ev_int->event_list_lock);
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		/* Queue full: silently drop rather than block the caller */
		if (ev_int->current_events == ev_int->max_events) {
			mutex_unlock(&ev_int->event_list_lock);
			return 0;
		}
		ev = kmalloc(sizeof(*ev), GFP_KERNEL);
		if (ev == NULL) {
			ret = -ENOMEM;
			mutex_unlock(&ev_int->event_list_lock);
			goto error_ret;
		}
		ev->ev.id = ev_code;
		ev->ev.timestamp = timestamp;

		list_add_tail(&ev->list, &ev_int->det_events);
		ev_int->current_events++;
		mutex_unlock(&ev_int->event_list_lock);
		/* Wake any reader blocked in iio_event_chrdev_read() */
		wake_up_interruptible(&ev_int->wait);
	} else
		mutex_unlock(&ev_int->event_list_lock);

error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_push_event);
155
156 /* This turns up an awful lot */
157 ssize_t iio_read_const_attr(struct device *dev,
158 struct device_attribute *attr,
159 char *buf)
160 {
161 return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string);
162 }
163 EXPORT_SYMBOL(iio_read_const_attr);
164
/*
 * Read exactly one queued event and copy it to userspace, blocking if
 * the queue is empty (unless O_NONBLOCK).
 * NOTE(review): @count is ignored; each read returns one full
 * struct iio_event_data regardless of the requested size.
 */
static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_event_interface *ev_int = filep->private_data;
	struct iio_detected_event_list *el;
	int ret;
	size_t len;

	mutex_lock(&ev_int->event_list_lock);
	if (list_empty(&ev_int->det_events)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_mutex_unlock;
		}
		mutex_unlock(&ev_int->event_list_lock);
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible(ev_int->wait,
					       !list_empty(&ev_int
							   ->det_events));
		if (ret)
			goto error_ret;
		/* Single access device so no one else can get the data */
		mutex_lock(&ev_int->event_list_lock);
	}

	el = list_first_entry(&ev_int->det_events,
			      struct iio_detected_event_list,
			      list);
	len = sizeof el->ev;
	if (copy_to_user(buf, &(el->ev), len)) {
		/* Event stays queued on failure; userspace may retry */
		ret = -EFAULT;
		goto error_mutex_unlock;
	}
	list_del(&el->list);
	ev_int->current_events--;
	mutex_unlock(&ev_int->event_list_lock);
	kfree(el);

	return len;

error_mutex_unlock:
	mutex_unlock(&ev_int->event_list_lock);
error_ret:

	return ret;
}
213
/* Release handler for the anon-inode event fd: drop busy flag, flush queue. */
static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_event_interface *ev_int = filep->private_data;
	struct iio_detected_event_list *el, *t;

	mutex_lock(&ev_int->event_list_lock);
	clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
	/*
	 * In order to maintain a clean state for reopening,
	 * clear out any awaiting events. The mask will prevent
	 * any new __iio_push_event calls running.
	 */
	list_for_each_entry_safe(el, t, &ev_int->det_events, list) {
		list_del(&el->list);
		kfree(el);
	}
	ev_int->current_events = 0;
	mutex_unlock(&ev_int->event_list_lock);

	return 0;
}
235
/* fops for the per-device event fd handed out by iio_event_getfd() */
static const struct file_operations iio_event_chrdev_fileops = {
	.read =  iio_event_chrdev_read,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
242
/*
 * Create and return an anon-inode fd for the device's event interface.
 * Only one open fd is allowed at a time, enforced via IIO_BUSY_BIT_POS.
 */
static int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	int fd;

	if (ev_int == NULL)
		return -ENODEV;

	mutex_lock(&ev_int->event_list_lock);
	if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		mutex_unlock(&ev_int->event_list_lock);
		return -EBUSY;
	}
	mutex_unlock(&ev_int->event_list_lock);
	fd = anon_inode_getfd("iio:event",
				&iio_event_chrdev_fileops, ev_int, O_RDONLY);
	if (fd < 0) {
		/* Roll back the busy flag so a later open can succeed */
		mutex_lock(&ev_int->event_list_lock);
		clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		mutex_unlock(&ev_int->event_list_lock);
	}
	return fd;
}
266
267 static int __init iio_init(void)
268 {
269 int ret;
270
271 /* Register sysfs bus */
272 ret = bus_register(&iio_bus_type);
273 if (ret < 0) {
274 printk(KERN_ERR
275 "%s could not register bus type\n",
276 __FILE__);
277 goto error_nothing;
278 }
279
280 ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
281 if (ret < 0) {
282 printk(KERN_ERR "%s: failed to allocate char dev region\n",
283 __FILE__);
284 goto error_unregister_bus_type;
285 }
286
287 return 0;
288
289 error_unregister_bus_type:
290 bus_unregister(&iio_bus_type);
291 error_nothing:
292 return ret;
293 }
294
/* Subsystem teardown: release chrdev region (if allocated) and the bus. */
static void __exit iio_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
	bus_unregister(&iio_bus_type);
}
301
302 static ssize_t iio_read_channel_info(struct device *dev,
303 struct device_attribute *attr,
304 char *buf)
305 {
306 struct iio_dev *indio_dev = dev_get_drvdata(dev);
307 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
308 int val, val2;
309 int ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
310 &val, &val2, this_attr->address);
311
312 if (ret < 0)
313 return ret;
314
315 if (ret == IIO_VAL_INT)
316 return sprintf(buf, "%d\n", val);
317 else if (ret == IIO_VAL_INT_PLUS_MICRO) {
318 if (val2 < 0)
319 return sprintf(buf, "-%d.%06u\n", val, -val2);
320 else
321 return sprintf(buf, "%d.%06u\n", val, val2);
322 } else if (ret == IIO_VAL_INT_PLUS_NANO) {
323 if (val2 < 0)
324 return sprintf(buf, "-%d.%09u\n", val, -val2);
325 else
326 return sprintf(buf, "%d.%09u\n", val, val2);
327 } else
328 return 0;
329 }
330
/*
 * sysfs store handler for channel values. Parses a signed decimal
 * fixed-point number ("[-]int[.frac]") into an integer part and a
 * fractional part scaled to micro or nano units, then hands both to
 * the driver's write_raw() callback.
 */
static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, integer = 0, fract = 0, fract_mult = 100000;
	bool integer_part = true, negative = false;

	/* Assumes decimal - precision based on number of digits */
	if (!indio_dev->info->write_raw)
		return -EINVAL;

	/* Let the driver pick micro vs nano precision for this attribute */
	if (indio_dev->info->write_raw_get_fmt)
		switch (indio_dev->info->write_raw_get_fmt(indio_dev,
			this_attr->c, this_attr->address)) {
		case IIO_VAL_INT_PLUS_MICRO:
			fract_mult = 100000;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			fract_mult = 100000000;
			break;
		default:
			return -EINVAL;
		}

	if (buf[0] == '-') {
		negative = true;
		buf++;
	}

	while (*buf) {
		if ('0' <= *buf && *buf <= '9') {
			if (integer_part)
				integer = integer*10 + *buf - '0';
			else {
				/* First fract digit has the largest weight */
				fract += fract_mult*(*buf - '0');
				if (fract_mult == 1)
					break;
				fract_mult /= 10;
			}
		} else if (*buf == '\n') {
			/* A trailing newline is allowed; anything after is not */
			if (*(buf + 1) == '\0')
				break;
			else
				return -EINVAL;
		} else if (*buf == '.') {
			integer_part = false;
		} else {
			return -EINVAL;
		}
		buf++;
	}
	/* Sign applies to the integer part, or to fract for values < 1 */
	if (negative) {
		if (integer)
			integer = -integer;
		else
			fract = -fract;
	}

	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
					 integer, fract, this_attr->address);
	if (ret)
		return ret;

	return len;
}
399
400 static
401 int __iio_device_attr_init(struct device_attribute *dev_attr,
402 const char *postfix,
403 struct iio_chan_spec const *chan,
404 ssize_t (*readfunc)(struct device *dev,
405 struct device_attribute *attr,
406 char *buf),
407 ssize_t (*writefunc)(struct device *dev,
408 struct device_attribute *attr,
409 const char *buf,
410 size_t len),
411 bool generic)
412 {
413 int ret;
414 char *name_format, *full_postfix;
415 sysfs_attr_init(&dev_attr->attr);
416
417 /* Build up postfix of <extend_name>_<modifier>_postfix */
418 if (chan->modified) {
419 if (chan->extend_name)
420 full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
421 iio_modifier_names[chan
422 ->channel2],
423 chan->extend_name,
424 postfix);
425 else
426 full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
427 iio_modifier_names[chan
428 ->channel2],
429 postfix);
430 } else {
431 if (chan->extend_name == NULL)
432 full_postfix = kstrdup(postfix, GFP_KERNEL);
433 else
434 full_postfix = kasprintf(GFP_KERNEL,
435 "%s_%s",
436 chan->extend_name,
437 postfix);
438 }
439 if (full_postfix == NULL) {
440 ret = -ENOMEM;
441 goto error_ret;
442 }
443
444 if (chan->differential) { /* Differential can not have modifier */
445 if (generic)
446 name_format
447 = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
448 iio_direction[chan->output],
449 iio_chan_type_name_spec[chan->type],
450 iio_chan_type_name_spec[chan->type],
451 full_postfix);
452 else if (chan->indexed)
453 name_format
454 = kasprintf(GFP_KERNEL, "%s_%s%d-%s%d_%s",
455 iio_direction[chan->output],
456 iio_chan_type_name_spec[chan->type],
457 chan->channel,
458 iio_chan_type_name_spec[chan->type],
459 chan->channel2,
460 full_postfix);
461 else {
462 WARN_ON("Differential channels must be indexed\n");
463 ret = -EINVAL;
464 goto error_free_full_postfix;
465 }
466 } else { /* Single ended */
467 if (generic)
468 name_format
469 = kasprintf(GFP_KERNEL, "%s_%s_%s",
470 iio_direction[chan->output],
471 iio_chan_type_name_spec[chan->type],
472 full_postfix);
473 else if (chan->indexed)
474 name_format
475 = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
476 iio_direction[chan->output],
477 iio_chan_type_name_spec[chan->type],
478 chan->channel,
479 full_postfix);
480 else
481 name_format
482 = kasprintf(GFP_KERNEL, "%s_%s_%s",
483 iio_direction[chan->output],
484 iio_chan_type_name_spec[chan->type],
485 full_postfix);
486 }
487 if (name_format == NULL) {
488 ret = -ENOMEM;
489 goto error_free_full_postfix;
490 }
491 dev_attr->attr.name = kasprintf(GFP_KERNEL,
492 name_format,
493 chan->channel,
494 chan->channel2);
495 if (dev_attr->attr.name == NULL) {
496 ret = -ENOMEM;
497 goto error_free_name_format;
498 }
499
500 if (readfunc) {
501 dev_attr->attr.mode |= S_IRUGO;
502 dev_attr->show = readfunc;
503 }
504
505 if (writefunc) {
506 dev_attr->attr.mode |= S_IWUSR;
507 dev_attr->store = writefunc;
508 }
509 kfree(name_format);
510 kfree(full_postfix);
511
512 return 0;
513
514 error_free_name_format:
515 kfree(name_format);
516 error_free_full_postfix:
517 kfree(full_postfix);
518 error_ret:
519 return ret;
520 }
521
/* Free the name allocated by __iio_device_attr_init(). */
static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	kfree(dev_attr->attr.name);
}
526
/*
 * __iio_add_chan_devattr() - allocate, initialize and list a channel attribute
 * @postfix: final name element for the attribute
 * @chan: channel the attribute belongs to
 * @readfunc/@writefunc: show/store callbacks
 * @mask: value stored in the attribute's address field
 * @generic: whether this is the shared (non-indexed) form
 * @dev: device, used only for error reporting
 * @attr_list: list the new attribute is appended to
 *
 * Returns 0, -EBUSY if an attribute of the same name already exists
 * (expected for shared attributes), or another negative errno.
 */
int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   bool generic,
			   struct device *dev,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof *iio_attr, GFP_KERNEL);
	if (iio_attr == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
				     readfunc, writefunc, generic);
	if (ret)
		goto error_iio_dev_attr_free;
	iio_attr->c = chan;
	iio_attr->address = mask;
	/* Duplicate names are only an error for non-generic attributes */
	list_for_each_entry(t, attr_list, l)
		if (strcmp(t->dev_attr.attr.name,
			   iio_attr->dev_attr.attr.name) == 0) {
			if (!generic)
				dev_err(dev, "tried to double register : %s\n",
					t->dev_attr.attr.name);
			ret = -EBUSY;
			goto error_device_attr_deinit;
		}
	list_add(&iio_attr->l, attr_list);

	return 0;

error_device_attr_deinit:
	__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
	kfree(iio_attr);
error_ret:
	return ret;
}
576
/*
 * Create the sysfs attributes for one channel: the raw/input value
 * attribute plus one attribute per set bit in info_mask.
 * Returns the number of attributes created, or a negative errno.
 */
static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	int ret, i, attrcount = 0;

	/* Negative channel numbers mark channels with no value attribute */
	if (chan->channel < 0)
		return 0;

	ret = __iio_add_chan_devattr(iio_data_type_name[chan->processed_val],
				     chan,
				     &iio_read_channel_info,
				     (chan->output ?
				      &iio_write_channel_info : NULL),
				     0,
				     0,
				     &indio_dev->dev,
				     &indio_dev->channel_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;

	/* info_mask bits come in shared/separate pairs, hence i/2 and i%2 */
	for_each_set_bit(i, &chan->info_mask, sizeof(long)*8) {
		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i/2],
					     chan,
					     &iio_read_channel_info,
					     &iio_write_channel_info,
					     (1 << i),
					     !(i%2),
					     &indio_dev->dev,
					     &indio_dev->channel_attr_list);
		/* Shared attribute already registered by another channel */
		if (ret == -EBUSY && (i%2 == 0)) {
			ret = 0;
			continue;
		}
		if (ret < 0)
			goto error_ret;
		attrcount++;
	}
	ret = attrcount;
error_ret:
	return ret;
}
619
/*
 * Free one dynamically created channel attribute.
 * NOTE(review): @indio_dev is currently unused here.
 */
static void iio_device_remove_and_free_read_attr(struct iio_dev *indio_dev,
						 struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}
626
/* sysfs "name" attribute: report the device's name string. */
static ssize_t iio_show_dev_name(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%s\n", indio_dev->name);
}

static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
636
637 static int iio_device_register_sysfs(struct iio_dev *indio_dev)
638 {
639 int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
640 struct iio_dev_attr *p, *n;
641 struct attribute **attr;
642
643 /* First count elements in any existing group */
644 if (indio_dev->info->attrs) {
645 attr = indio_dev->info->attrs->attrs;
646 while (*attr++ != NULL)
647 attrcount_orig++;
648 }
649 attrcount = attrcount_orig;
650 /*
651 * New channel registration method - relies on the fact a group does
652 * not need to be initialized if it is name is NULL.
653 */
654 INIT_LIST_HEAD(&indio_dev->channel_attr_list);
655 if (indio_dev->channels)
656 for (i = 0; i < indio_dev->num_channels; i++) {
657 ret = iio_device_add_channel_sysfs(indio_dev,
658 &indio_dev
659 ->channels[i]);
660 if (ret < 0)
661 goto error_clear_attrs;
662 attrcount += ret;
663 }
664
665 if (indio_dev->name)
666 attrcount++;
667
668 indio_dev->chan_attr_group.attrs
669 = kzalloc(sizeof(indio_dev->chan_attr_group.attrs[0])*
670 (attrcount + 1),
671 GFP_KERNEL);
672 if (indio_dev->chan_attr_group.attrs == NULL) {
673 ret = -ENOMEM;
674 goto error_clear_attrs;
675 }
676 /* Copy across original attributes */
677 if (indio_dev->info->attrs)
678 memcpy(indio_dev->chan_attr_group.attrs,
679 indio_dev->info->attrs->attrs,
680 sizeof(indio_dev->chan_attr_group.attrs[0])
681 *attrcount_orig);
682 attrn = attrcount_orig;
683 /* Add all elements from the list. */
684 list_for_each_entry(p, &indio_dev->channel_attr_list, l)
685 indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
686 if (indio_dev->name)
687 indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
688
689 indio_dev->groups[indio_dev->groupcounter++] =
690 &indio_dev->chan_attr_group;
691
692 return 0;
693
694 error_clear_attrs:
695 list_for_each_entry_safe(p, n,
696 &indio_dev->channel_attr_list, l) {
697 list_del(&p->l);
698 iio_device_remove_and_free_read_attr(indio_dev, p);
699 }
700
701 return ret;
702 }
703
/* Free all per-channel attributes and the attribute pointer array. */
static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
{

	struct iio_dev_attr *p, *n;

	list_for_each_entry_safe(p, n, &indio_dev->channel_attr_list, l) {
		list_del(&p->l);
		iio_device_remove_and_free_read_attr(indio_dev, p);
	}
	kfree(indio_dev->chan_attr_group.attrs);
}
715
/* sysfs name elements for event types, indexed by IIO_EV_TYPE_* */
static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};

/* sysfs name elements for event directions, indexed by IIO_EV_DIR_* */
static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};
729
/* sysfs store: enable/disable an event via the driver's callback. */
static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	bool val;

	/* Accepts 0/1/y/n etc. */
	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	/* this_attr->address carries the packed event code */
	ret = indio_dev->info->write_event_config(indio_dev,
						  this_attr->address,
						  val);
	return (ret < 0) ? ret : len;
}
749
750 static ssize_t iio_ev_state_show(struct device *dev,
751 struct device_attribute *attr,
752 char *buf)
753 {
754 struct iio_dev *indio_dev = dev_get_drvdata(dev);
755 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
756 int val = indio_dev->info->read_event_config(indio_dev,
757 this_attr->address);
758
759 if (val < 0)
760 return val;
761 else
762 return sprintf(buf, "%d\n", val);
763 }
764
/* sysfs show: report the threshold/value associated with an event. */
static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, ret;

	ret = indio_dev->info->read_event_value(indio_dev,
						this_attr->address, &val);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", val);
}
780
781 static ssize_t iio_ev_value_store(struct device *dev,
782 struct device_attribute *attr,
783 const char *buf,
784 size_t len)
785 {
786 struct iio_dev *indio_dev = dev_get_drvdata(dev);
787 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
788 unsigned long val;
789 int ret;
790
791 ret = strict_strtoul(buf, 10, &val);
792 if (ret)
793 return ret;
794
795 ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
796 val);
797 if (ret < 0)
798 return ret;
799
800 return len;
801 }
802
/*
 * Create the "_en" and "_value" sysfs attributes for every event a
 * channel supports (one pair per set bit in event_mask). The packed
 * event code is stored in each attribute's address field.
 * Returns the number of attributes created, or a negative errno.
 */
static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
				      struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	u64 mask = 0;
	char *postfix;
	if (!chan->event_mask)
		return 0;

	/* Bits are laid out as type*IIO_EV_DIR_MAX + direction */
	for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
		postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		/* Pack the event code according to the channel's flavor */
		if (chan->modified)
			mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
						  i/IIO_EV_DIR_MAX,
						  i%IIO_EV_DIR_MAX);
		else if (chan->differential)
			mask = IIO_EVENT_CODE(chan->type,
					      0, 0,
					      i%IIO_EV_DIR_MAX,
					      i/IIO_EV_DIR_MAX,
					      0,
					      chan->channel,
					      chan->channel2);
		else
			mask = IIO_UNMOD_EVENT_CODE(chan->type,
						    chan->channel,
						    i/IIO_EV_DIR_MAX,
						    i%IIO_EV_DIR_MAX);

		ret = __iio_add_chan_devattr(postfix,
					     chan,
					     &iio_ev_state_show,
					     iio_ev_state_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
		postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		ret = __iio_add_chan_devattr(postfix, chan,
					     iio_ev_value_show,
					     iio_ev_value_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
	}
	ret = attrcount;
error_ret:
	return ret;
}
875
/*
 * Free every dynamically created event attribute. The list itself is
 * not reinitialized: callers free the event_interface right after.
 */
static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	list_for_each_entry_safe(p, n,
				 &indio_dev->event_interface->
				 dev_attr_list, l) {
		kfree(p->dev_attr.attr.name);
		kfree(p);
	}
}
886
/*
 * Build event attributes for all channels.
 * Returns the total attribute count or a negative errno (on failure,
 * attributes created so far are freed).
 */
static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int j, ret, attrcount = 0;

	INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
	/* Dynically created from the channels array */
	for (j = 0; j < indio_dev->num_channels; j++) {
		ret = iio_device_add_event_sysfs(indio_dev,
						 &indio_dev->channels[j]);
		if (ret < 0)
			goto error_clear_attrs;
		attrcount += ret;
	}
	return attrcount;

error_clear_attrs:
	__iio_remove_event_config_attrs(indio_dev);

	return ret;
}
907
908 static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
909 {
910 int j;
911
912 for (j = 0; j < indio_dev->num_channels; j++)
913 if (indio_dev->channels[j].event_mask != 0)
914 return true;
915 return false;
916 }
917
918 static void iio_setup_ev_int(struct iio_event_interface *ev_int)
919 {
920 mutex_init(&ev_int->event_list_lock);
921 /* discussion point - make this variable? */
922 ev_int->max_events = 10;
923 ev_int->current_events = 0;
924 INIT_LIST_HEAD(&ev_int->det_events);
925 init_waitqueue_head(&ev_int->wait);
926 }
927
static const char *iio_event_group_name = "events";
/*
 * Set up the "events" sysfs group and the event queue for a device.
 * Does nothing (returns 0) if neither the driver nor any channel
 * declares events. Returns 0 or a negative errno.
 */
static int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	indio_dev->event_interface =
		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (indio_dev->event_interface == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	iio_setup_ev_int(indio_dev->event_interface);
	/* Count driver-supplied static event attributes */
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	/* Assemble the NULL-terminated attribute array for the group */
	indio_dev->event_interface->group.name = iio_event_group_name;
	indio_dev->event_interface->group.attrs =
		kzalloc(sizeof(indio_dev->event_interface->group.attrs[0])
			*(attrcount + 1),
			GFP_KERNEL);
	if (indio_dev->event_interface->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(indio_dev->event_interface->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(indio_dev->event_interface->group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p,
			    &indio_dev->event_interface->dev_attr_list,
			    l)
		indio_dev->event_interface->group.attrs[attrn++] =
			&p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->event_interface->group;

	return 0;

error_free_setup_event_lines:
	__iio_remove_event_config_attrs(indio_dev);
	kfree(indio_dev->event_interface);
error_ret:

	return ret;
}
993
/* Tear down the event interface created by iio_device_register_eventset(). */
static void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
	if (indio_dev->event_interface == NULL)
		return;
	__iio_remove_event_config_attrs(indio_dev);
	kfree(indio_dev->event_interface->group.attrs);
	kfree(indio_dev->event_interface);
}
1002
/* Device release callback: final teardown once the last reference drops. */
static void iio_dev_release(struct device *device)
{
	struct iio_dev *indio_dev = container_of(device, struct iio_dev, dev);
	cdev_del(&indio_dev->chrdev);
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
		iio_device_unregister_trigger_consumer(indio_dev);
	iio_device_unregister_eventset(indio_dev);
	iio_device_unregister_sysfs(indio_dev);
}

static struct device_type iio_dev_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};
1017
/**
 * iio_allocate_device() - allocate an iio_dev plus optional driver private data
 * @sizeof_priv: size of the private data area appended after the iio_dev
 *
 * Returns the new device or NULL on failure. Pair with iio_free_device().
 */
struct iio_dev *iio_allocate_device(int sizeof_priv)
{
	struct iio_dev *dev;
	size_t alloc_size;

	alloc_size = sizeof(struct iio_dev);
	if (sizeof_priv) {
		alloc_size = ALIGN(alloc_size, IIO_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct ? */
	alloc_size += IIO_ALIGN - 1;

	dev = kzalloc(alloc_size, GFP_KERNEL);

	if (dev) {
		dev->dev.groups = dev->groups;
		dev->dev.type = &iio_dev_type;
		dev->dev.bus = &iio_bus_type;
		device_initialize(&dev->dev);
		dev_set_drvdata(&dev->dev, (void *)dev);
		mutex_init(&dev->mlock);

		dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
		if (dev->id < 0) {
			/* cannot use a dev_err as the name isn't available */
			printk(KERN_ERR "Failed to get id\n");
			/*
			 * NOTE(review): kfree after device_initialize()
			 * bypasses the release callback — looks acceptable
			 * here as no extra resources are held yet; confirm.
			 */
			kfree(dev);
			return NULL;
		}
		dev_set_name(&dev->dev, "iio:device%d", dev->id);
	}

	return dev;
}
EXPORT_SYMBOL(iio_allocate_device);
1054
1055 void iio_free_device(struct iio_dev *dev)
1056 {
1057 if (dev) {
1058 ida_simple_remove(&iio_ida, dev->id);
1059 kfree(dev);
1060 }
1061 }
1062 EXPORT_SYMBOL(iio_free_device);
1063
/**
 * iio_chrdev_open() - chrdev file open for buffer access and ioctls
 *
 * Stores the iio_dev in private_data and delegates to the buffer layer.
 */
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
	struct iio_dev *indio_dev = container_of(inode->i_cdev,
						struct iio_dev, chrdev);
	filp->private_data = indio_dev;

	return iio_chrdev_buffer_open(indio_dev);
}

/**
 * iio_chrdev_release() - chrdev file close buffer access and ioctls
 **/
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
	iio_chrdev_buffer_release(container_of(inode->i_cdev,
					       struct iio_dev, chrdev));
	return 0;
}
1085
/* Somewhat of a cross file organization violation - ioctls here are actually
 * event related */
static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct iio_dev *indio_dev = filp->private_data;
	int __user *ip = (int __user *)arg;
	int fd;

	if (cmd == IIO_GET_EVENT_FD_IOCTL) {
		fd = iio_event_getfd(indio_dev);
		/*
		 * NOTE(review): if copy_to_user fails here the freshly
		 * installed fd is never handed to userspace and leaks
		 * until process exit — confirm whether this needs fixing.
		 */
		if (copy_to_user(ip, &fd, sizeof(fd)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}
1102
/* fops for the per-device chrdev: buffer access plus event-fd ioctl */
static const struct file_operations iio_buffer_fileops = {
	.read = iio_buffer_read_first_n_outer_addr,
	.release = iio_chrdev_release,
	.open = iio_chrdev_open,
	.poll = iio_buffer_poll_addr,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = iio_ioctl,
};
1113
/**
 * iio_device_register() - register a device with the IIO subsystem
 * @indio_dev: device obtained from iio_allocate_device()
 *
 * Sets up sysfs, the event set, optional trigger consumer and the
 * character device. Returns 0 or a negative errno; on failure all
 * partially registered pieces are torn down in reverse order.
 */
int iio_device_register(struct iio_dev *indio_dev)
{
	int ret;

	/* configure elements for the chrdev */
	indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);

	ret = iio_device_register_sysfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_ret;
	}
	ret = iio_device_register_eventset(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
		iio_device_register_trigger_consumer(indio_dev);

	ret = device_add(&indio_dev->dev);
	if (ret < 0)
		goto error_unreg_eventset;
	cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
	indio_dev->chrdev.owner = indio_dev->info->driver_module;
	ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1);
	if (ret < 0)
		goto error_del_device;
	return 0;

error_del_device:
	device_del(&indio_dev->dev);
error_unreg_eventset:
	iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
	iio_device_unregister_sysfs(indio_dev);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_device_register);
1155 EXPORT_SYMBOL(iio_device_register);
1156
/*
 * Unregister a device; remaining teardown happens in iio_dev_release()
 * once the last reference is dropped.
 */
void iio_device_unregister(struct iio_dev *indio_dev)
{
	device_unregister(&indio_dev->dev);
}
EXPORT_SYMBOL(iio_device_unregister);
subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");