/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include "iio.h"
#include "iio_core.h"
#include "iio_core_trigger.h"
#include "chrdev.h"
#include "sysfs.h"
/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);

static dev_t iio_devt;

#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

static const char * const iio_data_type_name[] = {
	[IIO_RAW] = "raw",
	[IIO_PROCESSED] = "input",
};

static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};

static const char * const iio_chan_type_name_spec[] = {
	[IIO_VOLTAGE] = "voltage",
	[IIO_CURRENT] = "current",
	[IIO_POWER] = "power",
	[IIO_ACCEL] = "accel",
	[IIO_ANGL_VEL] = "anglvel",
	[IIO_MAGN] = "magn",
	[IIO_LIGHT] = "illuminance",
	[IIO_INTENSITY] = "intensity",
	[IIO_PROXIMITY] = "proximity",
	[IIO_TEMP] = "temp",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_ANGL] = "angl",
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_CAPACITANCE] = "capacitance",
};

static const char * const iio_modifier_names[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
};

/* relies on pairs of these shared then separate */
static const char * const iio_chan_info_postfix[] = {
	[IIO_CHAN_INFO_SCALE_SHARED/2] = "scale",
	[IIO_CHAN_INFO_OFFSET_SHARED/2] = "offset",
	[IIO_CHAN_INFO_CALIBSCALE_SHARED/2] = "calibscale",
	[IIO_CHAN_INFO_CALIBBIAS_SHARED/2] = "calibbias",
	[IIO_CHAN_INFO_PEAK_SHARED/2] = "peak_raw",
	[IIO_CHAN_INFO_PEAK_SCALE_SHARED/2] = "peak_scale",
	[IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SHARED/2]
		= "quadrature_correction_raw",
	[IIO_CHAN_INFO_AVERAGE_RAW_SHARED/2] = "mean_raw",
};

/**
 * struct iio_detected_event_list - list element for events that have occurred
 * @list:	linked list header
 * @ev:		the event itself
 */
struct iio_detected_event_list {
	struct list_head list;
	struct iio_event_data ev;
};
/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @event_list_lock:	mutex to protect the list of detected events
 * @det_events:		list of detected events
 * @max_events:		maximum number of events before new ones are dropped
 * @current_events:	number of events in detected list
 * @dev_attr_list:	list of event interface sysfs attributes
 * @flags:		file operations related flags including busy flag
 * @group:		event interface sysfs attribute group
 */
struct iio_event_interface {
	wait_queue_head_t wait;
	struct mutex event_list_lock;
	struct list_head det_events;
	int max_events;
	int current_events;
	struct list_head dev_attr_list;
	unsigned long flags;
	struct attribute_group group;
};
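/**
 * iio_push_event() - queue an event for userspace to read
 * @indio_dev:	IIO device on which the event occurred
 * @ev_code:	packed code identifying the event
 * @timestamp:	timestamp of the event
 *
 * Events are only queued while the event chrdev is open; once @max_events
 * are pending, further events are silently dropped.
 */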
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	struct iio_detected_event_list *ev;
	int ret = 0;

	/* Does anyone care? */
	mutex_lock(&ev_int->event_list_lock);
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		if (ev_int->current_events == ev_int->max_events) {
			mutex_unlock(&ev_int->event_list_lock);
			return 0;
		}
		ev = kmalloc(sizeof(*ev), GFP_KERNEL);
		if (ev == NULL) {
			ret = -ENOMEM;
			mutex_unlock(&ev_int->event_list_lock);
			goto error_ret;
		}
		ev->ev.id = ev_code;
		ev->ev.timestamp = timestamp;

		list_add_tail(&ev->list, &ev_int->det_events);
		ev_int->current_events++;
		mutex_unlock(&ev_int->event_list_lock);
		wake_up_interruptible(&ev_int->wait);
	} else
		mutex_unlock(&ev_int->event_list_lock);

error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_push_event);

/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string);
}
EXPORT_SYMBOL(iio_read_const_attr);
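/**
 * iio_event_chrdev_read() - read queued events from the event chrdev
 *
 * Blocks (unless O_NONBLOCK is set) until at least one event is queued,
 * then copies the oldest struct iio_event_data to userspace.
 */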
static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_event_interface *ev_int = filep->private_data;
	struct iio_detected_event_list *el;
	int ret;
	size_t len;

	mutex_lock(&ev_int->event_list_lock);
	if (list_empty(&ev_int->det_events)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_mutex_unlock;
		}
		mutex_unlock(&ev_int->event_list_lock);
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible(ev_int->wait,
					       !list_empty(&ev_int->det_events));
		if (ret)
			goto error_ret;
		/* Single access device so no one else can get the data */
		mutex_lock(&ev_int->event_list_lock);
	}

	el = list_first_entry(&ev_int->det_events,
			      struct iio_detected_event_list,
			      list);
	len = sizeof el->ev;
	if (copy_to_user(buf, &(el->ev), len)) {
		ret = -EFAULT;
		goto error_mutex_unlock;
	}
	list_del(&el->list);
	ev_int->current_events--;
	mutex_unlock(&ev_int->event_list_lock);
	kfree(el);

	return len;

error_mutex_unlock:
	mutex_unlock(&ev_int->event_list_lock);
error_ret:

	return ret;
}
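/**
 * iio_event_chrdev_release() - clear the busy flag and discard any events
 *				still queued, ready for a clean reopen
 */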
static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_event_interface *ev_int = filep->private_data;
	struct iio_detected_event_list *el, *t;

	mutex_lock(&ev_int->event_list_lock);
	clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
	/*
	 * In order to maintain a clean state for reopening,
	 * clear out any awaiting events. The mask will prevent
	 * any new __iio_push_event calls running.
	 */
	list_for_each_entry_safe(el, t, &ev_int->det_events, list) {
		list_del(&el->list);
		kfree(el);
	}
	ev_int->current_events = 0;
	mutex_unlock(&ev_int->event_list_lock);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read = iio_event_chrdev_read,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
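/**
 * iio_event_getfd() - hand out an anonymous fd for the device's event queue
 *
 * Only one reader may hold the event interface open at a time; a second
 * request fails with -EBUSY until the first fd is released.
 */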
static int iio_event_getfd(struct iio_dev *indio_dev)
{
	if (indio_dev->event_interface == NULL)
		return -ENODEV;

	mutex_lock(&indio_dev->event_interface->event_list_lock);
	if (test_and_set_bit(IIO_BUSY_BIT_POS,
			     &indio_dev->event_interface->flags)) {
		mutex_unlock(&indio_dev->event_interface->event_list_lock);
		return -EBUSY;
	}
	mutex_unlock(&indio_dev->event_interface->event_list_lock);
	return anon_inode_getfd("iio:event",
				&iio_event_chrdev_fileops,
				indio_dev->event_interface, O_RDONLY);
}

static int __init iio_init(void)
{
	int ret;

	/* Register sysfs bus */
	ret = bus_register(&iio_bus_type);
	if (ret < 0) {
		printk(KERN_ERR
		       "%s could not register bus type\n",
		       __FILE__);
		goto error_nothing;
	}

	ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (ret < 0) {
		printk(KERN_ERR "%s: failed to allocate char dev region\n",
		       __FILE__);
		goto error_unregister_bus_type;
	}

	return 0;

error_unregister_bus_type:
	bus_unregister(&iio_bus_type);
error_nothing:
	return ret;
}

static void __exit iio_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
	bus_unregister(&iio_bus_type);
}
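/**
 * iio_read_channel_info() - sysfs read callback for a channel attribute
 *
 * Calls the driver's read_raw() callback and formats the result as a plain
 * integer or as a fixed-point decimal with micro or nano precision,
 * depending on the return code.
 */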
static ssize_t iio_read_channel_info(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2;
	int ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
					    &val, &val2, this_attr->address);

	if (ret < 0)
		return ret;

	if (ret == IIO_VAL_INT)
		return sprintf(buf, "%d\n", val);
	else if (ret == IIO_VAL_INT_PLUS_MICRO) {
		if (val2 < 0)
			return sprintf(buf, "-%d.%06u\n", val, -val2);
		else
			return sprintf(buf, "%d.%06u\n", val, val2);
	} else if (ret == IIO_VAL_INT_PLUS_NANO) {
		if (val2 < 0)
			return sprintf(buf, "-%d.%09u\n", val, -val2);
		else
			return sprintf(buf, "%d.%09u\n", val, val2);
	} else
		return 0;
}
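/**
 * iio_write_channel_info() - sysfs write callback for a channel attribute
 *
 * Parses a decimal string (optionally negative, optionally with a
 * fractional part) and passes the integer and fractional components to
 * the driver's write_raw() callback.
 */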
static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, integer = 0, fract = 0, fract_mult = 100000;
	bool integer_part = true, negative = false;

	/* Assumes decimal - precision based on number of digits */
	if (!indio_dev->info->write_raw)
		return -EINVAL;

	if (indio_dev->info->write_raw_get_fmt)
		switch (indio_dev->info->write_raw_get_fmt(indio_dev,
			this_attr->c, this_attr->address)) {
		case IIO_VAL_INT_PLUS_MICRO:
			fract_mult = 100000;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			fract_mult = 100000000;
			break;
		default:
			return -EINVAL;
		}

	if (buf[0] == '-') {
		negative = true;
		buf++;
	}

	while (*buf) {
		if ('0' <= *buf && *buf <= '9') {
			if (integer_part)
				integer = integer*10 + *buf - '0';
			else {
				fract += fract_mult*(*buf - '0');
				if (fract_mult == 1)
					break;
				fract_mult /= 10;
			}
		} else if (*buf == '\n') {
			if (*(buf + 1) == '\0')
				break;
			else
				return -EINVAL;
		} else if (*buf == '.') {
			integer_part = false;
		} else {
			return -EINVAL;
		}
		buf++;
	}
	if (negative) {
		if (integer)
			integer = -integer;
		else
			fract = -fract;
	}

	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
					 integer, fract, this_attr->address);
	if (ret)
		return ret;

	return len;
}
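/**
 * __iio_device_attr_init() - build the sysfs name and callbacks for one
 *			      channel attribute
 *
 * The attribute name is assembled from direction, channel type, optional
 * index/modifier/extend_name and the supplied postfix, e.g.
 * "in_voltage0_scale" for an indexed single-ended input channel.
 */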
static
int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   bool generic)
{
	int ret;
	char *name_format, *full_postfix;
	sysfs_attr_init(&dev_attr->attr);

	/* Build up postfix of <extend_name>_<modifier>_postfix */
	if (chan->modified) {
		if (chan->extend_name)
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_modifier_names[chan->channel2],
						 chan->extend_name,
						 postfix);
		else
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
						 iio_modifier_names[chan->channel2],
						 postfix);
	} else {
		if (chan->extend_name == NULL)
			full_postfix = kstrdup(postfix, GFP_KERNEL);
		else
			full_postfix = kasprintf(GFP_KERNEL,
						 "%s_%s",
						 chan->extend_name,
						 postfix);
	}
	if (full_postfix == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	if (chan->differential) { /* Differential can not have modifier */
		if (generic)
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
		else if (chan->indexed)
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s%d-%s%d_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    chan->channel,
					    iio_chan_type_name_spec[chan->type],
					    chan->channel2,
					    full_postfix);
		else {
			WARN(1, "Differential channels must be indexed\n");
			ret = -EINVAL;
			goto error_free_full_postfix;
		}
	} else { /* Single ended */
		if (generic)
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
		else if (chan->indexed)
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s%d_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    chan->channel,
					    full_postfix);
		else
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
	}
	if (name_format == NULL) {
		ret = -ENOMEM;
		goto error_free_full_postfix;
	}
	dev_attr->attr.name = kasprintf(GFP_KERNEL,
					name_format,
					chan->channel,
					chan->channel2);
	if (dev_attr->attr.name == NULL) {
		ret = -ENOMEM;
		goto error_free_name_format;
	}

	if (readfunc) {
		dev_attr->attr.mode |= S_IRUGO;
		dev_attr->show = readfunc;
	}

	if (writefunc) {
		dev_attr->attr.mode |= S_IWUSR;
		dev_attr->store = writefunc;
	}
	kfree(name_format);
	kfree(full_postfix);

	return 0;

error_free_name_format:
	kfree(name_format);
error_free_full_postfix:
	kfree(full_postfix);
error_ret:
	return ret;
}

static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	kfree(dev_attr->attr.name);
}
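/**
 * __iio_add_chan_devattr() - allocate, initialize and list a channel attribute
 *
 * Returns -EBUSY if an attribute of the same name is already on @attr_list;
 * callers treat that as harmless for generic (shared) attributes.
 */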
int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   bool generic,
			   struct device *dev,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof *iio_attr, GFP_KERNEL);
	if (iio_attr == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
				     readfunc, writefunc, generic);
	if (ret)
		goto error_iio_dev_attr_free;
	iio_attr->c = chan;
	iio_attr->address = mask;
	list_for_each_entry(t, attr_list, l)
		if (strcmp(t->dev_attr.attr.name,
			   iio_attr->dev_attr.attr.name) == 0) {
			if (!generic)
				dev_err(dev, "tried to double register : %s\n",
					t->dev_attr.attr.name);
			ret = -EBUSY;
			goto error_device_attr_deinit;
		}
	list_add(&iio_attr->l, attr_list);

	return 0;

error_device_attr_deinit:
	__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
	kfree(iio_attr);
error_ret:
	return ret;
}
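/**
 * iio_device_add_channel_sysfs() - create the raw/input attribute and one
 *				    attribute per bit set in chan->info_mask
 *
 * Returns the number of attributes added, or a negative error code.
 * -EBUSY for a shared attribute that already exists is ignored.
 */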
static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	int ret, i, attrcount = 0;

	if (chan->channel < 0)
		return 0;

	ret = __iio_add_chan_devattr(iio_data_type_name[chan->processed_val],
				     chan,
				     &iio_read_channel_info,
				     (chan->output ?
				      &iio_write_channel_info : NULL),
				     0,
				     0,
				     &indio_dev->dev,
				     &indio_dev->channel_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;

	for_each_set_bit(i, &chan->info_mask, sizeof(long)*8) {
		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i/2],
					     chan,
					     &iio_read_channel_info,
					     &iio_write_channel_info,
					     (1 << i),
					     !(i%2),
					     &indio_dev->dev,
					     &indio_dev->channel_attr_list);
		if (ret == -EBUSY && (i%2 == 0)) {
			ret = 0;
			continue;
		}
		if (ret < 0)
			goto error_ret;
		attrcount++;
	}
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_device_remove_and_free_read_attr(struct iio_dev *indio_dev,
						 struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static ssize_t iio_show_dev_name(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%s\n", indio_dev->name);
}

static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
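/**
 * iio_device_register_sysfs() - build the device's main attribute group
 *
 * Combines any driver supplied attributes with the per-channel attributes
 * generated from indio_dev->channels and the "name" attribute.
 */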
static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
	int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
	struct iio_dev_attr *p, *n;
	struct attribute **attr;

	/* First count elements in any existing group */
	if (indio_dev->info->attrs) {
		attr = indio_dev->info->attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/*
	 * New channel registration method - relies on the fact that a group
	 * does not need to be initialized if its name is NULL.
	 */
	INIT_LIST_HEAD(&indio_dev->channel_attr_list);
	if (indio_dev->channels)
		for (i = 0; i < indio_dev->num_channels; i++) {
			ret = iio_device_add_channel_sysfs(indio_dev,
							   &indio_dev->channels[i]);
			if (ret < 0)
				goto error_clear_attrs;
			attrcount += ret;
		}

	if (indio_dev->name)
		attrcount++;

	indio_dev->chan_attr_group.attrs
		= kzalloc(sizeof(indio_dev->chan_attr_group.attrs[0])*
			  (attrcount + 1),
			  GFP_KERNEL);
	if (indio_dev->chan_attr_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_clear_attrs;
	}
	/* Copy across original attributes */
	if (indio_dev->info->attrs)
		memcpy(indio_dev->chan_attr_group.attrs,
		       indio_dev->info->attrs->attrs,
		       sizeof(indio_dev->chan_attr_group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &indio_dev->channel_attr_list, l)
		indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
	if (indio_dev->name)
		indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;

	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->chan_attr_group;

	return 0;

error_clear_attrs:
	list_for_each_entry_safe(p, n,
				 &indio_dev->channel_attr_list, l) {
		list_del(&p->l);
		iio_device_remove_and_free_read_attr(indio_dev, p);
	}

	return ret;
}

static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
{

	struct iio_dev_attr *p, *n;

	list_for_each_entry_safe(p, n, &indio_dev->channel_attr_list, l) {
		list_del(&p->l);
		iio_device_remove_and_free_read_attr(indio_dev, p);
	}
	kfree(indio_dev->chan_attr_group.attrs);
}

static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};

static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};

static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	bool val;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = indio_dev->info->write_event_config(indio_dev,
						  this_attr->address,
						  val);
	return (ret < 0) ? ret : len;
}

static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val = indio_dev->info->read_event_config(indio_dev,
						     this_attr->address);

	if (val < 0)
		return val;
	else
		return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, ret;

	ret = indio_dev->info->read_event_value(indio_dev,
						this_attr->address, &val);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
						 val);
	if (ret < 0)
		return ret;

	return len;
}
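/**
 * iio_device_add_event_sysfs() - create the _en and _value attributes for
 *				  every event set in chan->event_mask
 *
 * Returns the number of attributes added, or a negative error code.
 */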
static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
				      struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	u64 mask = 0;
	char *postfix;
	if (!chan->event_mask)
		return 0;

	for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
		postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		if (chan->modified)
			mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
						  i/IIO_EV_DIR_MAX,
						  i%IIO_EV_DIR_MAX);
		else if (chan->differential)
			mask = IIO_EVENT_CODE(chan->type,
					      0, 0,
					      i%IIO_EV_DIR_MAX,
					      i/IIO_EV_DIR_MAX,
					      0,
					      chan->channel,
					      chan->channel2);
		else
			mask = IIO_UNMOD_EVENT_CODE(chan->type,
						    chan->channel,
						    i/IIO_EV_DIR_MAX,
						    i%IIO_EV_DIR_MAX);

		ret = __iio_add_chan_devattr(postfix,
					     chan,
					     &iio_ev_state_show,
					     iio_ev_state_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
		postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		ret = __iio_add_chan_devattr(postfix, chan,
					     iio_ev_value_show,
					     iio_ev_value_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
	}
	ret = attrcount;
error_ret:
	return ret;
}

static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	list_for_each_entry_safe(p, n,
				 &indio_dev->event_interface->dev_attr_list, l) {
		kfree(p->dev_attr.attr.name);
		kfree(p);
	}
}

static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int j, ret, attrcount = 0;

	INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
	/* Dynamically created from the channels array */
	for (j = 0; j < indio_dev->num_channels; j++) {
		ret = iio_device_add_event_sysfs(indio_dev,
						 &indio_dev->channels[j]);
		if (ret < 0)
			goto error_clear_attrs;
		attrcount += ret;
	}
	return attrcount;

error_clear_attrs:
	__iio_remove_event_config_attrs(indio_dev);

	return ret;
}

static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
	int j;

	for (j = 0; j < indio_dev->num_channels; j++)
		if (indio_dev->channels[j].event_mask != 0)
			return true;
	return false;
}

static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	mutex_init(&ev_int->event_list_lock);
	/* discussion point - make this variable? */
	ev_int->max_events = 10;
	ev_int->current_events = 0;
	INIT_LIST_HEAD(&ev_int->det_events);
	init_waitqueue_head(&ev_int->wait);
}

static const char *iio_event_group_name = "events";
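/**
 * iio_device_register_eventset() - set up the "events" sysfs group
 *
 * Allocates the event interface and builds its attribute group from any
 * driver supplied event attributes plus those generated per channel.
 * Does nothing if the device provides neither.
 */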
static int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	indio_dev->event_interface =
		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (indio_dev->event_interface == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	iio_setup_ev_int(indio_dev->event_interface);
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	indio_dev->event_interface->group.name = iio_event_group_name;
	indio_dev->event_interface->group.attrs =
		kzalloc(sizeof(indio_dev->event_interface->group.attrs[0])
			*(attrcount + 1),
			GFP_KERNEL);
	if (indio_dev->event_interface->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(indio_dev->event_interface->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(indio_dev->event_interface->group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p,
			    &indio_dev->event_interface->dev_attr_list,
			    l)
		indio_dev->event_interface->group.attrs[attrn++] =
			&p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->event_interface->group;

	return 0;

error_free_setup_event_lines:
	__iio_remove_event_config_attrs(indio_dev);
	kfree(indio_dev->event_interface);
error_ret:

	return ret;
}

static void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
	if (indio_dev->event_interface == NULL)
		return;
	__iio_remove_event_config_attrs(indio_dev);
	kfree(indio_dev->event_interface->group.attrs);
	kfree(indio_dev->event_interface);
}

static void iio_dev_release(struct device *device)
{
	struct iio_dev *indio_dev = container_of(device, struct iio_dev, dev);
	cdev_del(&indio_dev->chrdev);
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
		iio_device_unregister_trigger_consumer(indio_dev);
	iio_device_unregister_eventset(indio_dev);
	iio_device_unregister_sysfs(indio_dev);
}

static struct device_type iio_dev_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};
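/**
 * iio_allocate_device() - allocate an iio_dev plus optional private data
 * @sizeof_priv:	size of the driver private data area to allocate
 *			alongside the struct iio_dev (may be 0)
 *
 * Returns the initialized device with a unique id assigned, or NULL on
 * failure.
 */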
struct iio_dev *iio_allocate_device(int sizeof_priv)
{
	struct iio_dev *dev;
	size_t alloc_size;

	alloc_size = sizeof(struct iio_dev);
	if (sizeof_priv) {
		alloc_size = ALIGN(alloc_size, IIO_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct ? */
	alloc_size += IIO_ALIGN - 1;

	dev = kzalloc(alloc_size, GFP_KERNEL);

	if (dev) {
		dev->dev.groups = dev->groups;
		dev->dev.type = &iio_dev_type;
		dev->dev.bus = &iio_bus_type;
		device_initialize(&dev->dev);
		dev_set_drvdata(&dev->dev, (void *)dev);
		mutex_init(&dev->mlock);

		dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
		if (dev->id < 0) {
			/* cannot use a dev_err as the name isn't available */
			printk(KERN_ERR "Failed to get id\n");
			kfree(dev);
			return NULL;
		}
		dev_set_name(&dev->dev, "iio:device%d", dev->id);
	}

	return dev;
}
EXPORT_SYMBOL(iio_allocate_device);
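/**
 * iio_free_device() - release the id and memory obtained from
 *		       iio_allocate_device()
 */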
void iio_free_device(struct iio_dev *dev)
{
	if (dev) {
		ida_simple_remove(&iio_ida, dev->id);
		kfree(dev);
	}
}
EXPORT_SYMBOL(iio_free_device);

/**
 * iio_chrdev_open() - chrdev file open for buffer access and ioctls
 **/
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
	struct iio_dev *indio_dev = container_of(inode->i_cdev,
						 struct iio_dev, chrdev);
	filp->private_data = indio_dev;

	return iio_chrdev_buffer_open(indio_dev);
}
/**
 * iio_chrdev_release() - chrdev file close for buffer access and ioctls
 **/
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
	iio_chrdev_buffer_release(container_of(inode->i_cdev,
					       struct iio_dev, chrdev));
	return 0;
}

/* Somewhat of a cross file organization violation - ioctls here are actually
 * event related */
static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct iio_dev *indio_dev = filp->private_data;
	int __user *ip = (int __user *)arg;
	int fd;

	if (cmd == IIO_GET_EVENT_FD_IOCTL) {
		fd = iio_event_getfd(indio_dev);
		if (copy_to_user(ip, &fd, sizeof(fd)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}

static const struct file_operations iio_buffer_fileops = {
	.read = iio_buffer_read_first_n_outer_addr,
	.release = iio_chrdev_release,
	.open = iio_chrdev_open,
	.poll = iio_buffer_poll_addr,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = iio_ioctl,
};
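/**
 * iio_device_register() - register a device with the IIO core
 * @indio_dev:	device obtained from iio_allocate_device()
 *
 * Sets up the sysfs interfaces, the event set and the character device.
 *
 * A minimal driver-side sketch (the "my_*" names are hypothetical):
 *
 *	indio_dev = iio_allocate_device(sizeof(struct my_state));
 *	if (indio_dev == NULL)
 *		return -ENOMEM;
 *	indio_dev->name = "my_sensor";
 *	indio_dev->info = &my_info;
 *	indio_dev->channels = my_channels;
 *	indio_dev->num_channels = ARRAY_SIZE(my_channels);
 *	ret = iio_device_register(indio_dev);
 *	if (ret)
 *		iio_free_device(indio_dev);
 */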
int iio_device_register(struct iio_dev *indio_dev)
{
	int ret;

	/* configure elements for the chrdev */
	indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);

	ret = iio_device_register_sysfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_ret;
	}
	ret = iio_device_register_eventset(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
		iio_device_register_trigger_consumer(indio_dev);

	ret = device_add(&indio_dev->dev);
	if (ret < 0)
		goto error_unreg_eventset;
	cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
	indio_dev->chrdev.owner = indio_dev->info->driver_module;
	ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1);
	if (ret < 0)
		goto error_del_device;
	return 0;

error_del_device:
	device_del(&indio_dev->dev);
error_unreg_eventset:
	iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
	iio_device_unregister_sysfs(indio_dev);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_device_register);

void iio_device_unregister(struct iio_dev *indio_dev)
{
	device_unregister(&indio_dev->dev);
}
EXPORT_SYMBOL(iio_device_unregister);
subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");