drivers/iio/inkern.c

/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

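/**
 * iio_map_array_register() - register consumer mappings for an IIO device
 * @indio_dev:	provider device the mappings refer to
 * @maps:	array of mappings, terminated by an entry whose
 *		consumer_dev_name is NULL
 *
 * Returns 0 on success or a negative errno; on allocation failure the
 * entries added so far are removed again.
 */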
int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi, *next;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	if (ret) {
		/* unwind the entries added before the allocation failed */
		list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
			if (mapi->indio_dev == indio_dev) {
				list_del(&mapi->l);
				kfree(mapi);
			}
		}
	}
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
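
/*
 * Example (hypothetical names): a board file can map the channel the
 * provider's datasheet calls "adc0" to a consumer device:
 *
 *	static struct iio_map foo_adc_map[] = {
 *		{
 *			.consumer_dev_name = "foo-battery.0",
 *			.consumer_channel = "battery_voltage",
 *			.adc_channel_label = "adc0",
 *		},
 *		{ },
 *	};
 *
 *	err = iio_map_array_register(indio_dev, foo_adc_map);
 *
 * The consumer side can then resolve the channel with
 * iio_channel_get(dev, "battery_voltage").
 */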

/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	mutex_lock(&iio_map_list_lock);
	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	mutex_unlock(&iio_map_list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

#ifdef CONFIG_OF

static int iio_dev_node_match(struct device *dev, void *data)
{
	return dev->of_node == data && dev->type == &iio_device_type;
}

/**
 * __of_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for the most common 1:1
 * mapped channels in IIO chips. It performs only one sanity check: whether
 * the IIO index is less than num_channels (as specified in the iio_dev).
 */
static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
				 const struct of_phandle_args *iiospec)
{
	if (!iiospec->args_count)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %u\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}

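/*
 * A typical binding uses one specifier cell to carry the channel index,
 * so a consumer referencing channel 1 of an ADC might look like this
 * (illustrative snippet, not taken from a specific binding):
 *
 *	adc: adc@48 {
 *		compatible = "vendor,adc";
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 1>;
 *	};
 *
 * With the simple xlate above, args[0] (here 1) is used directly as the
 * index into indio_dev->channels[].
 */
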
static int __of_iio_channel_get(struct iio_channel *channel,
				struct device_node *np, int index)
{
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;
	struct of_phandle_args iiospec;

	err = of_parse_phandle_with_args(np, "io-channels",
					 "#io-channel-cells",
					 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	of_node_put(iiospec.np);
	if (idev == NULL)
		return -EPROBE_DEFER;

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->of_xlate)
		index = indio_dev->info->of_xlate(indio_dev, &iiospec);
	else
		index = __of_iio_simple_xlate(indio_dev, &iiospec);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL)
		return ERR_PTR(-ENOMEM);

	err = __of_iio_channel_get(channel, np, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}

static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
						      const char *name)
{
	struct iio_channel *chan = NULL;

	/* Walk up the tree of devices looking for a matching iio channel */
	while (np) {
		int index = 0;

		/*
		 * For named iio channels, first look up the name in the
		 * "io-channel-names" property. If it cannot be found, the
		 * index will be an error code, and of_iio_channel_get()
		 * will fail.
		 */
		if (name)
			index = of_property_match_string(np, "io-channel-names",
							 name);
		chan = of_iio_channel_get(np, index);
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			break;
		else if (name && index >= 0) {
			pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
			       np->full_name, name, index);
			return NULL;
		}

		/*
		 * No matching IIO channel found on this node.
		 * If the parent node has an "io-channel-ranges" property,
		 * then we can try one of its channels.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "io-channel-ranges", NULL))
			return NULL;
	}

	return chan;
}

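/*
 * Consumers may name their channels; the name is then resolved to an
 * index via "io-channel-names" before the phandle lookup, e.g.
 * (illustrative snippet):
 *
 *	consumer {
 *		io-channels = <&adc 0>, <&adc 1>;
 *		io-channel-names = "vcc", "vbat";
 *	};
 *
 * A request for "vbat" resolves to index 1. If a node has no match, the
 * search continues at its parent, provided the parent carries the
 * "io-channel-ranges" property.
 */
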
static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = of_parse_phandle_with_args(dev->of_node,
						 "io-channels",
						 "#io-channel-cells",
						 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)	/* no error, return NULL to search map table */
		return NULL;

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL)
		return ERR_PTR(-ENOMEM);

	/* Search for OF matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
					   mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}

#else /* CONFIG_OF */

static inline struct iio_channel *
of_iio_channel_get_by_name(struct device_node *np, const char *name)
{
	return NULL;
}

static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	return NULL;
}

#endif /* CONFIG_OF */

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find a matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

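/**
 * iio_channel_get() - get a channel description for a consumer
 * @dev:		consumer device; its dev_name() must match the
 *			consumer_dev_name used when the map was registered
 * @channel_name:	unique name identifying the channel on the consumer
 *			side, e.g. "battery_voltage"
 *
 * The device tree ("io-channels") is consulted first, then the map table.
 * Returns a valid pointer, or an ERR_PTR() on failure.
 */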
struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = of_iio_channel_get_by_name(dev->of_node,
						     channel_name);
		if (channel != NULL)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);

void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);
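
/*
 * Typical consumer usage (hypothetical consumer code):
 *
 *	struct iio_channel *chan;
 *	int val, err;
 *
 *	chan = iio_channel_get(dev, "battery_voltage");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	err = iio_read_channel_raw(chan, &val);
 *	iio_channel_release(chan);
 */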

static void devm_iio_channel_free(struct device *dev, void *res)
{
	struct iio_channel *channel = *(struct iio_channel **)res;

	iio_channel_release(channel);
}

static int devm_iio_channel_match(struct device *dev, void *res, void *data)
{
	struct iio_channel **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	return *r == data;
}

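/**
 * devm_iio_channel_get() - Resource managed version of iio_channel_get().
 * @dev:		Pointer to the consumer device.
 * @channel_name:	Unique name identifying the channel on the consumer side.
 *
 * Returns a valid pointer, or an ERR_PTR() on failure. The acquired channel
 * is automatically released when @dev is unbound.
 */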
struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel **ptr, *channel;

	ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel)) {
		devres_free(ptr);
		return channel;
	}

	*ptr = channel;
	devres_add(dev, ptr);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);

void devm_iio_channel_release(struct device *dev, struct iio_channel *channel)
{
	WARN_ON(devres_release(dev, devm_iio_channel_free,
			       devm_iio_channel_match, channel));
}
EXPORT_SYMBOL_GPL(devm_iio_channel_release);

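/**
 * iio_channel_get_all() - get all channels associated with a client
 * @dev: consumer device
 *
 * Returns an array of iio_channel structures terminated by an entry whose
 * indio_dev is NULL, or an ERR_PTR() on failure.
 */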
struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (dev == NULL)
		return ERR_PTR(-EINVAL);

	chans = of_iio_channel_get_all(dev);
	if (chans)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name == NULL || strcmp(name, c->map->consumer_dev_name) == 0)
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	/* only the first mapind entries hold a device reference */
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(struct device *dev, void *res)
{
	struct iio_channel *channels = *(struct iio_channel **)res;

	iio_channel_release_all(channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel **ptr, *channels;

	ptr = devres_alloc(devm_iio_channel_free_all, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels)) {
		devres_free(ptr);
		return channels;
	}

	*ptr = channels;
	devres_add(dev, ptr);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);

void devm_iio_channel_release_all(struct device *dev,
				  struct iio_channel *channels)
{
	WARN_ON(devres_release(dev, devm_iio_channel_free_all,
			       devm_iio_channel_match, channels));
}
EXPORT_SYMBOL_GPL(devm_iio_channel_release_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (val2 == NULL)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);

	return ret;
}

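/**
 * iio_read_channel_raw() - read from a given channel
 * @chan:	The channel being queried.
 * @val:	Value read back.
 *
 * Note: raw reads from IIO channels are in ADC counts, so scale and
 * offset must be applied if standard units are required.
 */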
int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2, offset;
	s64 raw64 = raw;
	int ret;

	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
	if (ret >= 0)
		raw64 += offset;

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0)
		return scale_type;

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

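/**
 * iio_convert_raw_to_processed() - Convert a raw value to a processed value
 * @chan:	The channel being queried
 * @raw:	The raw IIO value to convert
 * @processed:	The result of the conversion
 * @scale:	Scale factor to apply during the conversion
 *
 * Returns 0 on success, or a negative errno. The scale factor multiplies
 * the result, so e.g. a @scale of 1000 yields milli-units of the channel's
 * base unit.
 */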
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);

static int iio_read_channel_attribute(struct iio_channel *chan,
				      int *val, int *val2,
				      enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);

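/**
 * iio_read_channel_processed() - read a processed value from a given channel
 * @chan:	The channel being queried.
 * @val:	Value read back.
 *
 * Returns 0 on success, or a negative errno. If the channel does not
 * provide a processed value directly, a raw read is performed and offset
 * and scale are applied.
 */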
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
	}

err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
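
/*
 * Sketch of a consumer reading a value in standard units (hypothetical
 * consumer code; the unit depends on the channel type, e.g. mV for a
 * voltage channel):
 *
 *	int mv, err;
 *
 *	err = iio_read_channel_processed(chan, &mv);
 *	if (err < 0)
 *		dev_warn(dev, "failed to read channel\n");
 */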

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
						 vals, type, length, info);
}

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_avail(chan,
				     vals, &type, length, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);

static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	int unused;
	const int *vals;
	int length;
	int ret;

	if (!val2)
		val2 = &unused;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[--length];
			while (length) {
				if (vals[--length] > *val)
					*val = vals[length];
			}
			break;
		default:
			/* FIXME: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return ret;
	}
}

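/**
 * iio_read_max_channel_raw() - read the maximum available raw value from a
 *				given channel, i.e. the maximum possible value
 * @chan:	The channel being queried.
 * @val:	Value read back.
 *
 * Note: the value is in ADC counts, like iio_read_channel_raw().
 */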
int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;
	int type;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);

static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}

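/**
 * iio_write_channel_raw() - write to a given channel
 * @chan:	The channel being queried.
 * @val:	Value being written.
 *
 * Note: raw writes to IIO channels are in ADC counts, so scale and
 * offset must be accounted for if standard units are required.
 */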
int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, 0, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);