// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	return ret;
}

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	if (ret)
		iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
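
/*
 * Example (hypothetical, for illustration only): a board file can hand a
 * NULL-terminated map table to iio_map_array_register() so that consumer
 * drivers can later look channels up by name.  The device and channel
 * names below are made up:
 *
 *	static struct iio_map adc_maps[] = {
 *		{
 *			.consumer_dev_name = "some-consumer.0",
 *			.consumer_channel = "vbat",
 *			.adc_channel_label = "CH0",
 *		},
 *		{ },
 *	};
 *
 *	iio_map_array_register(indio_dev, adc_maps);
 */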

/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret;

	mutex_lock(&iio_map_list_lock);
	ret = iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

#ifdef CONFIG_OF

static int iio_dev_node_match(struct device *dev, const void *data)
{
	return dev->of_node == data && dev->type == &iio_device_type;
}

/**
 * __of_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for the common case of
 * 1:1 mapped channels in IIO chips. It performs only one sanity check:
 * whether the IIO index is less than num_channels (as specified in the
 * iio_dev).
 */
static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
				 const struct of_phandle_args *iiospec)
{
	if (!iiospec->args_count)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %u\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}
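
/*
 * For illustration (hypothetical devicetree, node names made up): with the
 * default xlate above, a producer node declaring "#io-channel-cells = <1>"
 * is referenced by plain channel index, e.g.
 *
 *	adc: adc@48 {
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 2>;	// resolves to channel index 2
 *	};
 */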

static int __of_iio_channel_get(struct iio_channel *channel,
				struct device_node *np, int index)
{
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;
	struct of_phandle_args iiospec;

	err = of_parse_phandle_with_args(np, "io-channels",
					 "#io-channel-cells",
					 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	of_node_put(iiospec.np);
	if (idev == NULL)
		return -EPROBE_DEFER;

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->of_xlate)
		index = indio_dev->info->of_xlate(indio_dev, &iiospec);
	else
		index = __of_iio_simple_xlate(indio_dev, &iiospec);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL)
		return ERR_PTR(-ENOMEM);

	err = __of_iio_channel_get(channel, np, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}

static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
						      const char *name)
{
	struct iio_channel *chan = NULL;

	/* Walk up the tree of devices looking for a matching iio channel */
	while (np) {
		int index = 0;

		/*
		 * For named iio channels, first look up the name in the
		 * "io-channel-names" property. If it cannot be found, the
		 * index will be an error code, and of_iio_channel_get()
		 * will fail.
		 */
		if (name)
			index = of_property_match_string(np, "io-channel-names",
							 name);
		chan = of_iio_channel_get(np, index);
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			break;
		else if (name && index >= 0) {
			pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n",
			       np, name, index);
			return NULL;
		}

		/*
		 * No matching IIO channel found on this node.
		 * If the parent node has an "io-channel-ranges" property,
		 * then we can try one of its channels.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "io-channel-ranges", NULL))
			return NULL;
	}

	return chan;
}
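
/*
 * Hypothetical devicetree example (node and channel names are made up):
 *
 *	consumer {
 *		io-channels = <&adc 0>, <&adc 1>;
 *		io-channel-names = "vbat", "temp";
 *	};
 *
 * A lookup such as of_iio_channel_get_by_name(np, "temp") matches "temp"
 * against "io-channel-names" and so resolves to the <&adc 1> phandle.
 */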

static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = of_parse_phandle_with_args(dev->of_node,
						 "io-channels",
						 "#io-channel-cells",
						 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)	/* no error, return NULL to search map table */
		return NULL;

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL)
		return ERR_PTR(-ENOMEM);

	/* Search for OF matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
					   mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}

#else /* CONFIG_OF */

static inline struct iio_channel *
of_iio_channel_get_by_name(struct device_node *np, const char *name)
{
	return NULL;
}

static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	return NULL;
}

#endif /* CONFIG_OF */

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = of_iio_channel_get_by_name(dev->of_node,
						     channel_name);
		if (channel != NULL)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
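
/*
 * Sketch of typical consumer usage (hypothetical channel name, error
 * handling abbreviated):
 *
 *	struct iio_channel *chan;
 *	int val, ret;
 *
 *	chan = iio_channel_get(dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// may be -EPROBE_DEFER
 *	ret = iio_read_channel_raw(chan, &val);
 *	...
 *	iio_channel_release(chan);
 */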

void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

static void devm_iio_channel_free(struct device *dev, void *res)
{
	struct iio_channel *channel = *(struct iio_channel **)res;

	iio_channel_release(channel);
}

struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel **ptr, *channel;

	ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel)) {
		devres_free(ptr);
		return channel;
	}

	*ptr = channel;
	devres_add(dev, ptr);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
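
/*
 * With the devm_ variant the channel is released automatically when the
 * consumer device is unbound, so the explicit iio_channel_release() from
 * the sketch above is not needed (illustration only):
 *
 *	chan = devm_iio_channel_get(dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */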

struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (dev == NULL)
		return ERR_PTR(-EINVAL);

	chans = of_iio_channel_get_all(dev);
	if (chans)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	/* only drop references actually taken above, i.e. the first mapind */
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
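
/*
 * The returned array is NULL-terminated (indio_dev == NULL marks the end),
 * so a consumer can walk it without a separate count; a minimal sketch:
 *
 *	struct iio_channel *chans = iio_channel_get_all(dev);
 *	struct iio_channel *chan;
 *
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *	for (chan = chans; chan->indio_dev; chan++)
 *		...	// use chan
 *	iio_channel_release_all(chans);
 */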

void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(struct device *dev, void *res)
{
	struct iio_channel *channels = *(struct iio_channel **)res;

	iio_channel_release_all(channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel **ptr, *channels;

	ptr = devres_alloc(devm_iio_channel_free_all, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels)) {
		devres_free(ptr);
		return channels;
	}

	*ptr = channels;
	devres_add(dev, ptr);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (val2 == NULL)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else {
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);
	}

	return ret;
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2, offset;
	s64 raw64 = raw;
	int ret;

	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
	if (ret >= 0)
		raw64 += offset;

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available apply consumer scale to
		 * the raw value and return.
		 */
		*processed = raw * scale;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
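
/*
 * The scale argument lets a caller request a scaled-up processed value,
 * e.g. passing scale = 1000 to obtain microvolts when the channel scale
 * yields millivolts (hypothetical units, illustration only):
 *
 *	int raw, uv, ret;
 *
 *	ret = iio_read_channel_raw(chan, &raw);
 *	if (ret < 0)
 *		return ret;
 *	ret = iio_convert_raw_to_processed(chan, raw, &uv, 1000);
 */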

int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
	}

err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
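
/*
 * A minimal consumer sketch: read a value in the channel's processed unit.
 * The fallback to raw + scale conversion when the driver does not provide
 * IIO_CHAN_INFO_PROCESSED is handled transparently above:
 *
 *	int val, ret;
 *
 *	ret = iio_read_channel_processed(chan, &val);
 *	if (ret < 0)
 *		return ret;
 */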

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
						 vals, type, length, info);
}

int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
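
/*
 * read_avail() reports either an IIO_AVAIL_LIST of discrete values or an
 * IIO_AVAIL_RANGE triplet of [min, step, max]; iio_channel_read_max()
 * below picks the maximum out of either form.  For a range whose type is
 * not IIO_VAL_INT each element is a (val, val2) pair, i.e. the array is
 * {min, min2, step, step2, max, max2}, which is why the maximum lives at
 * indices 4 and 5.
 */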

static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	int unused;
	const int *vals;
	int length;
	int ret;

	if (!val2)
		val2 = &unused;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[--length];
			while (length) {
				if (vals[--length] > *val)
					*val = vals[length];
			}
			break;
		default:
			/* FIXME: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return ret;
	}
}

int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;
	int type;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);

static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}

int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);

int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
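
/*
 * Output channels (e.g. a DAC) are driven through the same consumer
 * interface; a minimal sketch with a hypothetical channel name and value:
 *
 *	struct iio_channel *dac = devm_iio_channel_get(dev, "vout");
 *
 *	if (IS_ERR(dac))
 *		return PTR_ERR(dac);
 *	iio_write_channel_raw(dac, 2048);	// raw code, device specific
 */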

unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);

static const struct iio_chan_spec_ext_info *
iio_lookup_ext_info(const struct iio_channel *chan, const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}

ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);

ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);