1 // SPDX-License-Identifier: GPL-2.0-only
2 /* The industrial I/O core in kernel channel mapping
4 * Copyright (c) 2011 Jonathan Cameron
7 #include <linux/export.h>
8 #include <linux/slab.h>
9 #include <linux/mutex.h>
12 #include <linux/iio/iio.h>
14 #include <linux/iio/machine.h>
15 #include <linux/iio/driver.h>
16 #include <linux/iio/consumer.h>
/*
 * NOTE(review): this chunk is a lossily-extracted fragment of the IIO
 * in-kernel channel-mapping core.  Original line numbers are fused into the
 * text, statements are split across lines, and runs of lines are missing
 * (see the jumps in the embedded numbering).  Only comments are added in
 * this review pass; the surviving tokens are untouched.
 */
/* Bookkeeping node tying one registered consumer map entry to its IIO
 * device.  Further members (the map pointer and the list linkage used as
 * "l" below) appear to have been dropped by the extraction (numbering
 * jumps 19 -> 24) — TODO recover from the pristine source. */
18 struct iio_map_internal
{
19 struct iio_dev
*indio_dev
;
/* Global registry of consumer channel maps, guarded by iio_map_list_lock. */
24 static LIST_HEAD(iio_map_list
);
25 static DEFINE_MUTEX(iio_map_list_lock
);
/*
 * Scan the global map list and match every entry owned by @indio_dev.
 * The "_locked" suffix indicates the caller holds iio_map_list_lock.
 * The body of the match branch (presumably unlink + free + return value)
 * did not survive extraction (numbering jumps 33 -> 42) — TODO confirm
 * against the pristine source.
 */
27 static int iio_map_array_unregister_locked(struct iio_dev
*indio_dev
)
30 struct iio_map_internal
*mapi
, *next
;
/* _safe iteration: entries are presumably removed while walking — verify. */
32 list_for_each_entry_safe(mapi
, next
, &iio_map_list
, l
) {
33 if (indio_dev
== mapi
->indio_dev
) {
/*
 * Register an array of channel maps for @indio_dev.  The maps array is
 * walked until an entry with a NULL consumer_dev_name (the visible loop
 * condition); each surviving line shows a kzalloc'd tracking node being
 * filled and appended to the global list under iio_map_list_lock.
 * Several lines are missing (e.g. between 52 and 58, and 59 and 64);
 * the call to iio_map_array_unregister_locked() looks like an error-path
 * unwind — TODO confirm against the pristine source.
 */
42 int iio_map_array_register(struct iio_dev
*indio_dev
, struct iio_map
*maps
)
45 struct iio_map_internal
*mapi
;
50 mutex_lock(&iio_map_list_lock
);
51 while (maps
[i
].consumer_dev_name
!= NULL
) {
52 mapi
= kzalloc(sizeof(*mapi
), GFP_KERNEL
);
58 mapi
->indio_dev
= indio_dev
;
59 list_add_tail(&mapi
->l
, &iio_map_list
);
/* Unwind already-registered entries (error path; condition line missing). */
64 iio_map_array_unregister_locked(indio_dev
);
65 mutex_unlock(&iio_map_list_lock
);
69 EXPORT_SYMBOL_GPL(iio_map_array_register
);
/* Public unregister: simply takes iio_map_list_lock around the _locked
 * helper and (per the visible assignment) propagates its return value. */
73 * Remove all map entries associated with the given iio device
75 int iio_map_array_unregister(struct iio_dev
*indio_dev
)
79 mutex_lock(&iio_map_list_lock
);
80 ret
= iio_map_array_unregister_locked(indio_dev
);
81 mutex_unlock(&iio_map_list_lock
);
85 EXPORT_SYMBOL_GPL(iio_map_array_unregister
);
/*
 * Look up a channel spec by datasheet name: linear scan over
 * indio_dev->channels comparing each non-NULL datasheet_name with
 * strcmp().  chan starts NULL, so a failed search presumably returns
 * NULL (the return/break lines are missing from this fragment).
 */
87 static const struct iio_chan_spec
88 *iio_chan_spec_from_name(const struct iio_dev
*indio_dev
, const char *name
)
91 const struct iio_chan_spec
*chan
= NULL
;
93 for (i
= 0; i
< indio_dev
->num_channels
; i
++)
94 if (indio_dev
->channels
[i
].datasheet_name
&&
95 strcmp(name
, indio_dev
->channels
[i
].datasheet_name
) == 0) {
96 chan
= &indio_dev
->channels
[i
];
/*
 * bus_find_device() match callback: true when the device's of_node equals
 * the DT node passed in @data AND the device is really an IIO device
 * (its type is iio_device_type).
 */
104 static int iio_dev_node_match(struct device
*dev
, const void *data
)
106 return dev
->of_node
== data
&& dev
->type
== &iio_device_type
;
/* Partial kernel-doc survives below; trailing lines of it are missing. */
110 * __of_iio_simple_xlate - translate iiospec to the IIO channel index
111 * @indio_dev: pointer to the iio_dev structure
112 * @iiospec: IIO specifier as found in the device tree
114 * This is simple translation function, suitable for the most 1:1 mapped
115 * channels in IIO chips. This function performs only one sanity check:
116 * whether IIO index is less than num_channels (that is specified in the
119 static int __of_iio_simple_xlate(struct iio_dev
*indio_dev
,
120 const struct of_phandle_args
*iiospec
)
/* No argument cell at all: early-return value is missing from this
 * fragment — presumably channel 0; verify against pristine source. */
122 if (!iiospec
->args_count
)
/* Reject an out-of-range index with a diagnostic (error return missing). */
125 if (iiospec
->args
[0] >= indio_dev
->num_channels
) {
126 dev_err(&indio_dev
->dev
, "invalid channel index %u\n",
131 return iiospec
->args
[0];
/*
 * Resolve the @index-th "io-channels" phandle of @np into @channel:
 * parse the specifier, find the provider device on the IIO bus (the
 * visible -EPROBE_DEFER return suggests deferral when not yet bound),
 * translate the specifier through the driver's ->of_xlate or the simple
 * translator, then point channel->channel at the selected chan spec.
 * Error-path labels and several intermediate lines are missing from this
 * fragment; the trailing iio_device_put() looks like error unwinding.
 */
134 static int __of_iio_channel_get(struct iio_channel
*channel
,
135 struct device_node
*np
, int index
)
138 struct iio_dev
*indio_dev
;
140 struct of_phandle_args iiospec
;
142 err
= of_parse_phandle_with_args(np
, "io-channels",
/* Locate the provider via the iio_dev_node_match predicate (the match
 * callback argument line is missing here). */
148 idev
= bus_find_device(&iio_bus_type
, NULL
, iiospec
.np
,
150 of_node_put(iiospec
.np
);
152 return -EPROBE_DEFER
;
154 indio_dev
= dev_to_iio_dev(idev
);
155 channel
->indio_dev
= indio_dev
;
/* Prefer the driver-supplied translator when present. */
156 if (indio_dev
->info
->of_xlate
)
157 index
= indio_dev
->info
->of_xlate(indio_dev
, &iiospec
);
159 index
= __of_iio_simple_xlate(indio_dev
, &iiospec
);
162 channel
->channel
= &indio_dev
->channels
[index
];
167 iio_device_put(indio_dev
);
/*
 * Allocate an iio_channel and fill it via __of_iio_channel_get().
 * Visible error returns: -EINVAL (condition line missing, presumably a
 * negative index check), -ENOMEM on allocation failure, and a
 * goto err_free_channel unwind whose label body is missing here.
 */
171 static struct iio_channel
*of_iio_channel_get(struct device_node
*np
, int index
)
173 struct iio_channel
*channel
;
177 return ERR_PTR(-EINVAL
);
179 channel
= kzalloc(sizeof(*channel
), GFP_KERNEL
);
181 return ERR_PTR(-ENOMEM
);
183 err
= __of_iio_channel_get(channel
, np
, index
);
185 goto err_free_channel
;
/*
 * Named DT lookup: map @name to an index via "io-channel-names", fetch
 * that channel, and — per the surviving comments — walk up to the parent
 * when the node carries "io-channel-ranges".  Loop structure and several
 * returns are missing from this fragment.
 */
194 static struct iio_channel
*of_iio_channel_get_by_name(struct device_node
*np
,
197 struct iio_channel
*chan
= NULL
;
199 /* Walk up the tree of devices looking for a matching iio channel */
204 * For named iio channels, first look up the name in the
205 * "io-channel-names" property. If it cannot be found, the
206 * index will be an error code, and of_iio_channel_get()
210 index
= of_property_match_string(np
, "io-channel-names",
212 chan
= of_iio_channel_get(np
, index
);
/* Success or probe deferral both end the search (break/return missing). */
213 if (!IS_ERR(chan
) || PTR_ERR(chan
) == -EPROBE_DEFER
)
215 else if (name
&& index
>= 0) {
216 pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n",
217 np
, name
? name
: "", index
);
222 * No matching IIO channel found on this node.
223 * If the parent node has a "io-channel-ranges" property,
224 * then we can try one of its channels.
227 if (np
&& !of_get_property(np
, "io-channel-ranges", NULL
))
/*
 * Resolve every "io-channels" phandle of @dev into a NULL-terminated
 * iio_channel array.  Visible structure: count phandles (nummaps), return
 * NULL when there are none so the caller falls back to the map table,
 * kcalloc the array, then fill each slot via __of_iio_channel_get().
 * The error_free_chans unwind puts the refs acquired so far; several
 * lines (counting loop body, return statements) are missing here.
 */
234 static struct iio_channel
*of_iio_channel_get_all(struct device
*dev
)
236 struct iio_channel
*chans
;
237 int i
, mapind
, nummaps
= 0;
241 ret
= of_parse_phandle_with_args(dev
->of_node
,
249 if (nummaps
== 0) /* no error, return NULL to search map table */
252 /* NULL terminated array to save passing size */
253 chans
= kcalloc(nummaps
+ 1, sizeof(*chans
), GFP_KERNEL
);
255 return ERR_PTR(-ENOMEM
);
257 /* Search for OF matches */
258 for (mapind
= 0; mapind
< nummaps
; mapind
++) {
259 ret
= __of_iio_channel_get(&chans
[mapind
], dev
->of_node
,
262 goto error_free_chans
;
/* Unwind: drop the device refs taken for the slots already filled. */
267 for (i
= 0; i
< mapind
; i
++)
268 iio_device_put(chans
[i
].indio_dev
);
/* Stubs compiled when CONFIG_OF is disabled; their bodies are missing
 * from this fragment (presumably return NULL — verify). */
273 #else /* CONFIG_OF */
275 static inline struct iio_channel
*
276 of_iio_channel_get_by_name(struct device_node
*np
, const char *name
)
281 static inline struct iio_channel
*of_iio_channel_get_all(struct device
*dev
)
286 #endif /* CONFIG_OF */
/*
 * Map-table lookup path: under iio_map_list_lock, find the registered map
 * entry whose consumer_dev_name/consumer_channel match @name /
 * @channel_name, take a reference on its IIO device, then allocate the
 * iio_channel and resolve its chan spec from the map's adc_channel_label.
 * Error paths (visible -ENODEV returns, NULL-spec branch, trailing
 * iio_device_put unwind) are partially missing from this fragment.
 */
288 static struct iio_channel
*iio_channel_get_sys(const char *name
,
289 const char *channel_name
)
291 struct iio_map_internal
*c_i
= NULL
, *c
= NULL
;
292 struct iio_channel
*channel
;
295 if (name
== NULL
&& channel_name
== NULL
)
296 return ERR_PTR(-ENODEV
);
298 /* first find matching entry the channel map */
299 mutex_lock(&iio_map_list_lock
);
300 list_for_each_entry(c_i
, &iio_map_list
, l
) {
301 if ((name
&& strcmp(name
, c_i
->map
->consumer_dev_name
) != 0) ||
303 strcmp(channel_name
, c_i
->map
->consumer_channel
) != 0))
/* Ref the provider while still holding the list lock. */
306 iio_device_get(c
->indio_dev
);
309 mutex_unlock(&iio_map_list_lock
);
311 return ERR_PTR(-ENODEV
);
313 channel
= kzalloc(sizeof(*channel
), GFP_KERNEL
);
314 if (channel
== NULL
) {
319 channel
->indio_dev
= c
->indio_dev
;
321 if (c
->map
->adc_channel_label
) {
323 iio_chan_spec_from_name(channel
->indio_dev
,
324 c
->map
->adc_channel_label
);
326 if (channel
->channel
== NULL
) {
/* Error unwind: drop the reference taken above. */
337 iio_device_put(c
->indio_dev
);
/*
 * Consumer entry point: try the device-tree path first
 * (of_iio_channel_get_by_name on dev->of_node), then fall back to the
 * registered map table via iio_channel_get_sys().  The lines between the
 * two calls (the success check on the OF result) are missing here.
 */
341 struct iio_channel
*iio_channel_get(struct device
*dev
,
342 const char *channel_name
)
344 const char *name
= dev
? dev_name(dev
) : NULL
;
345 struct iio_channel
*channel
;
348 channel
= of_iio_channel_get_by_name(dev
->of_node
,
354 return iio_channel_get_sys(name
, channel_name
);
356 EXPORT_SYMBOL_GPL(iio_channel_get
);
/* Release one channel: drop the device ref taken at get time.  Freeing of
 * the channel struct itself is not visible in this fragment — verify. */
358 void iio_channel_release(struct iio_channel
*channel
)
362 iio_device_put(channel
->indio_dev
);
365 EXPORT_SYMBOL_GPL(iio_channel_release
);
/* devres destructor: @res holds a pointer-to-channel; release it. */
367 static void devm_iio_channel_free(struct device
*dev
, void *res
)
369 struct iio_channel
*channel
= *(struct iio_channel
**)res
;
371 iio_channel_release(channel
);
/*
 * Managed variant of iio_channel_get(): allocate a devres node carrying
 * the channel pointer so the channel is released automatically on driver
 * detach.  The lines storing the pointer into *ptr and the error-path
 * devres_free are missing from this fragment.
 */
374 struct iio_channel
*devm_iio_channel_get(struct device
*dev
,
375 const char *channel_name
)
377 struct iio_channel
**ptr
, *channel
;
379 ptr
= devres_alloc(devm_iio_channel_free
, sizeof(*ptr
), GFP_KERNEL
);
381 return ERR_PTR(-ENOMEM
);
383 channel
= iio_channel_get(dev
, channel_name
);
384 if (IS_ERR(channel
)) {
390 devres_add(dev
, ptr
);
394 EXPORT_SYMBOL_GPL(devm_iio_channel_get
);
/*
 * Get every channel mapped to @dev.  Visible structure: try the DT path
 * (of_iio_channel_get_all) first; otherwise, under iio_map_list_lock,
 * count the map-table entries matching dev_name(dev), kcalloc a
 * NULL-terminated array, fill each slot (indio_dev, consumer data, chan
 * spec by adc_channel_label) taking a device ref per slot, and on any
 * failure put the refs and free via error_free_chans.  Counting-loop body,
 * several returns and the label lines are missing from this fragment.
 */
396 struct iio_channel
*iio_channel_get_all(struct device
*dev
)
399 struct iio_channel
*chans
;
400 struct iio_map_internal
*c
= NULL
;
406 return ERR_PTR(-EINVAL
);
408 chans
= of_iio_channel_get_all(dev
);
412 name
= dev_name(dev
);
414 mutex_lock(&iio_map_list_lock
);
415 /* first count the matching maps */
416 list_for_each_entry(c
, &iio_map_list
, l
)
417 if (name
&& strcmp(name
, c
->map
->consumer_dev_name
) != 0)
427 /* NULL terminated array to save passing size */
428 chans
= kcalloc(nummaps
+ 1, sizeof(*chans
), GFP_KERNEL
);
434 /* for each map fill in the chans element */
435 list_for_each_entry(c
, &iio_map_list
, l
) {
436 if (name
&& strcmp(name
, c
->map
->consumer_dev_name
) != 0)
438 chans
[mapind
].indio_dev
= c
->indio_dev
;
439 chans
[mapind
].data
= c
->map
->consumer_data
;
440 chans
[mapind
].channel
=
441 iio_chan_spec_from_name(chans
[mapind
].indio_dev
,
442 c
->map
->adc_channel_label
);
443 if (chans
[mapind
].channel
== NULL
) {
445 goto error_free_chans
;
447 iio_device_get(chans
[mapind
].indio_dev
);
452 goto error_free_chans
;
454 mutex_unlock(&iio_map_list_lock
);
/* error_free_chans unwind (label line missing): put acquired refs. */
459 for (i
= 0; i
< nummaps
; i
++)
460 iio_device_put(chans
[i
].indio_dev
);
463 mutex_unlock(&iio_map_list_lock
);
467 EXPORT_SYMBOL_GPL(iio_channel_get_all
);
/*
 * Walk the NULL-terminated channel array dropping each device ref.
 * The chan advance inside the loop and the final free of the array are
 * missing from this fragment — verify against pristine source.
 */
469 void iio_channel_release_all(struct iio_channel
*channels
)
471 struct iio_channel
*chan
= &channels
[0];
473 while (chan
->indio_dev
) {
474 iio_device_put(chan
->indio_dev
);
479 EXPORT_SYMBOL_GPL(iio_channel_release_all
);
/* devres destructor for the get_all variant: release the whole array. */
481 static void devm_iio_channel_free_all(struct device
*dev
, void *res
)
483 struct iio_channel
*channels
= *(struct iio_channel
**)res
;
485 iio_channel_release_all(channels
);
/*
 * Managed variant of iio_channel_get_all(): devres node owns the array
 * pointer; channels are released automatically on driver detach.  Lines
 * storing the pointer into *ptr and the error-path devres_free are
 * missing from this fragment.
 */
488 struct iio_channel
*devm_iio_channel_get_all(struct device
*dev
)
490 struct iio_channel
**ptr
, *channels
;
492 ptr
= devres_alloc(devm_iio_channel_free_all
, sizeof(*ptr
), GFP_KERNEL
);
494 return ERR_PTR(-ENOMEM
);
496 channels
= iio_channel_get_all(dev
);
497 if (IS_ERR(channels
)) {
503 devres_add(dev
, ptr
);
507 EXPORT_SYMBOL_GPL(devm_iio_channel_get_all
);
/*
 * Central read helper: verify the channel advertises @info, then prefer
 * the driver's ->read_raw_multi (result in vals[]/val_len), falling back
 * to ->read_raw into val/val2.  The copy-back from vals[] to val/val2 and
 * the surrounding condition lines are missing from this fragment.
 */
509 static int iio_channel_read(struct iio_channel
*chan
, int *val
, int *val2
,
510 enum iio_chan_info_enum info
)
513 int vals
[INDIO_MAX_RAW_ELEMENTS
];
520 if (!iio_channel_has_info(chan
->channel
, info
))
523 if (chan
->indio_dev
->info
->read_raw_multi
) {
524 ret
= chan
->indio_dev
->info
->read_raw_multi(chan
->indio_dev
,
525 chan
->channel
, INDIO_MAX_RAW_ELEMENTS
,
526 vals
, &val_len
, info
);
530 ret
= chan
->indio_dev
->info
->read_raw(chan
->indio_dev
,
531 chan
->channel
, val
, val2
, info
);
/*
 * Read the raw value of @chan into *val.  info_exist_lock is held so the
 * provider driver cannot vanish mid-read; a NULL ->info means the driver
 * is gone (the error assignment inside that branch is missing here).
 */
536 int iio_read_channel_raw(struct iio_channel
*chan
, int *val
)
540 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
541 if (chan
->indio_dev
->info
== NULL
) {
546 ret
= iio_channel_read(chan
, val
, NULL
, IIO_CHAN_INFO_RAW
);
548 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
552 EXPORT_SYMBOL_GPL(iio_read_channel_raw
);
/*
 * Same pattern as iio_read_channel_raw() but requesting
 * IIO_CHAN_INFO_AVERAGE_RAW; guarded by info_exist_lock.
 */
554 int iio_read_channel_average_raw(struct iio_channel
*chan
, int *val
)
558 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
559 if (chan
->indio_dev
->info
== NULL
) {
564 ret
= iio_channel_read(chan
, val
, NULL
, IIO_CHAN_INFO_AVERAGE_RAW
);
566 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
570 EXPORT_SYMBOL_GPL(iio_read_channel_average_raw
);
/*
 * Convert a raw reading to a processed value: read the channel's OFFSET
 * (if any) and SCALE, then apply fixed-point math per the scale encoding.
 * Visible cases: plain INT multiply, INT_PLUS_MICRO / INT_PLUS_NANO with
 * a div_s64 fractional part, FRACTIONAL, and FRACTIONAL_LOG2 (shift).
 * The raw64/offset combination, the micro/nano divisor constants, the
 * case labels for the first branch, and the default/no-scale fallback are
 * missing from this fragment — verify against pristine source.
 */
572 static int iio_convert_raw_to_processed_unlocked(struct iio_channel
*chan
,
573 int raw
, int *processed
, unsigned int scale
)
575 int scale_type
, scale_val
, scale_val2
, offset
;
579 ret
= iio_channel_read(chan
, &offset
, NULL
, IIO_CHAN_INFO_OFFSET
);
583 scale_type
= iio_channel_read(chan
, &scale_val
, &scale_val2
,
584 IIO_CHAN_INFO_SCALE
);
585 if (scale_type
< 0) {
587 * Just pass raw values as processed if no scaling is
594 switch (scale_type
) {
596 *processed
= raw64
* scale_val
;
598 case IIO_VAL_INT_PLUS_MICRO
:
/* Negative fractional part: negate the integer product (per sign test,
 * whose condition line is missing). */
600 *processed
= -raw64
* scale_val
;
602 *processed
= raw64
* scale_val
;
603 *processed
+= div_s64(raw64
* (s64
)scale_val2
* scale
,
606 case IIO_VAL_INT_PLUS_NANO
:
608 *processed
= -raw64
* scale_val
;
610 *processed
= raw64
* scale_val
;
611 *processed
+= div_s64(raw64
* (s64
)scale_val2
* scale
,
614 case IIO_VAL_FRACTIONAL
:
615 *processed
= div_s64(raw64
* (s64
)scale_val
* scale
,
618 case IIO_VAL_FRACTIONAL_LOG2
:
619 *processed
= (raw64
* (s64
)scale_val
* scale
) >> scale_val2
;
/*
 * Locked wrapper around iio_convert_raw_to_processed_unlocked(): holds
 * info_exist_lock and bails when the provider's ->info is gone.  The
 * scale argument pass-through line is missing from this fragment.
 */
628 int iio_convert_raw_to_processed(struct iio_channel
*chan
, int raw
,
629 int *processed
, unsigned int scale
)
633 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
634 if (chan
->indio_dev
->info
== NULL
) {
639 ret
= iio_convert_raw_to_processed_unlocked(chan
, raw
, processed
,
642 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
646 EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed
);
/*
 * Generic attribute read: same info_exist_lock pattern, delegating to
 * iio_channel_read() for the requested attribute.
 */
648 int iio_read_channel_attribute(struct iio_channel
*chan
, int *val
, int *val2
,
649 enum iio_chan_info_enum attribute
)
653 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
654 if (chan
->indio_dev
->info
== NULL
) {
659 ret
= iio_channel_read(chan
, val
, val2
, attribute
);
661 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
665 EXPORT_SYMBOL_GPL(iio_read_channel_attribute
);
/* Convenience wrapper: read the channel's OFFSET attribute. */
667 int iio_read_channel_offset(struct iio_channel
*chan
, int *val
, int *val2
)
669 return iio_read_channel_attribute(chan
, val
, val2
, IIO_CHAN_INFO_OFFSET
);
671 EXPORT_SYMBOL_GPL(iio_read_channel_offset
);
/*
 * Read a processed value: if the channel provides PROCESSED natively,
 * use it; otherwise read RAW and convert in-place (scale factor 1) via
 * iio_convert_raw_to_processed_unlocked().  Whole sequence held under
 * info_exist_lock.
 */
673 int iio_read_channel_processed(struct iio_channel
*chan
, int *val
)
677 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
678 if (chan
->indio_dev
->info
== NULL
) {
683 if (iio_channel_has_info(chan
->channel
, IIO_CHAN_INFO_PROCESSED
)) {
684 ret
= iio_channel_read(chan
, val
, NULL
,
685 IIO_CHAN_INFO_PROCESSED
);
687 ret
= iio_channel_read(chan
, val
, NULL
, IIO_CHAN_INFO_RAW
);
/* Note: *val is both input (raw) and output (processed) here. */
690 ret
= iio_convert_raw_to_processed_unlocked(chan
, *val
, val
, 1);
694 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
698 EXPORT_SYMBOL_GPL(iio_read_channel_processed
);
/* Convenience wrapper: read the channel's SCALE attribute. */
700 int iio_read_channel_scale(struct iio_channel
*chan
, int *val
, int *val2
)
702 return iio_read_channel_attribute(chan
, val
, val2
, IIO_CHAN_INFO_SCALE
);
704 EXPORT_SYMBOL_GPL(iio_read_channel_scale
);
/*
 * Query the available-values list for @info: checks the channel
 * advertises availability for it, then delegates to the driver's
 * ->read_avail callback.  (The early-return value for the failed check
 * is missing from this fragment.)
 */
706 static int iio_channel_read_avail(struct iio_channel
*chan
,
707 const int **vals
, int *type
, int *length
,
708 enum iio_chan_info_enum info
)
710 if (!iio_channel_has_available(chan
->channel
, info
))
713 return chan
->indio_dev
->info
->read_avail(chan
->indio_dev
, chan
->channel
,
714 vals
, type
, length
, info
);
/*
 * Locked wrapper over iio_channel_read_avail(): standard
 * info_exist_lock / missing-driver pattern.
 */
717 int iio_read_avail_channel_attribute(struct iio_channel
*chan
,
718 const int **vals
, int *type
, int *length
,
719 enum iio_chan_info_enum attribute
)
723 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
724 if (!chan
->indio_dev
->info
) {
729 ret
= iio_channel_read_avail(chan
, vals
, type
, length
, attribute
);
731 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
735 EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute
);
/*
 * Available raw values: delegates to the attribute variant (the attribute
 * argument line is missing — presumably IIO_CHAN_INFO_RAW) and rejects a
 * non-IIO_VAL_INT type, since raw values must be plain integers.
 */
737 int iio_read_avail_channel_raw(struct iio_channel
*chan
,
738 const int **vals
, int *length
)
743 ret
= iio_read_avail_channel_attribute(chan
, vals
, &type
, length
,
746 if (ret
>= 0 && type
!= IIO_VAL_INT
)
747 /* raw values are assumed to be IIO_VAL_INT */
752 EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw
);
/*
 * Determine the maximum value for @info from the available-values list.
 * Only fragments of the switch survive: the IIO_AVAIL_RANGE case label
 * and what looks like a list scan taking the largest element
 * (vals[--length] comparisons).  Most of the body, including the
 * IIO_AVAIL_LIST label and type handling, is missing — verify against
 * the pristine source.
 */
754 static int iio_channel_read_max(struct iio_channel
*chan
,
755 int *val
, int *val2
, int *type
,
756 enum iio_chan_info_enum info
)
766 ret
= iio_channel_read_avail(chan
, &vals
, type
, &length
, info
);
768 case IIO_AVAIL_RANGE
:
784 *val
= vals
[--length
];
786 if (vals
[--length
] > *val
)
791 /* FIXME: learn about max for other iio values */
/*
 * Read the maximum raw value of @chan via iio_channel_read_max(),
 * guarded by the usual info_exist_lock / missing-driver pattern.
 */
801 int iio_read_max_channel_raw(struct iio_channel
*chan
, int *val
)
806 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
807 if (!chan
->indio_dev
->info
) {
812 ret
= iio_channel_read_max(chan
, val
, NULL
, &type
, IIO_CHAN_INFO_RAW
);
814 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
818 EXPORT_SYMBOL_GPL(iio_read_max_channel_raw
);
/*
 * Report the channel's iio_chan_type.  The surviving comment explains
 * why the lock is needed: the provider driver may have been removed.
 */
820 int iio_get_channel_type(struct iio_channel
*chan
, enum iio_chan_type
*type
)
823 /* Need to verify underlying driver has not gone away */
825 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
826 if (chan
->indio_dev
->info
== NULL
) {
831 *type
= chan
->channel
->type
;
833 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
837 EXPORT_SYMBOL_GPL(iio_get_channel_type
);
/*
 * Central write helper: forward val/val2 for @info straight to the
 * driver's ->write_raw callback.
 */
839 static int iio_channel_write(struct iio_channel
*chan
, int val
, int val2
,
840 enum iio_chan_info_enum info
)
842 return chan
->indio_dev
->info
->write_raw(chan
->indio_dev
,
843 chan
->channel
, val
, val2
, info
);
/*
 * Generic attribute write, mirroring iio_read_channel_attribute():
 * info_exist_lock held across the driver call.
 */
846 int iio_write_channel_attribute(struct iio_channel
*chan
, int val
, int val2
,
847 enum iio_chan_info_enum attribute
)
851 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
852 if (chan
->indio_dev
->info
== NULL
) {
857 ret
= iio_channel_write(chan
, val
, val2
, attribute
);
859 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
863 EXPORT_SYMBOL_GPL(iio_write_channel_attribute
);
/* Convenience wrapper: write a raw value (val2 fixed at 0). */
865 int iio_write_channel_raw(struct iio_channel
*chan
, int val
)
867 return iio_write_channel_attribute(chan
, val
, 0, IIO_CHAN_INFO_RAW
);
869 EXPORT_SYMBOL_GPL(iio_write_channel_raw
);
/*
 * Count the channel's extended-info entries by walking the
 * name-terminated ext_info array; 0 when there is none (the counter
 * increment and return lines are missing from this fragment).
 */
871 unsigned int iio_get_channel_ext_info_count(struct iio_channel
*chan
)
873 const struct iio_chan_spec_ext_info
*ext_info
;
876 if (!chan
->channel
->ext_info
)
879 for (ext_info
= chan
->channel
->ext_info
; ext_info
->name
; ext_info
++)
884 EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count
);
/*
 * Find the ext_info entry whose name matches @attr (parameter line for
 * attr is missing from this fragment); NULL-terminated-by-name scan,
 * returning NULL when absent (return lines missing).
 */
886 static const struct iio_chan_spec_ext_info
*iio_lookup_ext_info(
887 const struct iio_channel
*chan
,
890 const struct iio_chan_spec_ext_info
*ext_info
;
892 if (!chan
->channel
->ext_info
)
895 for (ext_info
= chan
->channel
->ext_info
; ext_info
->name
; ++ext_info
) {
896 if (!strcmp(attr
, ext_info
->name
))
/*
 * Read an extended attribute: look up @attr in the channel's ext_info
 * table and invoke its ->read handler (the not-found error return and
 * the trailing call arguments are missing from this fragment).
 */
903 ssize_t
iio_read_channel_ext_info(struct iio_channel
*chan
,
904 const char *attr
, char *buf
)
906 const struct iio_chan_spec_ext_info
*ext_info
;
908 ext_info
= iio_lookup_ext_info(chan
, attr
);
912 return ext_info
->read(chan
->indio_dev
, ext_info
->private,
915 EXPORT_SYMBOL_GPL(iio_read_channel_ext_info
);
/*
 * Write an extended attribute: look up @attr and invoke the entry's
 * ->write handler with the channel, buffer and length (the not-found
 * error return is missing from this fragment).
 */
917 ssize_t
iio_write_channel_ext_info(struct iio_channel
*chan
, const char *attr
,
918 const char *buf
, size_t len
)
920 const struct iio_chan_spec_ext_info
*ext_info
;
922 ext_info
= iio_lookup_ext_info(chan
, attr
);
926 return ext_info
->write(chan
->indio_dev
, ext_info
->private,
927 chan
->channel
, buf
, len
);
929 EXPORT_SYMBOL_GPL(iio_write_channel_ext_info
);