/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

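/*
 * Userspace sketch (illustrative, not part of this file): once a buffer is
 * enabled, the character device is consumed with ordinary poll()/read()
 * calls. The device path below is hypothetical.
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char scan[4096];
 *
 *	poll(&pfd, 1, -1);
 *	read(fd, scan, sizeof(scan));
 */
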
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

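/*
 * The resulting "type" attribute string has the form
 * [be|le]:[s|u]realbits/storagebits>>shift. For example "le:s12/16>>4"
 * describes a little-endian signed channel with 12 valid bits stored in
 * 16 bits, needing a right shift of 4 before use.
 */
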
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;

	return attrcount;
}

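/*
 * For a channel such as in_voltage0 the above ends up creating the sysfs
 * files scan_elements/in_voltage0_index, scan_elements/in_voltage0_type
 * and scan_elements/in_voltage0_en; for the IIO_TIMESTAMP channel the "en"
 * attribute is backed by the scan_timestamp show/store callbacks instead.
 */
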
static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

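/*
 * Illustrative caller sketch (hypothetical "foo" driver): the channel array
 * handed to iio_buffer_register() is typically the same one used when
 * registering the device itself.
 *
 *	static const struct iio_chan_spec foo_channels[] = {
 *		{
 *			.type = IIO_VOLTAGE,
 *			.indexed = 1,
 *			.channel = 0,
 *			.scan_index = 0,
 *			.scan_type = {
 *				.sign = 's',
 *				.realbits = 12,
 *				.storagebits = 16,
 *				.shift = 4,
 *			},
 *		},
 *		IIO_CHAN_SOFT_TIMESTAMP(1),
 *	};
 *
 *	ret = iio_buffer_register(indio_dev, foo_channels,
 *				  ARRAY_SIZE(foo_channels));
 */
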
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note: NULL is used as the error indicator, as it can never be a valid match. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

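/*
 * available_scan_masks is a zero-terminated sequence of bitmaps, each
 * BITS_TO_LONGS(masklength) words long. As a sketch (assuming masklength
 * fits in one word), a device that can sample either channel 0 alone or
 * channels 0-2 together might provide:
 *
 *	static const unsigned long foo_scan_masks[] = { 0x1, 0x7, 0 };
 */
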
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

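/*
 * Worked example: two enabled 16 bit channels followed by a 64 bit
 * timestamp give bytes = 2 + 2 = 4 after the loop; the timestamp is then
 * aligned up to offset 8, so the scan totals 8 + 8 = 16 bytes.
 */
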
static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				ret = -EINVAL;
				goto error_ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:
	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

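/*
 * Within this file the only caller is iio_buffer_store_enable() below,
 * which maps writing "1" to the buffer/enable sysfs attribute onto
 * iio_update_buffers(indio_dev, indio_dev->buffer, NULL) and writing "0"
 * onto iio_update_buffers(indio_dev, NULL, indio_dev->buffer).
 */
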
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	unsigned bytes;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->set_bytes_per_datum) {
			bytes = iio_compute_scan_bytes(indio_dev,
						       buffer->scan_mask,
						       buffer->scan_timestamp);

			buffer->access->set_bytes_per_datum(buffer, bytes);
		}
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

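/*
 * Usage sketch (illustrative): a driver that can only sample one channel
 * at a time plugs this helper straight into its setup ops. The "foo" name
 * is hypothetical.
 *
 *	static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
 *		.preenable = &iio_sw_buffer_preenable,
 *		.validate_scan_mask = &iio_validate_scan_mask_onehot,
 *	};
 */
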
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

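/*
 * Worked example (no timestamp): if the device captures channels 0-2 as
 * three 16 bit values but this buffer only requested channels 0 and 2,
 * iio_buffer_update_demux() below builds two entries,
 * { .from = 0, .to = 0, .length = 2 } and { .from = 4, .to = 2, .length = 2 },
 * so each incoming 6 byte scan is repacked into a 4 byte bounce buffer.
 */
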
static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

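/*
 * Producer-side sketch (illustrative, "foo" names hypothetical): a trigger
 * handler fills an array laid out to match active_scan_mask and pushes it
 * to every attached buffer.
 *
 *	static irqreturn_t foo_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 scan[32];	// must be >= indio_dev->scan_bytes
 *
 *		foo_read_scan(indio_dev, scan);
 *		iio_push_to_buffers(indio_dev, scan);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */
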
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

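/*
 * Reference counting sketch: code that stores a buffer pointer beyond the
 * current call takes its own reference and drops it when done.
 *
 *	struct iio_buffer *buffer = iio_buffer_get(indio_dev->buffer);
 *	// use buffer
 *	iio_buffer_put(buffer);
 */
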
/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);