// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}

/**
 * iio_buffer_read_outer() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		Maximum number of bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative error code, otherwise the number of bytes read
 *	   (0 signals end of file).
 **/
ssize_t iio_buffer_read_outer(struct file *filp, char __user *buf,
			      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	add_wait_queue(&rb->pollq, &wait);
	do {
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->read(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}
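
/*
 * Reader semantics in brief: a blocking read sleeps until
 * min(n / bytes_per_datum, watermark) samples are available, so e.g. a
 * read for 16 samples with a hypothetical watermark of 64 completes once
 * 16 samples have arrived. A non-blocking (O_NONBLOCK) read never sleeps:
 * it may force a hardware FIFO flush and returns -EAGAIN if the buffer is
 * still empty.
 */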

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
__poll_t iio_buffer_poll(struct file *filp,
			 struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info || rb == NULL)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;

	if (!buffer)
		return;

	wake_up(&buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	if (!buffer->watermark)
		buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);

/**
 * iio_buffer_set_attrs - Set buffer specific attributes
 * @buffer: The buffer for which we are setting attributes
 * @attrs: Pointer to a null terminated list of pointers to attributes
 */
void iio_buffer_set_attrs(struct iio_buffer *buffer,
			  const struct attribute **attrs)
{
	buffer->attrs = attrs;
}
EXPORT_SYMBOL_GPL(iio_buffer_set_attrs);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.repeat,
			       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.shift);
}
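
/*
 * Example of the resulting sysfs "type" string for a hypothetical channel:
 * a little-endian, signed 12-bit value stored in 16 bits with a shift of 4
 * prints "le:s12/16>>4"; with scan_type.repeat == 3 it would print
 * "le:s12/16X3>>4".
 */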

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

/* NULL is used as the error indicator, as it can never be a valid mask. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask,
						bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
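
/*
 * Illustration (hypothetical masks): with av_masks = {0b0011, 0b1111, 0}
 * and mask = 0b0101, a strict match fails (no exact equality) while a
 * non-strict match returns the 0b1111 entry, since 0b0101 is a subset of
 * it. The list is terminated by an empty mask.
 */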

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	bitmap_free(trialmask);

	return 0;

err_invalid_mask:
	bitmap_free(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static int iio_scan_mask_query(struct iio_dev *indio_dev,
			       struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_buffer *buffer,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", iio_buffer_is_active(buffer));
}

static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
					     unsigned int scan_index)
{
	const struct iio_chan_spec *ch;
	unsigned int bytes;

	ch = iio_find_channel_from_si(indio_dev, scan_index);
	bytes = ch->scan_type.storagebits / 8;
	if (ch->scan_type.repeat > 1)
		bytes *= ch->scan_type.repeat;
	return bytes;
}

static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
	return iio_storage_bytes_for_si(indio_dev,
					indio_dev->scan_index_timestamp);
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	unsigned int bytes = 0;
	int length, i, largest = 0;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		length = iio_storage_bytes_for_si(indio_dev, i);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	if (timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	bytes = ALIGN(bytes, largest);
	return bytes;
}
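
/*
 * Worked example (hypothetical scan): a 2-byte channel, a 4-byte channel
 * and an 8-byte timestamp give
 *	ALIGN(0, 2) + 2 = 2, ALIGN(2, 4) + 4 = 8, ALIGN(8, 8) + 8 = 16,
 * and the final ALIGN(16, 8) leaves 16 bytes per scan.
 */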

static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}

static int iio_buffer_enable(struct iio_buffer *buffer,
			     struct iio_dev *indio_dev)
{
	if (!buffer->access->enable)
		return 0;
	return buffer->access->enable(buffer, indio_dev);
}

static int iio_buffer_disable(struct iio_buffer *buffer,
			      struct iio_dev *indio_dev)
{
	if (!buffer->access->disable)
		return 0;
	return buffer->access->disable(buffer, indio_dev);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
				     struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
			       const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		bitmap_free(mask);
}

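/*
 * Snapshot of the buffer configuration that a requested buffer update
 * would produce; filled in by iio_verify_update() and applied by
 * iio_enable_buffers().
 */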
struct iio_device_config {
	unsigned int mode;
	unsigned int watermark;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};

static int iio_verify_update(struct iio_dev *indio_dev,
			     struct iio_buffer *insert_buffer,
			     struct iio_buffer *remove_buffer,
			     struct iio_device_config *config)
{
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	if (insert_buffer &&
	    bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
		dev_dbg(&indio_dev->dev,
			"At least one scan element must be enabled first\n");
		return -EINVAL;
	}

	memset(config, 0, sizeof(*config));
	config->watermark = ~0;

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
	    list_is_singular(&indio_dev->buffer_list))
		return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
		config->watermark = min(config->watermark, buffer->watermark);
	}

	if (insert_buffer) {
		modes &= insert_buffer->access->modes;
		config->watermark = min(config->watermark,
					insert_buffer->watermark);
	}

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&indio_dev->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (compound_mask == NULL)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
						indio_dev->masklength,
						compound_mask,
						strict_scanmask);
		bitmap_free(compound_mask);
		if (scan_mask == NULL)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
						    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned int from;
	unsigned int to;
	unsigned int length;
	struct list_head l;
};

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
	    (*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}
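
/*
 * Illustration: if one demux entry already copies bytes 0..3 to 0..3 and
 * the next request copies bytes 4..7 to 4..7, iio_buffer_add_demux()
 * extends the existing entry into a single 8-byte copy rather than adding
 * a second table entry.
 */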

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	int ret, in_ind = -1, out_ind, length;
	unsigned int in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			/* Account for the skipped channel before advancing */
			length = iio_storage_bytes_for_si(indio_dev, in_ind);
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		length = iio_storage_bytes_for_si(indio_dev, in_ind);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
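
/*
 * Worked example (hypothetical scan): active_scan_mask = {0, 1, 2} with
 * 2-byte channels and buffer->scan_mask = {0, 2}. The raw scan lays out
 * ch0 at bytes 0-1, ch1 at 2-3 and ch2 at 4-5, so the demux table becomes
 * two entries: copy in[0..1] -> out[0..1] and in[4..5] -> out[2..3].
 */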

static int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}

static int iio_enable_buffers(struct iio_dev *indio_dev,
			      struct iio_device_config *config)
{
	struct iio_buffer *buffer;
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;
	indio_dev->currentmode = config->mode;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev,
			config->watermark);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_enable(buffer, indio_dev);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			goto err_disable_buffers;
		}
	}

	return 0;

err_disable_buffers:
	list_for_each_entry_continue_reverse(buffer, &indio_dev->buffer_list,
					     buffer_list)
		iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
	indio_dev->active_scan_mask = NULL;

	return ret;
}
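
/*
 * The enable sequence above runs, in order: preenable(),
 * update_scan_mode(), hwfifo_set_watermark(), each buffer's enable()
 * callback, and finally postenable(); the error paths unwind those steps
 * in reverse.
 */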

static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&indio_dev->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret2 = iio_buffer_disable(buffer, indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;
	indio_dev->currentmode = INDIO_DIRECT_MODE;

	return ret;
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
				&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state. With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}

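/**
 * iio_update_buffers() - add or remove a buffer from the active set
 * @indio_dev:		device the buffer belongs to
 * @insert_buffer:	buffer to insert, may be NULL
 * @remove_buffer:	buffer to remove, may be NULL
 *
 * Note: this tears down all active buffering and builds it up again with
 * the updated configuration.
 */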
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}

static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev, buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev, NULL, buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%u\n", buffer->watermark);
}

static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_dma_show_data_available(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%zu\n", iio_buffer_data_available(buffer));
}

static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);
static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
	S_IRUGO, iio_buffer_show_watermark, NULL);
static DEVICE_ATTR(data_available, S_IRUGO,
		   iio_dma_show_data_available, NULL);

static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
	&dev_attr_data_available.attr,
};
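
/*
 * Note: iio_buffer_alloc_sysfs_and_mask() below relies on the positions in
 * this array: entry 0 (length) and entry 2 (watermark) are swapped for
 * their read-only variants when the buffer cannot change them.
 */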

int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount;
	const struct iio_chan_spec *channels;

	channels = indio_dev->channels;
	if (channels) {
		int ml = indio_dev->masklength;

		for (i = 0; i < indio_dev->num_channels; i++)
			ml = max(ml, channels[i].scan_index + 1);
		indio_dev->masklength = ml;
	}

	if (!buffer)
		return 0;

	attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[attrcount] != NULL)
			attrcount++;
	}

	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
		       sizeof(struct attribute *), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
		attr[2] = &dev_attr_watermark_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * attrcount);

	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

	buffer->buffer_group.name = "buffer";
	buffer->buffer_group.attrs = attr;

	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

	attrcount = 0;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* Build the scan element attributes for each channel */
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
							  GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	attrn = 0;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
	kfree(buffer->buffer_group.attrs);

	return ret;
}

void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;

	if (!buffer)
		return;

	bitmap_free(buffer->scan_mask);
	kfree(buffer->buffer_group.attrs);
	kfree(buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for watermark to decide if we wake the poll queue
	 * because read may request fewer samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
	return 0;
}

/**
 * iio_push_to_buffers() - push to a registered buffer.
 * @indio_dev:		iio_dev structure for device.
 * @data:		Full scan.
 */
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
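
/*
 * Sketch of a typical caller (hypothetical driver): a trigger handler
 * assembles one full scan, laid out according to active_scan_mask, in a
 * suitably aligned driver buffer and then calls:
 *
 *	iio_push_to_buffers(indio_dev, my_drv->scan_buf);
 */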

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);

/**
 * iio_device_attach_buffer - Attach a buffer to an IIO device
 * @indio_dev: The device the buffer should be attached to
 * @buffer: The buffer to attach to the device
 *
 * This function attaches a buffer to an IIO device. The buffer stays attached
 * to the device until the device is freed. The function should only be called
 * at most once per device.
 */
void iio_device_attach_buffer(struct iio_dev *indio_dev,
			      struct iio_buffer *buffer)
{
	indio_dev->buffer = iio_buffer_get(buffer);
}
EXPORT_SYMBOL_GPL(iio_device_attach_buffer);