]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/staging/iio/industrialio-buffer.c
staging:iio:buffer scrap to_iio_buffer as it no longer has meaning.
[mirror_ubuntu-bionic-kernel.git] / drivers / staging / iio / industrialio-buffer.c
CommitLineData
7026ea4b
JC
1/* The industrial I/O core
2 *
3 * Copyright (c) 2008 Jonathan Cameron
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
14555b14 9 * Handling of buffer allocation / resizing.
7026ea4b
JC
10 *
11 *
12 * Things to look at here.
13 * - Better memory allocation techniques?
14 * - Alternative access techniques?
15 */
16#include <linux/kernel.h>
8e336a72 17#include <linux/export.h>
7026ea4b 18#include <linux/device.h>
7026ea4b 19#include <linux/fs.h>
7026ea4b 20#include <linux/cdev.h>
5a0e3ad6 21#include <linux/slab.h>
a7348347 22#include <linux/poll.h>
7026ea4b
JC
23
24#include "iio.h"
df9c1c42 25#include "iio_core.h"
9dd1cb30 26#include "sysfs.h"
af5046af 27#include "buffer.h"
7026ea4b 28
8310b86c
JC
29static const char * const iio_endian_prefix[] = {
30 [IIO_BE] = "be",
31 [IIO_LE] = "le",
32};
7026ea4b
JC
33
34/**
14555b14 35 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
7026ea4b 36 *
14555b14
JC
37 * This function relies on all buffer implementations having an
38 * iio_buffer as their first element.
7026ea4b 39 **/
14555b14
JC
40ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
41 size_t n, loff_t *f_ps)
7026ea4b 42{
1aa04278 43 struct iio_dev *indio_dev = filp->private_data;
14555b14 44 struct iio_buffer *rb = indio_dev->buffer;
d5857d65 45
96e00f11 46 if (!rb || !rb->access->read_first_n)
7026ea4b 47 return -EINVAL;
8d213f24 48 return rb->access->read_first_n(rb, n, buf);
7026ea4b
JC
49}
50
a7348347 51/**
14555b14 52 * iio_buffer_poll() - poll the buffer to find out if it has data
a7348347 53 */
14555b14
JC
54unsigned int iio_buffer_poll(struct file *filp,
55 struct poll_table_struct *wait)
a7348347 56{
1aa04278 57 struct iio_dev *indio_dev = filp->private_data;
14555b14 58 struct iio_buffer *rb = indio_dev->buffer;
a7348347
JC
59
60 poll_wait(filp, &rb->pollq, wait);
61 if (rb->stufftoread)
62 return POLLIN | POLLRDNORM;
63 /* need a way of knowing if there may be enough data... */
8d213f24 64 return 0;
a7348347
JC
65}
66
30eb82f0 67int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
7026ea4b 68{
14555b14 69 struct iio_buffer *rb = indio_dev->buffer;
30eb82f0 70 if (!rb)
96e00f11 71 return 0;
30eb82f0 72 if (rb->access->mark_in_use)
1aa04278 73 rb->access->mark_in_use(rb);
30eb82f0 74 return 0;
7026ea4b 75}
7026ea4b 76
14555b14 77void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
7026ea4b 78{
14555b14 79 struct iio_buffer *rb = indio_dev->buffer;
758d988c 80
96e00f11
JC
81 if (!rb)
82 return;
1aa04278
JC
83 clear_bit(IIO_BUSY_BIT_POS, &rb->flags);
84 if (rb->access->unmark_in_use)
85 rb->access->unmark_in_use(rb);
7026ea4b
JC
86}
87
f8c6f4e9 88void iio_buffer_init(struct iio_buffer *buffer, struct iio_dev *indio_dev)
7026ea4b 89{
5ada4ea9 90 INIT_LIST_HEAD(&buffer->demux_list);
f8c6f4e9 91 buffer->indio_dev = indio_dev;
14555b14 92 init_waitqueue_head(&buffer->pollq);
7026ea4b 93}
14555b14 94EXPORT_SYMBOL(iio_buffer_init);
7026ea4b 95
1d892719 96static ssize_t iio_show_scan_index(struct device *dev,
8d213f24
JC
97 struct device_attribute *attr,
98 char *buf)
1d892719 99{
8d213f24 100 return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
1d892719
JC
101}
102
103static ssize_t iio_show_fixed_type(struct device *dev,
104 struct device_attribute *attr,
105 char *buf)
106{
107 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
8310b86c
JC
108 u8 type = this_attr->c->scan_type.endianness;
109
110 if (type == IIO_CPU) {
9d5d1153
JC
111#ifdef __LITTLE_ENDIAN
112 type = IIO_LE;
113#else
114 type = IIO_BE;
115#endif
8310b86c
JC
116 }
117 return sprintf(buf, "%s:%c%d/%d>>%u\n",
118 iio_endian_prefix[type],
1d892719
JC
119 this_attr->c->scan_type.sign,
120 this_attr->c->scan_type.realbits,
121 this_attr->c->scan_type.storagebits,
122 this_attr->c->scan_type.shift);
123}
124
8d213f24
JC
125static ssize_t iio_scan_el_show(struct device *dev,
126 struct device_attribute *attr,
127 char *buf)
128{
129 int ret;
f8c6f4e9 130 struct iio_dev *indio_dev = dev_get_drvdata(dev);
8d213f24 131
5ada4ea9
JC
132 ret = test_bit(to_iio_dev_attr(attr)->address,
133 indio_dev->buffer->scan_mask);
134
8d213f24
JC
135 return sprintf(buf, "%d\n", ret);
136}
137
14555b14 138static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
8d213f24 139{
14555b14 140 clear_bit(bit, buffer->scan_mask);
8d213f24
JC
141 return 0;
142}
143
144static ssize_t iio_scan_el_store(struct device *dev,
145 struct device_attribute *attr,
146 const char *buf,
147 size_t len)
148{
149 int ret = 0;
150 bool state;
1aa04278 151 struct iio_dev *indio_dev = dev_get_drvdata(dev);
14555b14 152 struct iio_buffer *buffer = indio_dev->buffer;
8d213f24
JC
153 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
154
155 state = !(buf[0] == '0');
156 mutex_lock(&indio_dev->mlock);
ec3afa40 157 if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
8d213f24
JC
158 ret = -EBUSY;
159 goto error_ret;
160 }
14555b14 161 ret = iio_scan_mask_query(buffer, this_attr->address);
8d213f24
JC
162 if (ret < 0)
163 goto error_ret;
164 if (!state && ret) {
14555b14 165 ret = iio_scan_mask_clear(buffer, this_attr->address);
8d213f24
JC
166 if (ret)
167 goto error_ret;
168 } else if (state && !ret) {
14555b14 169 ret = iio_scan_mask_set(buffer, this_attr->address);
8d213f24
JC
170 if (ret)
171 goto error_ret;
172 }
173
174error_ret:
175 mutex_unlock(&indio_dev->mlock);
176
177 return ret ? ret : len;
178
179}
180
181static ssize_t iio_scan_el_ts_show(struct device *dev,
182 struct device_attribute *attr,
183 char *buf)
184{
f8c6f4e9
JC
185 struct iio_dev *indio_dev = dev_get_drvdata(dev);
186 return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
8d213f24
JC
187}
188
189static ssize_t iio_scan_el_ts_store(struct device *dev,
190 struct device_attribute *attr,
191 const char *buf,
192 size_t len)
193{
194 int ret = 0;
1aa04278 195 struct iio_dev *indio_dev = dev_get_drvdata(dev);
8d213f24 196 bool state;
1aa04278 197
8d213f24
JC
198 state = !(buf[0] == '0');
199 mutex_lock(&indio_dev->mlock);
ec3afa40 200 if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
8d213f24
JC
201 ret = -EBUSY;
202 goto error_ret;
203 }
14555b14 204 indio_dev->buffer->scan_timestamp = state;
8d213f24
JC
205error_ret:
206 mutex_unlock(&indio_dev->mlock);
207
208 return ret ? ret : len;
209}
210
14555b14
JC
211static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
212 const struct iio_chan_spec *chan)
1d892719 213{
26d25ae3 214 int ret, attrcount = 0;
14555b14 215 struct iio_buffer *buffer = indio_dev->buffer;
1d892719 216
26d25ae3 217 ret = __iio_add_chan_devattr("index",
1d892719
JC
218 chan,
219 &iio_show_scan_index,
220 NULL,
221 0,
222 0,
1aa04278 223 &indio_dev->dev,
14555b14 224 &buffer->scan_el_dev_attr_list);
1d892719
JC
225 if (ret)
226 goto error_ret;
26d25ae3
JC
227 attrcount++;
228 ret = __iio_add_chan_devattr("type",
1d892719
JC
229 chan,
230 &iio_show_fixed_type,
231 NULL,
232 0,
233 0,
1aa04278 234 &indio_dev->dev,
14555b14 235 &buffer->scan_el_dev_attr_list);
1d892719
JC
236 if (ret)
237 goto error_ret;
26d25ae3 238 attrcount++;
a88b3ebc 239 if (chan->type != IIO_TIMESTAMP)
26d25ae3 240 ret = __iio_add_chan_devattr("en",
a88b3ebc
JC
241 chan,
242 &iio_scan_el_show,
243 &iio_scan_el_store,
244 chan->scan_index,
245 0,
1aa04278 246 &indio_dev->dev,
14555b14 247 &buffer->scan_el_dev_attr_list);
a88b3ebc 248 else
26d25ae3 249 ret = __iio_add_chan_devattr("en",
a88b3ebc
JC
250 chan,
251 &iio_scan_el_ts_show,
252 &iio_scan_el_ts_store,
253 chan->scan_index,
254 0,
1aa04278 255 &indio_dev->dev,
14555b14 256 &buffer->scan_el_dev_attr_list);
26d25ae3
JC
257 attrcount++;
258 ret = attrcount;
1d892719
JC
259error_ret:
260 return ret;
261}
262
/*
 * Free one dynamically created scan element attribute: first the
 * kstrdup'd attribute name, then the iio_dev_attr wrapper itself.
 * @indio_dev is currently unused but kept for interface symmetry.
 */
static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}
269
14555b14 270static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
1d892719
JC
271{
272 struct iio_dev_attr *p, *n;
14555b14 273 struct iio_buffer *buffer = indio_dev->buffer;
26d25ae3 274
1d892719 275 list_for_each_entry_safe(p, n,
14555b14
JC
276 &buffer->scan_el_dev_attr_list, l)
277 iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
1d892719
JC
278}
279
/* Name of the sysfs directory holding the scan element attributes. */
static const char * const iio_scan_elements_group_name = "scan_elements";
14555b14
JC
282int iio_buffer_register(struct iio_dev *indio_dev,
283 const struct iio_chan_spec *channels,
284 int num_channels)
1d892719 285{
26d25ae3
JC
286 struct iio_dev_attr *p;
287 struct attribute **attr;
14555b14 288 struct iio_buffer *buffer = indio_dev->buffer;
26d25ae3
JC
289 int ret, i, attrn, attrcount, attrcount_orig = 0;
290
14555b14
JC
291 if (buffer->attrs)
292 indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;
bf32963c 293
14555b14
JC
294 if (buffer->scan_el_attrs != NULL) {
295 attr = buffer->scan_el_attrs->attrs;
26d25ae3
JC
296 while (*attr++ != NULL)
297 attrcount_orig++;
298 }
299 attrcount = attrcount_orig;
14555b14 300 INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
1d892719
JC
301 if (channels) {
302 /* new magic */
303 for (i = 0; i < num_channels; i++) {
32b5eeca
JC
304 /* Establish necessary mask length */
305 if (channels[i].scan_index >
306 (int)indio_dev->masklength - 1)
307 indio_dev->masklength
308 = indio_dev->channels[i].scan_index + 1;
309
14555b14 310 ret = iio_buffer_add_channel_sysfs(indio_dev,
1aa04278 311 &channels[i]);
1d892719 312 if (ret < 0)
26d25ae3
JC
313 goto error_cleanup_dynamic;
314 attrcount += ret;
beb80600
JC
315 if (channels[i].type == IIO_TIMESTAMP)
316 buffer->scan_index_timestamp =
317 channels[i].scan_index;
1d892719 318 }
14555b14 319 if (indio_dev->masklength && buffer->scan_mask == NULL) {
d83fb184
TM
320 buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
321 sizeof(*buffer->scan_mask),
322 GFP_KERNEL);
14555b14 323 if (buffer->scan_mask == NULL) {
32b5eeca 324 ret = -ENOMEM;
26d25ae3 325 goto error_cleanup_dynamic;
32b5eeca
JC
326 }
327 }
1d892719
JC
328 }
329
14555b14 330 buffer->scan_el_group.name = iio_scan_elements_group_name;
26d25ae3 331
d83fb184
TM
332 buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
333 sizeof(buffer->scan_el_group.attrs[0]),
334 GFP_KERNEL);
14555b14 335 if (buffer->scan_el_group.attrs == NULL) {
26d25ae3
JC
336 ret = -ENOMEM;
337 goto error_free_scan_mask;
338 }
14555b14
JC
339 if (buffer->scan_el_attrs)
340 memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
341 sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
26d25ae3
JC
342 attrn = attrcount_orig;
343
14555b14
JC
344 list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
345 buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
346 indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
26d25ae3 347
1d892719 348 return 0;
26d25ae3
JC
349
350error_free_scan_mask:
14555b14 351 kfree(buffer->scan_mask);
1d892719 352error_cleanup_dynamic:
14555b14 353 __iio_buffer_attr_cleanup(indio_dev);
26d25ae3 354
7026ea4b
JC
355 return ret;
356}
14555b14 357EXPORT_SYMBOL(iio_buffer_register);
1d892719 358
14555b14 359void iio_buffer_unregister(struct iio_dev *indio_dev)
7026ea4b 360{
14555b14
JC
361 kfree(indio_dev->buffer->scan_mask);
362 kfree(indio_dev->buffer->scan_el_group.attrs);
363 __iio_buffer_attr_cleanup(indio_dev);
7026ea4b 364}
14555b14 365EXPORT_SYMBOL(iio_buffer_unregister);
7026ea4b 366
14555b14
JC
367ssize_t iio_buffer_read_length(struct device *dev,
368 struct device_attribute *attr,
369 char *buf)
7026ea4b 370{
1aa04278 371 struct iio_dev *indio_dev = dev_get_drvdata(dev);
14555b14 372 struct iio_buffer *buffer = indio_dev->buffer;
7026ea4b 373
14555b14 374 if (buffer->access->get_length)
8d213f24 375 return sprintf(buf, "%d\n",
14555b14 376 buffer->access->get_length(buffer));
7026ea4b 377
8d213f24 378 return 0;
7026ea4b 379}
14555b14 380EXPORT_SYMBOL(iio_buffer_read_length);
7026ea4b 381
14555b14
JC
382ssize_t iio_buffer_write_length(struct device *dev,
383 struct device_attribute *attr,
384 const char *buf,
385 size_t len)
7026ea4b
JC
386{
387 int ret;
388 ulong val;
1aa04278 389 struct iio_dev *indio_dev = dev_get_drvdata(dev);
14555b14 390 struct iio_buffer *buffer = indio_dev->buffer;
8d213f24 391
7026ea4b
JC
392 ret = strict_strtoul(buf, 10, &val);
393 if (ret)
394 return ret;
395
14555b14
JC
396 if (buffer->access->get_length)
397 if (val == buffer->access->get_length(buffer))
7026ea4b
JC
398 return len;
399
14555b14
JC
400 if (buffer->access->set_length) {
401 buffer->access->set_length(buffer, val);
402 if (buffer->access->mark_param_change)
403 buffer->access->mark_param_change(buffer);
7026ea4b
JC
404 }
405
406 return len;
407}
14555b14 408EXPORT_SYMBOL(iio_buffer_write_length);
7026ea4b 409
/*
 * sysfs: start ("1") or stop ("0") buffered capture.
 *
 * Enable path: preenable callback -> request_update -> mark_in_use ->
 * pick a buffered mode (triggered needs a trigger) -> postenable, with
 * rollback of mode and in-use marking if postenable fails.
 * Disable path: predisable -> unmark_in_use -> back to direct mode ->
 * postdisable. All under mlock so mode transitions are serialised.
 */
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	mutex_lock(&indio_dev->mlock);
	previous_mode = indio_dev->currentmode;
	requested_state = !(buf[0] == '0');
	/* Any buffered mode counts as "enabled". */
	current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES);
	if (current_state == requested_state) {
		/* Re-requesting the current state is a harmless no-op. */
		printk(KERN_INFO "iio-buffer, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (indio_dev->setup_ops->preenable) {
			ret = indio_dev->setup_ops->preenable(indio_dev);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started:"
				       "buffer preenable failed\n");
				goto error_ret;
			}
		}
		/* Let the backend apply any pending length/param changes. */
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started:"
				       "buffer parameter update failed\n");
				goto error_ret;
			}
		}
		if (buffer->access->mark_in_use)
			buffer->access->mark_in_use(buffer);
		/* Definitely possible for devices to support both of these.*/
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
			if (!indio_dev->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				/* Roll back the in-use marking from above. */
				if (buffer->access->unmark_in_use)
					buffer->access->unmark_in_use(buffer);
				goto error_ret;
			}
			indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
		} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE)
			indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
		else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (indio_dev->setup_ops->postenable) {
			ret = indio_dev->setup_ops->postenable(indio_dev);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started:"
				       "postenable failed\n");
				/* Full rollback: unmark, restore mode,
				 * and give the driver its postdisable. */
				if (buffer->access->unmark_in_use)
					buffer->access->unmark_in_use(buffer);
				indio_dev->currentmode = previous_mode;
				if (indio_dev->setup_ops->postdisable)
					indio_dev->setup_ops->
						postdisable(indio_dev);
				goto error_ret;
			}
		}
	} else {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		if (buffer->access->unmark_in_use)
			buffer->access->unmark_in_use(buffer);
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&indio_dev->mlock);
	return len;

error_ret:
	mutex_unlock(&indio_dev->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
8d213f24 507
14555b14
JC
508ssize_t iio_buffer_show_enable(struct device *dev,
509 struct device_attribute *attr,
510 char *buf)
7026ea4b 511{
f8c6f4e9
JC
512 struct iio_dev *indio_dev = dev_get_drvdata(dev);
513 return sprintf(buf, "%d\n", !!(indio_dev->currentmode
ec3afa40 514 & INDIO_ALL_BUFFER_MODES));
7026ea4b 515}
14555b14 516EXPORT_SYMBOL(iio_buffer_show_enable);
7026ea4b 517
32b5eeca
JC
518/* note NULL used as error indicator as it doesn't make sense. */
519static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
520 unsigned int masklength,
521 unsigned long *mask)
522{
523 if (bitmap_empty(mask, masklength))
524 return NULL;
525 while (*av_masks) {
526 if (bitmap_subset(mask, av_masks, masklength))
527 return av_masks;
528 av_masks += BITS_TO_LONGS(masklength);
529 }
530 return NULL;
531}
532
959d2952
JC
533int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
534{
535 struct iio_buffer *buffer = indio_dev->buffer;
536 const struct iio_chan_spec *ch;
537 unsigned bytes = 0;
538 int length, i;
539 dev_dbg(&indio_dev->dev, "%s\n", __func__);
540
541 /* How much space will the demuxed element take? */
542 for_each_set_bit(i, buffer->scan_mask,
543 indio_dev->masklength) {
544 ch = iio_find_channel_from_si(indio_dev, i);
545 length = ch->scan_type.storagebits/8;
546 bytes = ALIGN(bytes, length);
547 bytes += length;
548 }
549 if (buffer->scan_timestamp) {
550 ch = iio_find_channel_from_si(indio_dev,
551 buffer->scan_index_timestamp);
552 length = ch->scan_type.storagebits/8;
553 bytes = ALIGN(bytes, length);
554 bytes += length;
555 }
556 buffer->access->set_bytes_per_datum(buffer, bytes);
557
558 /* What scan mask do we actually have ?*/
559 if (indio_dev->available_scan_masks)
560 indio_dev->active_scan_mask =
561 iio_scan_mask_match(indio_dev->available_scan_masks,
562 indio_dev->masklength,
563 buffer->scan_mask);
564 else
565 indio_dev->active_scan_mask = buffer->scan_mask;
5ada4ea9
JC
566 iio_update_demux(indio_dev);
567
568 if (indio_dev->info->update_scan_mode)
569 return indio_dev->info
570 ->update_scan_mode(indio_dev,
571 indio_dev->active_scan_mask);
959d2952
JC
572 return 0;
573}
574EXPORT_SYMBOL(iio_sw_buffer_preenable);
575
32b5eeca
JC
576/**
577 * iio_scan_mask_set() - set particular bit in the scan mask
14555b14 578 * @buffer: the buffer whose scan mask we are interested in
32b5eeca
JC
579 * @bit: the bit to be set.
580 **/
14555b14 581int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
32b5eeca 582{
f8c6f4e9 583 struct iio_dev *indio_dev = buffer->indio_dev;
32b5eeca
JC
584 unsigned long *mask;
585 unsigned long *trialmask;
586
587 trialmask = kmalloc(sizeof(*trialmask)*
f8c6f4e9 588 BITS_TO_LONGS(indio_dev->masklength),
32b5eeca
JC
589 GFP_KERNEL);
590
591 if (trialmask == NULL)
592 return -ENOMEM;
f8c6f4e9 593 if (!indio_dev->masklength) {
14555b14 594 WARN_ON("trying to set scanmask prior to registering buffer\n");
32b5eeca
JC
595 kfree(trialmask);
596 return -EINVAL;
597 }
f8c6f4e9 598 bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
32b5eeca
JC
599 set_bit(bit, trialmask);
600
f8c6f4e9
JC
601 if (indio_dev->available_scan_masks) {
602 mask = iio_scan_mask_match(indio_dev->available_scan_masks,
603 indio_dev->masklength,
32b5eeca
JC
604 trialmask);
605 if (!mask) {
606 kfree(trialmask);
607 return -EINVAL;
608 }
609 }
f8c6f4e9 610 bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
32b5eeca
JC
611
612 kfree(trialmask);
613
614 return 0;
615};
616EXPORT_SYMBOL_GPL(iio_scan_mask_set);
617
14555b14 618int iio_scan_mask_query(struct iio_buffer *buffer, int bit)
32b5eeca 619{
f8c6f4e9 620 struct iio_dev *indio_dev = buffer->indio_dev;
32b5eeca
JC
621 long *mask;
622
f8c6f4e9 623 if (bit > indio_dev->masklength)
32b5eeca
JC
624 return -EINVAL;
625
14555b14 626 if (!buffer->scan_mask)
32b5eeca 627 return 0;
f8c6f4e9
JC
628 if (indio_dev->available_scan_masks)
629 mask = iio_scan_mask_match(indio_dev->available_scan_masks,
630 indio_dev->masklength,
14555b14 631 buffer->scan_mask);
32b5eeca 632 else
14555b14 633 mask = buffer->scan_mask;
32b5eeca
JC
634 if (!mask)
635 return 0;
636
637 return test_bit(bit, mask);
638};
639EXPORT_SYMBOL_GPL(iio_scan_mask_query);
5ada4ea9
JC
640
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
654
655static unsigned char *iio_demux(struct iio_buffer *buffer,
656 unsigned char *datain)
657{
658 struct iio_demux_table *t;
659
660 if (list_empty(&buffer->demux_list))
661 return datain;
662 list_for_each_entry(t, &buffer->demux_list, l)
663 memcpy(buffer->demux_bounce + t->to,
664 datain + t->from, t->length);
665
666 return buffer->demux_bounce;
667}
668
669int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data,
670 s64 timestamp)
671{
672 unsigned char *dataout = iio_demux(buffer, data);
673
674 return buffer->access->store_to(buffer, dataout, timestamp);
675}
676EXPORT_SYMBOL_GPL(iio_push_to_buffer);
677
678int iio_update_demux(struct iio_dev *indio_dev)
679{
680 const struct iio_chan_spec *ch;
681 struct iio_buffer *buffer = indio_dev->buffer;
682 int ret, in_ind = -1, out_ind, length;
683 unsigned in_loc = 0, out_loc = 0;
684 struct iio_demux_table *p, *q;
685
686 /* Clear out any old demux */
687 list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
688 list_del(&p->l);
689 kfree(p);
690 }
691 kfree(buffer->demux_bounce);
692 buffer->demux_bounce = NULL;
693
694 /* First work out which scan mode we will actually have */
695 if (bitmap_equal(indio_dev->active_scan_mask,
696 buffer->scan_mask,
697 indio_dev->masklength))
698 return 0;
699
700 /* Now we have the two masks, work from least sig and build up sizes */
701 for_each_set_bit(out_ind,
702 indio_dev->active_scan_mask,
703 indio_dev->masklength) {
704 in_ind = find_next_bit(indio_dev->active_scan_mask,
705 indio_dev->masklength,
706 in_ind + 1);
707 while (in_ind != out_ind) {
708 in_ind = find_next_bit(indio_dev->active_scan_mask,
709 indio_dev->masklength,
710 in_ind + 1);
711 ch = iio_find_channel_from_si(indio_dev, in_ind);
712 length = ch->scan_type.storagebits/8;
713 /* Make sure we are aligned */
714 in_loc += length;
715 if (in_loc % length)
716 in_loc += length - in_loc % length;
717 }
718 p = kmalloc(sizeof(*p), GFP_KERNEL);
719 if (p == NULL) {
720 ret = -ENOMEM;
721 goto error_clear_mux_table;
722 }
723 ch = iio_find_channel_from_si(indio_dev, in_ind);
724 length = ch->scan_type.storagebits/8;
725 if (out_loc % length)
726 out_loc += length - out_loc % length;
727 if (in_loc % length)
728 in_loc += length - in_loc % length;
729 p->from = in_loc;
730 p->to = out_loc;
731 p->length = length;
732 list_add_tail(&p->l, &buffer->demux_list);
733 out_loc += length;
734 in_loc += length;
735 }
736 /* Relies on scan_timestamp being last */
737 if (buffer->scan_timestamp) {
738 p = kmalloc(sizeof(*p), GFP_KERNEL);
739 if (p == NULL) {
740 ret = -ENOMEM;
741 goto error_clear_mux_table;
742 }
743 ch = iio_find_channel_from_si(indio_dev,
744 buffer->scan_index_timestamp);
745 length = ch->scan_type.storagebits/8;
746 if (out_loc % length)
747 out_loc += length - out_loc % length;
748 if (in_loc % length)
749 in_loc += length - in_loc % length;
750 p->from = in_loc;
751 p->to = out_loc;
752 p->length = length;
753 list_add_tail(&p->l, &buffer->demux_list);
754 out_loc += length;
755 in_loc += length;
756 }
757 buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
758 if (buffer->demux_bounce == NULL) {
759 ret = -ENOMEM;
760 goto error_clear_mux_table;
761 }
762 return 0;
763
764error_clear_mux_table:
765 list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
766 list_del(&p->l);
767 kfree(p);
768 }
769 return ret;
770}
771EXPORT_SYMBOL_GPL(iio_update_demux);