]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/staging/iio/industrialio-buffer.c
staging: r8712u: include module.h where needed
[mirror_ubuntu-artful-kernel.git] / drivers / staging / iio / industrialio-buffer.c
CommitLineData
7026ea4b
JC
1/* The industrial I/O core
2 *
3 * Copyright (c) 2008 Jonathan Cameron
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
14555b14 9 * Handling of buffer allocation / resizing.
7026ea4b
JC
10 *
11 *
12 * Things to look at here.
13 * - Better memory allocation techniques?
14 * - Alternative access techniques?
15 */
16#include <linux/kernel.h>
17#include <linux/device.h>
7026ea4b 18#include <linux/fs.h>
7026ea4b 19#include <linux/cdev.h>
5a0e3ad6 20#include <linux/slab.h>
a7348347 21#include <linux/poll.h>
7026ea4b
JC
22
23#include "iio.h"
df9c1c42 24#include "iio_core.h"
9dd1cb30 25#include "sysfs.h"
3811cd62 26#include "buffer_generic.h"
7026ea4b 27
8310b86c
JC
28static const char * const iio_endian_prefix[] = {
29 [IIO_BE] = "be",
30 [IIO_LE] = "le",
31};
7026ea4b
JC
32
33/**
14555b14 34 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
7026ea4b 35 *
14555b14
JC
36 * This function relies on all buffer implementations having an
37 * iio_buffer as their first element.
7026ea4b 38 **/
14555b14
JC
39ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
40 size_t n, loff_t *f_ps)
7026ea4b 41{
1aa04278 42 struct iio_dev *indio_dev = filp->private_data;
14555b14 43 struct iio_buffer *rb = indio_dev->buffer;
d5857d65 44
5565a450 45 if (!rb->access->read_first_n)
7026ea4b 46 return -EINVAL;
8d213f24 47 return rb->access->read_first_n(rb, n, buf);
7026ea4b
JC
48}
49
a7348347 50/**
14555b14 51 * iio_buffer_poll() - poll the buffer to find out if it has data
a7348347 52 */
14555b14
JC
53unsigned int iio_buffer_poll(struct file *filp,
54 struct poll_table_struct *wait)
a7348347 55{
1aa04278 56 struct iio_dev *indio_dev = filp->private_data;
14555b14 57 struct iio_buffer *rb = indio_dev->buffer;
a7348347
JC
58
59 poll_wait(filp, &rb->pollq, wait);
60 if (rb->stufftoread)
61 return POLLIN | POLLRDNORM;
62 /* need a way of knowing if there may be enough data... */
8d213f24 63 return 0;
a7348347
JC
64}
65
30eb82f0 66int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
7026ea4b 67{
14555b14 68 struct iio_buffer *rb = indio_dev->buffer;
30eb82f0
JC
69 if (!rb)
70 return -EINVAL;
71 if (rb->access->mark_in_use)
1aa04278 72 rb->access->mark_in_use(rb);
30eb82f0 73 return 0;
7026ea4b 74}
7026ea4b 75
14555b14 76void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
7026ea4b 77{
14555b14 78 struct iio_buffer *rb = indio_dev->buffer;
758d988c 79
1aa04278
JC
80 clear_bit(IIO_BUSY_BIT_POS, &rb->flags);
81 if (rb->access->unmark_in_use)
82 rb->access->unmark_in_use(rb);
7026ea4b
JC
83}
84
/**
 * iio_buffer_init() - initialise the common fields of an iio_buffer
 * @buffer: the buffer to initialise
 * @dev_info: the IIO device the buffer belongs to
 *
 * Links the buffer back to its device and readies the poll wait queue
 * used by iio_buffer_poll().  Buffer implementations call this before
 * registering the buffer.
 */
void iio_buffer_init(struct iio_buffer *buffer, struct iio_dev *dev_info)
{
	buffer->indio_dev = dev_info;
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);
7026ea4b 91
1d892719 92static ssize_t iio_show_scan_index(struct device *dev,
8d213f24
JC
93 struct device_attribute *attr,
94 char *buf)
1d892719 95{
8d213f24 96 return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
1d892719
JC
97}
98
99static ssize_t iio_show_fixed_type(struct device *dev,
100 struct device_attribute *attr,
101 char *buf)
102{
103 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
8310b86c
JC
104 u8 type = this_attr->c->scan_type.endianness;
105
106 if (type == IIO_CPU) {
107 if (__LITTLE_ENDIAN)
108 type = IIO_LE;
109 else
110 type = IIO_BE;
111 }
112 return sprintf(buf, "%s:%c%d/%d>>%u\n",
113 iio_endian_prefix[type],
1d892719
JC
114 this_attr->c->scan_type.sign,
115 this_attr->c->scan_type.realbits,
116 this_attr->c->scan_type.storagebits,
117 this_attr->c->scan_type.shift);
118}
119
8d213f24
JC
120static ssize_t iio_scan_el_show(struct device *dev,
121 struct device_attribute *attr,
122 char *buf)
123{
124 int ret;
1aa04278 125 struct iio_dev *dev_info = dev_get_drvdata(dev);
8d213f24 126
14555b14 127 ret = iio_scan_mask_query(dev_info->buffer,
1aa04278 128 to_iio_dev_attr(attr)->address);
8d213f24
JC
129 if (ret < 0)
130 return ret;
131 return sprintf(buf, "%d\n", ret);
132}
133
/*
 * Clear a single element from the buffer's scan mask and drop the
 * enabled-element count.  Callers hold indio_dev->mlock; always
 * succeeds (the 0 return keeps the signature parallel with
 * iio_scan_mask_set()).
 */
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	buffer->scan_count--;
	return 0;
}
140
/*
 * sysfs store: enable ('1'..) or disable ('0') a scan element.
 *
 * Rejects changes while the device is in triggered-buffer mode, since
 * the scan layout cannot change under an active capture.  The mask is
 * only touched when the requested state actually differs from the
 * current one.
 */
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret = 0;
	bool state;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	/* Anything other than a leading '0' counts as "enable". */
	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		/* currently enabled, being disabled */
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		/* currently disabled, being enabled */
		ret = iio_scan_mask_set(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;

}
177
178static ssize_t iio_scan_el_ts_show(struct device *dev,
179 struct device_attribute *attr,
180 char *buf)
181{
1aa04278 182 struct iio_dev *dev_info = dev_get_drvdata(dev);
14555b14 183 return sprintf(buf, "%d\n", dev_info->buffer->scan_timestamp);
8d213f24
JC
184}
185
/*
 * sysfs store: enable/disable inclusion of the timestamp in the scan.
 * Like iio_scan_el_store(), refuses while triggered capture is active.
 */
static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret = 0;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	bool state;

	/* Anything other than a leading '0' counts as "enable". */
	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
207
14555b14
JC
208static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
209 const struct iio_chan_spec *chan)
1d892719 210{
26d25ae3 211 int ret, attrcount = 0;
14555b14 212 struct iio_buffer *buffer = indio_dev->buffer;
1d892719 213
26d25ae3 214 ret = __iio_add_chan_devattr("index",
1d892719
JC
215 chan,
216 &iio_show_scan_index,
217 NULL,
218 0,
219 0,
1aa04278 220 &indio_dev->dev,
14555b14 221 &buffer->scan_el_dev_attr_list);
1d892719
JC
222 if (ret)
223 goto error_ret;
26d25ae3
JC
224 attrcount++;
225 ret = __iio_add_chan_devattr("type",
1d892719
JC
226 chan,
227 &iio_show_fixed_type,
228 NULL,
229 0,
230 0,
1aa04278 231 &indio_dev->dev,
14555b14 232 &buffer->scan_el_dev_attr_list);
1d892719
JC
233 if (ret)
234 goto error_ret;
26d25ae3 235 attrcount++;
a88b3ebc 236 if (chan->type != IIO_TIMESTAMP)
26d25ae3 237 ret = __iio_add_chan_devattr("en",
a88b3ebc
JC
238 chan,
239 &iio_scan_el_show,
240 &iio_scan_el_store,
241 chan->scan_index,
242 0,
1aa04278 243 &indio_dev->dev,
14555b14 244 &buffer->scan_el_dev_attr_list);
a88b3ebc 245 else
26d25ae3 246 ret = __iio_add_chan_devattr("en",
a88b3ebc
JC
247 chan,
248 &iio_scan_el_ts_show,
249 &iio_scan_el_ts_store,
250 chan->scan_index,
251 0,
1aa04278 252 &indio_dev->dev,
14555b14 253 &buffer->scan_el_dev_attr_list);
26d25ae3
JC
254 attrcount++;
255 ret = attrcount;
1d892719
JC
256error_ret:
257 return ret;
258}
259
14555b14
JC
/*
 * Free one dynamically created scan-element attribute (name string and
 * the iio_dev_attr itself).  The caller is responsible for unlinking it
 * from the attribute list first; @indio_dev is currently unused but
 * kept for symmetry with the creation path.
 */
static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}
266
14555b14 267static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
1d892719
JC
268{
269 struct iio_dev_attr *p, *n;
14555b14 270 struct iio_buffer *buffer = indio_dev->buffer;
26d25ae3 271
1d892719 272 list_for_each_entry_safe(p, n,
14555b14
JC
273 &buffer->scan_el_dev_attr_list, l)
274 iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
1d892719
JC
275}
276
26d25ae3
JC
277static const char * const iio_scan_elements_group_name = "scan_elements";
278
14555b14
JC
279int iio_buffer_register(struct iio_dev *indio_dev,
280 const struct iio_chan_spec *channels,
281 int num_channels)
1d892719 282{
26d25ae3
JC
283 struct iio_dev_attr *p;
284 struct attribute **attr;
14555b14 285 struct iio_buffer *buffer = indio_dev->buffer;
26d25ae3
JC
286 int ret, i, attrn, attrcount, attrcount_orig = 0;
287
14555b14
JC
288 if (buffer->attrs)
289 indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;
bf32963c 290
14555b14
JC
291 if (buffer->scan_el_attrs != NULL) {
292 attr = buffer->scan_el_attrs->attrs;
26d25ae3
JC
293 while (*attr++ != NULL)
294 attrcount_orig++;
295 }
296 attrcount = attrcount_orig;
14555b14 297 INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
1d892719
JC
298 if (channels) {
299 /* new magic */
300 for (i = 0; i < num_channels; i++) {
32b5eeca
JC
301 /* Establish necessary mask length */
302 if (channels[i].scan_index >
303 (int)indio_dev->masklength - 1)
304 indio_dev->masklength
305 = indio_dev->channels[i].scan_index + 1;
306
14555b14 307 ret = iio_buffer_add_channel_sysfs(indio_dev,
1aa04278 308 &channels[i]);
1d892719 309 if (ret < 0)
26d25ae3
JC
310 goto error_cleanup_dynamic;
311 attrcount += ret;
1d892719 312 }
14555b14
JC
313 if (indio_dev->masklength && buffer->scan_mask == NULL) {
314 buffer->scan_mask
315 = kzalloc(sizeof(*buffer->scan_mask)*
32b5eeca
JC
316 BITS_TO_LONGS(indio_dev->masklength),
317 GFP_KERNEL);
14555b14 318 if (buffer->scan_mask == NULL) {
32b5eeca 319 ret = -ENOMEM;
26d25ae3 320 goto error_cleanup_dynamic;
32b5eeca
JC
321 }
322 }
1d892719
JC
323 }
324
14555b14 325 buffer->scan_el_group.name = iio_scan_elements_group_name;
26d25ae3 326
14555b14
JC
327 buffer->scan_el_group.attrs
328 = kzalloc(sizeof(buffer->scan_el_group.attrs[0])*
329 (attrcount + 1),
26d25ae3 330 GFP_KERNEL);
14555b14 331 if (buffer->scan_el_group.attrs == NULL) {
26d25ae3
JC
332 ret = -ENOMEM;
333 goto error_free_scan_mask;
334 }
14555b14
JC
335 if (buffer->scan_el_attrs)
336 memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
337 sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
26d25ae3
JC
338 attrn = attrcount_orig;
339
14555b14
JC
340 list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
341 buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
342 indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
26d25ae3 343
1d892719 344 return 0;
26d25ae3
JC
345
346error_free_scan_mask:
14555b14 347 kfree(buffer->scan_mask);
1d892719 348error_cleanup_dynamic:
14555b14 349 __iio_buffer_attr_cleanup(indio_dev);
26d25ae3 350
7026ea4b
JC
351 return ret;
352}
14555b14 353EXPORT_SYMBOL(iio_buffer_register);
1d892719 354
14555b14 355void iio_buffer_unregister(struct iio_dev *indio_dev)
7026ea4b 356{
14555b14
JC
357 kfree(indio_dev->buffer->scan_mask);
358 kfree(indio_dev->buffer->scan_el_group.attrs);
359 __iio_buffer_attr_cleanup(indio_dev);
7026ea4b 360}
14555b14 361EXPORT_SYMBOL(iio_buffer_unregister);
7026ea4b 362
14555b14
JC
363ssize_t iio_buffer_read_length(struct device *dev,
364 struct device_attribute *attr,
365 char *buf)
7026ea4b 366{
1aa04278 367 struct iio_dev *indio_dev = dev_get_drvdata(dev);
14555b14 368 struct iio_buffer *buffer = indio_dev->buffer;
7026ea4b 369
14555b14 370 if (buffer->access->get_length)
8d213f24 371 return sprintf(buf, "%d\n",
14555b14 372 buffer->access->get_length(buffer));
7026ea4b 373
8d213f24 374 return 0;
7026ea4b 375}
14555b14 376EXPORT_SYMBOL(iio_buffer_read_length);
7026ea4b 377
14555b14
JC
/*
 * sysfs store: set the buffer length in datums.
 *
 * A request matching the current length is a no-op.  Implementations
 * without set_length silently accept the write (returning len keeps
 * sysfs semantics); mark_param_change lets the buffer note that a
 * rebuild is needed before the next enable.
 */
ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	ulong val;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	/* Already at the requested length: nothing to do. */
	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	if (buffer->access->set_length) {
		buffer->access->set_length(buffer, val);
		if (buffer->access->mark_param_change)
			buffer->access->mark_param_change(buffer);
	}

	return len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
7026ea4b 405
14555b14
JC
406ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
407 struct device_attribute *attr,
408 char *buf)
7026ea4b 409{
1aa04278 410 struct iio_dev *indio_dev = dev_get_drvdata(dev);
14555b14 411 struct iio_buffer *buffer = indio_dev->buffer;
7026ea4b 412
14555b14 413 if (buffer->access->get_bytes_per_datum)
8d213f24 414 return sprintf(buf, "%d\n",
14555b14 415 buffer->access->get_bytes_per_datum(buffer));
7026ea4b 416
8d213f24 417 return 0;
7026ea4b 418}
14555b14 419EXPORT_SYMBOL(iio_buffer_read_bytes_per_datum);
7026ea4b 420
14555b14
JC
/*
 * sysfs store: enable ('1'..) or disable ('0') buffered capture.
 *
 * Enable sequence: preenable -> request_update -> mark_in_use ->
 * select mode (triggered needs a trigger) -> postenable, unwinding on
 * any failure.  Disable sequence: predisable -> unmark_in_use ->
 * direct mode -> postdisable.  Everything runs under mlock so the
 * mode cannot change concurrently.
 */
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	struct iio_buffer *buffer = dev_info->buffer;

	mutex_lock(&dev_info->mlock);
	previous_mode = dev_info->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES);
	/* Requesting the state we are already in is a logged no-op. */
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-buffer, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (buffer->setup_ops->preenable) {
			ret = buffer->setup_ops->preenable(dev_info);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started:"
				       "buffer preenable failed\n");
				goto error_ret;
			}
		}
		/* Let the buffer apply any pending parameter changes. */
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started:"
				       "buffer parameter update failed\n");
				goto error_ret;
			}
		}
		if (buffer->access->mark_in_use)
			buffer->access->mark_in_use(buffer);
		/* Definitely possible for devices to support both of these.*/
		if (dev_info->modes & INDIO_BUFFER_TRIGGERED) {
			if (!dev_info->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				/* undo the mark_in_use above */
				if (buffer->access->unmark_in_use)
					buffer->access->unmark_in_use(buffer);
				goto error_ret;
			}
			dev_info->currentmode = INDIO_BUFFER_TRIGGERED;
		} else if (dev_info->modes & INDIO_BUFFER_HARDWARE)
			dev_info->currentmode = INDIO_BUFFER_HARDWARE;
		else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (buffer->setup_ops->postenable) {
			ret = buffer->setup_ops->postenable(dev_info);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started:"
				       "postenable failed\n");
				/* roll back mode and in-use marking */
				if (buffer->access->unmark_in_use)
					buffer->access->unmark_in_use(buffer);
				dev_info->currentmode = previous_mode;
				if (buffer->setup_ops->postdisable)
					buffer->setup_ops->
						postdisable(dev_info);
				goto error_ret;
			}
		}
	} else {
		if (buffer->setup_ops->predisable) {
			ret = buffer->setup_ops->predisable(dev_info);
			if (ret)
				goto error_ret;
		}
		if (buffer->access->unmark_in_use)
			buffer->access->unmark_in_use(buffer);
		dev_info->currentmode = INDIO_DIRECT_MODE;
		if (buffer->setup_ops->postdisable) {
			ret = buffer->setup_ops->postdisable(dev_info);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&dev_info->mlock);
	return len;

error_ret:
	mutex_unlock(&dev_info->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
8d213f24 518
14555b14
JC
519ssize_t iio_buffer_show_enable(struct device *dev,
520 struct device_attribute *attr,
521 char *buf)
7026ea4b 522{
1aa04278
JC
523 struct iio_dev *dev_info = dev_get_drvdata(dev);
524 return sprintf(buf, "%d\n", !!(dev_info->currentmode
ec3afa40 525 & INDIO_ALL_BUFFER_MODES));
7026ea4b 526}
14555b14 527EXPORT_SYMBOL(iio_buffer_show_enable);
7026ea4b 528
14555b14 529int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
5565a450 530{
14555b14 531 struct iio_buffer *buffer = indio_dev->buffer;
5565a450
JC
532 size_t size;
533 dev_dbg(&indio_dev->dev, "%s\n", __func__);
534 /* Check if there are any scan elements enabled, if not fail*/
14555b14 535 if (!(buffer->scan_count || buffer->scan_timestamp))
5565a450 536 return -EINVAL;
14555b14
JC
537 if (buffer->scan_timestamp)
538 if (buffer->scan_count)
5565a450 539 /* Timestamp (aligned to s64) and data */
14555b14 540 size = (((buffer->scan_count * buffer->bpe)
5565a450
JC
541 + sizeof(s64) - 1)
542 & ~(sizeof(s64) - 1))
543 + sizeof(s64);
544 else /* Timestamp only */
545 size = sizeof(s64);
546 else /* Data only */
14555b14
JC
547 size = buffer->scan_count * buffer->bpe;
548 buffer->access->set_bytes_per_datum(buffer, size);
5565a450
JC
549
550 return 0;
551}
14555b14 552EXPORT_SYMBOL(iio_sw_buffer_preenable);
32b5eeca
JC
553
554
/* note NULL used as error indicator as it doesn't make sense. */
/*
 * Find the first entry in the device's available_scan_masks table that
 * is a superset of @mask.  The table is a flat array of bitmaps, each
 * BITS_TO_LONGS(masklength) longs wide, terminated by an all-zero
 * first word (hence the *av_masks loop condition).  An empty requested
 * mask also returns NULL, as matching it is meaningless.
 */
static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
					  unsigned int masklength,
					  unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
569
570/**
571 * iio_scan_mask_set() - set particular bit in the scan mask
14555b14 572 * @buffer: the buffer whose scan mask we are interested in
32b5eeca
JC
573 * @bit: the bit to be set.
574 **/
14555b14 575int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
32b5eeca 576{
14555b14 577 struct iio_dev *dev_info = buffer->indio_dev;
32b5eeca
JC
578 unsigned long *mask;
579 unsigned long *trialmask;
580
581 trialmask = kmalloc(sizeof(*trialmask)*
582 BITS_TO_LONGS(dev_info->masklength),
583 GFP_KERNEL);
584
585 if (trialmask == NULL)
586 return -ENOMEM;
587 if (!dev_info->masklength) {
14555b14 588 WARN_ON("trying to set scanmask prior to registering buffer\n");
32b5eeca
JC
589 kfree(trialmask);
590 return -EINVAL;
591 }
14555b14 592 bitmap_copy(trialmask, buffer->scan_mask, dev_info->masklength);
32b5eeca
JC
593 set_bit(bit, trialmask);
594
595 if (dev_info->available_scan_masks) {
596 mask = iio_scan_mask_match(dev_info->available_scan_masks,
597 dev_info->masklength,
598 trialmask);
599 if (!mask) {
600 kfree(trialmask);
601 return -EINVAL;
602 }
603 }
14555b14
JC
604 bitmap_copy(buffer->scan_mask, trialmask, dev_info->masklength);
605 buffer->scan_count++;
32b5eeca
JC
606
607 kfree(trialmask);
608
609 return 0;
610};
611EXPORT_SYMBOL_GPL(iio_scan_mask_set);
612
14555b14 613int iio_scan_mask_query(struct iio_buffer *buffer, int bit)
32b5eeca 614{
14555b14 615 struct iio_dev *dev_info = buffer->indio_dev;
32b5eeca
JC
616 long *mask;
617
618 if (bit > dev_info->masklength)
619 return -EINVAL;
620
14555b14 621 if (!buffer->scan_mask)
32b5eeca
JC
622 return 0;
623 if (dev_info->available_scan_masks)
624 mask = iio_scan_mask_match(dev_info->available_scan_masks,
625 dev_info->masklength,
14555b14 626 buffer->scan_mask);
32b5eeca 627 else
14555b14 628 mask = buffer->scan_mask;
32b5eeca
JC
629 if (!mask)
630 return 0;
631
632 return test_bit(bit, mask);
633};
634EXPORT_SYMBOL_GPL(iio_scan_mask_query);