/* drivers/staging/iio/industrialio-buffer.c */
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include "iio.h"
#include "iio_core.h"
#include "sysfs.h"
#include "buffer_generic.h"

static const char * const iio_endian_prefix[] = {
        [IIO_BE] = "be",
        [IIO_LE] = "le",
};

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
                                      size_t n, loff_t *f_ps)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        if (!rb->access->read_first_n)
                return -EINVAL;
        return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
                             struct poll_table_struct *wait)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        poll_wait(filp, &rb->pollq, wait);
        if (rb->stufftoread)
                return POLLIN | POLLRDNORM;
        /* need a way of knowing if there may be enough data... */
        return 0;
}

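/*
 * mark_in_use/unmark_in_use let the buffer implementation lock out parameter
 * changes (resizing, for example) for as long as the character device is
 * held open.
 */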
int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
{
        struct iio_buffer *rb = indio_dev->buffer;
        if (!rb)
                return -EINVAL;
        if (rb->access->mark_in_use)
                rb->access->mark_in_use(rb);
        return 0;
}

void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
{
        struct iio_buffer *rb = indio_dev->buffer;

        clear_bit(IIO_BUSY_BIT_POS, &rb->flags);
        if (rb->access->unmark_in_use)
                rb->access->unmark_in_use(rb);
}

void iio_buffer_init(struct iio_buffer *buffer, struct iio_dev *dev_info)
{
        buffer->indio_dev = dev_info;
        init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

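/*
 * The scan_elements "_type" attribute is formatted as
 * [be|le]:[s|u]<realbits>/<storagebits>>><shift>, e.g. "le:s12/16>>4" for a
 * signed 12-bit little-endian value stored in 16 bits, shifted right by 4.
 */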
static ssize_t iio_show_fixed_type(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        u8 type = this_attr->c->scan_type.endianness;

        if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
                type = IIO_LE;
#else
                type = IIO_BE;
#endif
        }
        return sprintf(buf, "%s:%c%d/%d>>%u\n",
                       iio_endian_prefix[type],
                       this_attr->c->scan_type.sign,
                       this_attr->c->scan_type.realbits,
                       this_attr->c->scan_type.storagebits,
                       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        int ret;
        struct iio_dev *dev_info = dev_get_drvdata(dev);

        ret = iio_scan_mask_query(dev_info->buffer,
                                  to_iio_dev_attr(attr)->address);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
        clear_bit(bit, buffer->scan_mask);
        buffer->scan_count--;
        return 0;
}

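/*
 * Writing "0" to a channel's scan_elements "_en" attribute drops the channel
 * from the scan mask; any other value adds it.  Changes are refused with
 * -EBUSY while the buffer is running in triggered mode.
 */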
static ssize_t iio_scan_el_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t len)
{
        int ret = 0;
        bool state;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(buffer, this_attr->address);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        }

error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;

}

static ssize_t iio_scan_el_ts_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev *dev_info = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", dev_info->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t len)
{
        int ret = 0;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        bool state;

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        indio_dev->buffer->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}

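/*
 * Create the per-channel scan_elements attributes: an "_index" and a "_type"
 * file plus an "_en" toggle (backed by the timestamp handlers for the
 * IIO_TIMESTAMP channel).  For a voltage channel this typically results in
 * e.g. in_voltage0_index, in_voltage0_type and in_voltage0_en.
 */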
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
                                        const struct iio_chan_spec *chan)
{
        int ret, attrcount = 0;
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = __iio_add_chan_devattr("index",
                                     chan,
                                     &iio_show_scan_index,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        ret = __iio_add_chan_devattr("type",
                                     chan,
                                     &iio_show_fixed_type,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        if (chan->type != IIO_TIMESTAMP)
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_show,
                                             &iio_scan_el_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        else
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_ts_show,
                                             &iio_scan_el_ts_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        attrcount++;
        ret = attrcount;
error_ret:
        return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
                                                     struct iio_dev_attr *p)
{
        kfree(p->dev_attr.attr.name);
        kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
        struct iio_dev_attr *p, *n;
        struct iio_buffer *buffer = indio_dev->buffer;

        list_for_each_entry_safe(p, n,
                                 &buffer->scan_el_dev_attr_list, l)
                iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

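/*
 * Build the "scan_elements" sysfs group for a buffer: any fixed attributes
 * supplied by the buffer implementation are copied in first, the per-channel
 * index/type/en attributes created above are appended, and a scan mask large
 * enough for the highest scan_index is allocated.
 */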
int iio_buffer_register(struct iio_dev *indio_dev,
                        const struct iio_chan_spec *channels,
                        int num_channels)
{
        struct iio_dev_attr *p;
        struct attribute **attr;
        struct iio_buffer *buffer = indio_dev->buffer;
        int ret, i, attrn, attrcount, attrcount_orig = 0;

        if (buffer->attrs)
                indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

        if (buffer->scan_el_attrs != NULL) {
                attr = buffer->scan_el_attrs->attrs;
                while (*attr++ != NULL)
                        attrcount_orig++;
        }
        attrcount = attrcount_orig;
        INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
        if (channels) {
                /* new magic */
                for (i = 0; i < num_channels; i++) {
                        /* Establish necessary mask length */
                        if (channels[i].scan_index >
                            (int)indio_dev->masklength - 1)
                                indio_dev->masklength
                                        = indio_dev->channels[i].scan_index + 1;

                        ret = iio_buffer_add_channel_sysfs(indio_dev,
                                                           &channels[i]);
                        if (ret < 0)
                                goto error_cleanup_dynamic;
                        attrcount += ret;
                }
                if (indio_dev->masklength && buffer->scan_mask == NULL) {
                        buffer->scan_mask
                                = kzalloc(sizeof(*buffer->scan_mask)*
                                          BITS_TO_LONGS(indio_dev->masklength),
                                          GFP_KERNEL);
                        if (buffer->scan_mask == NULL) {
                                ret = -ENOMEM;
                                goto error_cleanup_dynamic;
                        }
                }
        }

        buffer->scan_el_group.name = iio_scan_elements_group_name;

        buffer->scan_el_group.attrs
                = kzalloc(sizeof(buffer->scan_el_group.attrs[0])*
                          (attrcount + 1),
                          GFP_KERNEL);
        if (buffer->scan_el_group.attrs == NULL) {
                ret = -ENOMEM;
                goto error_free_scan_mask;
        }
        if (buffer->scan_el_attrs)
                memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
                       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
        attrn = attrcount_orig;

        list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
                buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
        indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

        return 0;

error_free_scan_mask:
        kfree(buffer->scan_mask);
error_cleanup_dynamic:
        __iio_buffer_attr_cleanup(indio_dev);

        return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
        kfree(indio_dev->buffer->scan_mask);
        kfree(indio_dev->buffer->scan_el_group.attrs);
        __iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        if (buffer->access->get_length)
                return sprintf(buf, "%d\n",
                               buffer->access->get_length(buffer));

        return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf,
                                size_t len)
{
        int ret;
        ulong val;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = strict_strtoul(buf, 10, &val);
        if (ret)
                return ret;

        if (buffer->access->get_length)
                if (val == buffer->access->get_length(buffer))
                        return len;

        if (buffer->access->set_length) {
                buffer->access->set_length(buffer, val);
                if (buffer->access->mark_param_change)
                        buffer->access->mark_param_change(buffer);
        }

        return len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        if (buffer->access->get_bytes_per_datum)
                return sprintf(buf, "%d\n",
                               buffer->access->get_bytes_per_datum(buffer));

        return 0;
}
EXPORT_SYMBOL(iio_buffer_read_bytes_per_datum);

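/*
 * Enable sequence: preenable -> request_update -> mark_in_use -> switch to
 * triggered or hardware mode -> postenable.  Disable sequence: predisable ->
 * unmark_in_use -> back to direct mode -> postdisable.  A failing postenable
 * restores the previous mode and calls postdisable.
 */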
ssize_t iio_buffer_store_enable(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf,
                                size_t len)
{
        int ret;
        bool requested_state, current_state;
        int previous_mode;
        struct iio_dev *dev_info = dev_get_drvdata(dev);
        struct iio_buffer *buffer = dev_info->buffer;

        mutex_lock(&dev_info->mlock);
        previous_mode = dev_info->currentmode;
        requested_state = !(buf[0] == '0');
        current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES);
        if (current_state == requested_state) {
                printk(KERN_INFO "iio-buffer, current state requested again\n");
                goto done;
        }
        if (requested_state) {
                if (buffer->setup_ops->preenable) {
                        ret = buffer->setup_ops->preenable(dev_info);
                        if (ret) {
                                printk(KERN_ERR
                                       "Buffer not started: "
                                       "buffer preenable failed\n");
                                goto error_ret;
                        }
                }
                if (buffer->access->request_update) {
                        ret = buffer->access->request_update(buffer);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: "
                                       "buffer parameter update failed\n");
                                goto error_ret;
                        }
                }
                if (buffer->access->mark_in_use)
                        buffer->access->mark_in_use(buffer);
                /* Definitely possible for devices to support both of these. */
                if (dev_info->modes & INDIO_BUFFER_TRIGGERED) {
                        if (!dev_info->trig) {
                                printk(KERN_INFO
                                       "Buffer not started: no trigger\n");
                                ret = -EINVAL;
                                if (buffer->access->unmark_in_use)
                                        buffer->access->unmark_in_use(buffer);
                                goto error_ret;
                        }
                        dev_info->currentmode = INDIO_BUFFER_TRIGGERED;
                } else if (dev_info->modes & INDIO_BUFFER_HARDWARE)
                        dev_info->currentmode = INDIO_BUFFER_HARDWARE;
                else { /* should never be reached */
                        ret = -EINVAL;
                        goto error_ret;
                }

                if (buffer->setup_ops->postenable) {
                        ret = buffer->setup_ops->postenable(dev_info);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: "
                                       "postenable failed\n");
                                if (buffer->access->unmark_in_use)
                                        buffer->access->unmark_in_use(buffer);
                                dev_info->currentmode = previous_mode;
                                if (buffer->setup_ops->postdisable)
                                        buffer->setup_ops->
                                                postdisable(dev_info);
                                goto error_ret;
                        }
                }
        } else {
                if (buffer->setup_ops->predisable) {
                        ret = buffer->setup_ops->predisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
                if (buffer->access->unmark_in_use)
                        buffer->access->unmark_in_use(buffer);
                dev_info->currentmode = INDIO_DIRECT_MODE;
                if (buffer->setup_ops->postdisable) {
                        ret = buffer->setup_ops->postdisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
        }
done:
        mutex_unlock(&dev_info->mlock);
        return len;

error_ret:
        mutex_unlock(&dev_info->mlock);
        return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

ssize_t iio_buffer_show_enable(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *dev_info = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", !!(dev_info->currentmode
                                       & INDIO_ALL_BUFFER_MODES));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

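/*
 * Example sizing: three enabled channels of 2 bytes each plus a timestamp
 * gives 6 bytes of data, padded to 8 for s64 alignment, plus the 8-byte
 * timestamp itself: 16 bytes per scan.
 */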
int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer = indio_dev->buffer;
        size_t size;
        dev_dbg(&indio_dev->dev, "%s\n", __func__);
        /* Check if there are any scan elements enabled, if not fail */
        if (!(buffer->scan_count || buffer->scan_timestamp))
                return -EINVAL;
        if (buffer->scan_timestamp)
                if (buffer->scan_count)
                        /* Timestamp (aligned to s64) and data */
                        size = (((buffer->scan_count * buffer->bpe)
                                 + sizeof(s64) - 1)
                                & ~(sizeof(s64) - 1))
                                + sizeof(s64);
                else /* Timestamp only */
                        size = sizeof(s64);
        else /* Data only */
                size = buffer->scan_count * buffer->bpe;
        buffer->access->set_bytes_per_datum(buffer, size);

        return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);


/* Note: NULL is used as the error indicator, as a NULL match makes no sense. */
static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
                                          unsigned int masklength,
                                          unsigned long *mask)
{
        if (bitmap_empty(mask, masklength))
                return NULL;
        while (*av_masks) {
                if (bitmap_subset(mask, av_masks, masklength))
                        return av_masks;
                av_masks += BITS_TO_LONGS(masklength);
        }
        return NULL;
}

/**
 * iio_scan_mask_set() - set a particular bit in the scan mask
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * If the driver supplies available_scan_masks, the resulting mask must be a
 * subset of one of them, otherwise -EINVAL is returned.
 **/
int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
{
        struct iio_dev *dev_info = buffer->indio_dev;
        unsigned long *mask;
        unsigned long *trialmask;

        trialmask = kmalloc(sizeof(*trialmask)*
                            BITS_TO_LONGS(dev_info->masklength),
                            GFP_KERNEL);

        if (trialmask == NULL)
                return -ENOMEM;
        if (!dev_info->masklength) {
                WARN_ON("trying to set scanmask prior to registering buffer\n");
                kfree(trialmask);
                return -EINVAL;
        }
        bitmap_copy(trialmask, buffer->scan_mask, dev_info->masklength);
        set_bit(bit, trialmask);

        if (dev_info->available_scan_masks) {
                mask = iio_scan_mask_match(dev_info->available_scan_masks,
                                           dev_info->masklength,
                                           trialmask);
                if (!mask) {
                        kfree(trialmask);
                        return -EINVAL;
                }
        }
        bitmap_copy(buffer->scan_mask, trialmask, dev_info->masklength);
        buffer->scan_count++;

        kfree(trialmask);

        return 0;
};
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_buffer *buffer, int bit)
{
        struct iio_dev *dev_info = buffer->indio_dev;
        long *mask;

        if (bit > dev_info->masklength)
                return -EINVAL;

        if (!buffer->scan_mask)
                return 0;
        if (dev_info->available_scan_masks)
                mask = iio_scan_mask_match(dev_info->available_scan_masks,
                                           dev_info->masklength,
                                           buffer->scan_mask);
        else
                mask = buffer->scan_mask;
        if (!mask)
                return 0;

        return test_bit(bit, mask);
};
EXPORT_SYMBOL_GPL(iio_scan_mask_query);