/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
        [IIO_BE] = "be",
        [IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
        return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
        return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
                                   struct iio_buffer *buf, size_t required)
{
        if (!indio_dev->info->hwfifo_flush_to_buffer)
                return -ENODEV;

        return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
                             size_t to_wait, int to_flush)
{
        size_t avail;
        int flushed = 0;

        /* wakeup if the device was unregistered */
        if (!indio_dev->info)
                return true;

        /* drain the buffer if it was disabled */
        if (!iio_buffer_is_active(buf)) {
                to_wait = min_t(size_t, to_wait, 1);
                to_flush = 0;
        }

        avail = iio_buffer_data_available(buf);

        if (avail >= to_wait) {
                /* force a flush for non-blocking reads */
                if (!to_wait && avail < to_flush)
                        iio_buffer_flush_hwfifo(indio_dev, buf,
                                                to_flush - avail);
                return true;
        }

        if (to_flush)
                flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
                                                  to_wait - avail);
        if (flushed <= 0)
                return false;

        if (avail + flushed >= to_wait)
                return true;

        return false;
}
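
/*
 * Illustrative example (not from the original source): with a watermark of
 * 8 samples, a blocking read of 4 samples (to_wait = 4) reports ready as
 * soon as 4 samples are buffered; a non-blocking read (to_wait = 0) with
 * 2 samples buffered and to_flush = 4 first asks the hardware FIFO for the
 * missing 2 samples and then reports ready without sleeping.
 */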

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative error code, or the number of bytes read
 */
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
                                      size_t n, loff_t *f_ps)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        size_t datum_size;
        size_t to_wait;
        int ret = 0;

        if (!indio_dev->info)
                return -ENODEV;

        if (!rb || !rb->access->read_first_n)
                return -EINVAL;

        datum_size = rb->bytes_per_datum;

        /*
         * If datum_size is 0 there will never be anything to read from the
         * buffer, so signal end of file now.
         */
        if (!datum_size)
                return 0;

        if (filp->f_flags & O_NONBLOCK)
                to_wait = 0;
        else
                to_wait = min_t(size_t, n / datum_size, rb->watermark);

        add_wait_queue(&rb->pollq, &wait);
        do {
                if (!indio_dev->info) {
                        ret = -ENODEV;
                        break;
                }

                if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
                        if (signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
                        }

                        wait_woken(&wait, TASK_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
                        continue;
                }

                ret = rb->access->read_first_n(rb, n, buf);
                if (ret == 0 && (filp->f_flags & O_NONBLOCK))
                        ret = -EAGAIN;
        } while (ret == 0);
        remove_wait_queue(&rb->pollq, &wait);

        return ret;
}
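
/*
 * Illustrative userspace usage (a sketch, not part of the kernel API
 * surface; SCAN_BYTES and process_scan() are hypothetical):
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	char scan[SCAN_BYTES];
 *	while (read(fd, scan, sizeof(scan)) > 0)
 *		process_scan(scan);
 *
 * A blocking read returns once at least min(requested samples, watermark)
 * samples are buffered; with O_NONBLOCK the read fails with EAGAIN instead
 * of sleeping.
 */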

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (POLLIN | POLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
unsigned int iio_buffer_poll(struct file *filp,
                             struct poll_table_struct *wait)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        if (!indio_dev->info)
                return 0;

        poll_wait(filp, &rb->pollq, wait);
        if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
                return POLLIN | POLLRDNORM;
        return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
        if (!indio_dev->buffer)
                return;

        wake_up(&indio_dev->buffer->pollq);
}

/**
 * iio_buffer_init() - Initialize the buffer structure
 * @buffer: buffer to be initialized
 */
void iio_buffer_init(struct iio_buffer *buffer)
{
        INIT_LIST_HEAD(&buffer->demux_list);
        INIT_LIST_HEAD(&buffer->buffer_list);
        init_waitqueue_head(&buffer->pollq);
        kref_init(&buffer->ref);
        if (!buffer->watermark)
                buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);
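
/*
 * Illustrative use by a buffer implementation (a sketch; "my_buffer" and
 * "my_buffer_access_funcs" are hypothetical names, not defined here):
 *
 *	struct my_buffer *b = kzalloc(sizeof(*b), GFP_KERNEL);
 *	if (!b)
 *		return NULL;
 *	iio_buffer_init(&b->buffer);
 *	b->buffer.access = &my_buffer_access_funcs;
 *	return &b->buffer;
 *
 * iio_buffer_init() must run before the buffer is handed to the core so
 * that the list heads, waitqueue, and reference count are valid.
 */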

static ssize_t iio_show_scan_index(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        u8 type = this_attr->c->scan_type.endianness;

        if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
                type = IIO_LE;
#else
                type = IIO_BE;
#endif
        }
        if (this_attr->c->scan_type.repeat > 1)
                return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
                               iio_endian_prefix[type],
                               this_attr->c->scan_type.sign,
                               this_attr->c->scan_type.realbits,
                               this_attr->c->scan_type.storagebits,
                               this_attr->c->scan_type.repeat,
                               this_attr->c->scan_type.shift);
        else
                return sprintf(buf, "%s:%c%d/%d>>%u\n",
                               iio_endian_prefix[type],
                               this_attr->c->scan_type.sign,
                               this_attr->c->scan_type.realbits,
                               this_attr->c->scan_type.storagebits,
                               this_attr->c->scan_type.shift);
}
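
/*
 * For example, a signed 12-bit little-endian sample stored in 16 bits with
 * a 4-bit right shift reads back through the scan element *_type attribute
 * as "le:s12/16>>4"; with a repeat count of 2 it reads "le:s12/16X2>>4".
 */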

static ssize_t iio_scan_el_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        int ret;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        /* Ensure ret is 0 or 1. */
        ret = !!test_bit(to_iio_dev_attr(attr)->address,
                         indio_dev->buffer->scan_mask);

        return sprintf(buf, "%d\n", ret);
}

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
                                                unsigned int masklength,
                                                const unsigned long *mask,
                                                bool strict)
{
        if (bitmap_empty(mask, masklength))
                return NULL;
        while (*av_masks) {
                if (strict) {
                        if (bitmap_equal(mask, av_masks, masklength))
                                return av_masks;
                } else {
                        if (bitmap_subset(mask, av_masks, masklength))
                                return av_masks;
                }
                av_masks += BITS_TO_LONGS(masklength);
        }
        return NULL;
}
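
/*
 * Illustrative available_scan_masks layout (a sketch; "my_scan_masks" is a
 * hypothetical name): a driver whose hardware can sample either channel 0
 * alone or channels 0-2 together would publish a zero-terminated list of
 * bitmaps such as
 *
 *	static const unsigned long my_scan_masks[] = {
 *		BIT(0),
 *		BIT(0) | BIT(1) | BIT(2),
 *		0,
 *	};
 *
 * In non-strict mode a requested mask of BIT(1) matches the second entry
 * because it is a subset of it.
 */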

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
                                   const unsigned long *mask)
{
        if (!indio_dev->setup_ops->validate_scan_mask)
                return true;

        return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
                             struct iio_buffer *buffer, int bit)
{
        const unsigned long *mask;
        unsigned long *trialmask;

        trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
                                  sizeof(*trialmask),
                                  GFP_KERNEL);
        if (trialmask == NULL)
                return -ENOMEM;
        if (!indio_dev->masklength) {
                WARN(1, "Trying to set scanmask prior to registering buffer\n");
                goto err_invalid_mask;
        }
        bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
        set_bit(bit, trialmask);

        if (!iio_validate_scan_mask(indio_dev, trialmask))
                goto err_invalid_mask;

        if (indio_dev->available_scan_masks) {
                mask = iio_scan_mask_match(indio_dev->available_scan_masks,
                                           indio_dev->masklength,
                                           trialmask, false);
                if (!mask)
                        goto err_invalid_mask;
        }
        bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

        kfree(trialmask);

        return 0;

err_invalid_mask:
        kfree(trialmask);
        return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
        clear_bit(bit, buffer->scan_mask);
        return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t len)
{
        int ret;
        bool state;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

        ret = strtobool(buf, &state);
        if (ret < 0)
                return ret;
        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_is_active(indio_dev->buffer)) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        }

error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t len)
{
        int ret;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        bool state;

        ret = strtobool(buf, &state);
        if (ret < 0)
                return ret;

        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_is_active(indio_dev->buffer)) {
                ret = -EBUSY;
                goto error_ret;
        }
        indio_dev->buffer->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
                                        const struct iio_chan_spec *chan)
{
        int ret, attrcount = 0;
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = __iio_add_chan_devattr("index",
                                     chan,
                                     &iio_show_scan_index,
                                     NULL,
                                     0,
                                     IIO_SEPARATE,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                return ret;
        attrcount++;
        ret = __iio_add_chan_devattr("type",
                                     chan,
                                     &iio_show_fixed_type,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                return ret;
        attrcount++;
        if (chan->type != IIO_TIMESTAMP)
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_show,
                                             &iio_scan_el_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        else
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_ts_show,
                                             &iio_scan_el_ts_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        if (ret)
                return ret;
        attrcount++;
        return attrcount;
}

static ssize_t iio_buffer_read_length(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        return sprintf(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t len)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        unsigned int val;
        int ret;

        ret = kstrtouint(buf, 10, &val);
        if (ret)
                return ret;

        if (val == buffer->length)
                return len;

        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_is_active(indio_dev->buffer)) {
                ret = -EBUSY;
        } else {
                buffer->access->set_length(buffer, val);
                ret = 0;
        }
        if (ret)
                goto out;
        if (buffer->length && buffer->length < buffer->watermark)
                buffer->watermark = buffer->length;
out:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}

static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
                                             unsigned int scan_index)
{
        const struct iio_chan_spec *ch;
        unsigned int bytes;

        ch = iio_find_channel_from_si(indio_dev, scan_index);
        bytes = ch->scan_type.storagebits / 8;
        if (ch->scan_type.repeat > 1)
                bytes *= ch->scan_type.repeat;
        return bytes;
}

static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
        return iio_storage_bytes_for_si(indio_dev,
                                        indio_dev->scan_index_timestamp);
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
                                  const unsigned long *mask, bool timestamp)
{
        unsigned bytes = 0;
        int length, i;

        /* How much space will the demuxed element take? */
        for_each_set_bit(i, mask, indio_dev->masklength) {
                length = iio_storage_bytes_for_si(indio_dev, i);
                bytes = ALIGN(bytes, length);
                bytes += length;
        }

        if (timestamp) {
                length = iio_storage_bytes_for_timestamp(indio_dev);
                bytes = ALIGN(bytes, length);
                bytes += length;
        }
        return bytes;
}
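
/*
 * Worked example (illustrative): two enabled channels stored in 2 bytes
 * each plus an 8-byte timestamp pack as
 *
 *	bytes  0-1:  channel A
 *	bytes  2-3:  channel B
 *	bytes  4-7:  padding (timestamp aligned to 8)
 *	bytes  8-15: timestamp
 *
 * giving a scan size of 16 bytes, since each element is aligned to its own
 * storage size.
 */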

static void iio_buffer_activate(struct iio_dev *indio_dev,
                                struct iio_buffer *buffer)
{
        iio_buffer_get(buffer);
        list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
        list_del_init(&buffer->buffer_list);
        wake_up_interruptible(&buffer->pollq);
        iio_buffer_put(buffer);
}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer, *_buffer;

        list_for_each_entry_safe(buffer, _buffer,
                                 &indio_dev->buffer_list, buffer_list)
                iio_buffer_deactivate(buffer);
}

static int iio_buffer_enable(struct iio_buffer *buffer,
                             struct iio_dev *indio_dev)
{
        if (!buffer->access->enable)
                return 0;
        return buffer->access->enable(buffer, indio_dev);
}

static int iio_buffer_disable(struct iio_buffer *buffer,
                              struct iio_dev *indio_dev)
{
        if (!buffer->access->disable)
                return 0;
        return buffer->access->disable(buffer, indio_dev);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
                                              struct iio_buffer *buffer)
{
        unsigned int bytes;

        if (!buffer->access->set_bytes_per_datum)
                return;

        bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
                                       buffer->scan_timestamp);

        buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
                                     struct iio_buffer *buffer)
{
        int ret;

        iio_buffer_update_bytes_per_datum(indio_dev, buffer);
        if (buffer->access->request_update) {
                ret = buffer->access->request_update(buffer);
                if (ret) {
                        dev_dbg(&indio_dev->dev,
                                "Buffer not started: buffer parameter update failed (%d)\n",
                                ret);
                        return ret;
                }
        }

        return 0;
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
                               const unsigned long *mask)
{
        /* If the mask is dynamically allocated free it, otherwise do nothing */
        if (!indio_dev->available_scan_masks)
                kfree(mask);
}

struct iio_device_config {
        unsigned int mode;
        unsigned int watermark;
        const unsigned long *scan_mask;
        unsigned int scan_bytes;
        bool scan_timestamp;
};

static int iio_verify_update(struct iio_dev *indio_dev,
        struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
        struct iio_device_config *config)
{
        unsigned long *compound_mask;
        const unsigned long *scan_mask;
        bool strict_scanmask = false;
        struct iio_buffer *buffer;
        bool scan_timestamp;
        unsigned int modes;

        memset(config, 0, sizeof(*config));
        config->watermark = ~0;

        /*
         * If there is just one buffer and we are removing it there is nothing
         * to verify.
         */
        if (remove_buffer && !insert_buffer &&
            list_is_singular(&indio_dev->buffer_list))
                return 0;

        modes = indio_dev->modes;

        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
                if (buffer == remove_buffer)
                        continue;
                modes &= buffer->access->modes;
                config->watermark = min(config->watermark, buffer->watermark);
        }

        if (insert_buffer) {
                modes &= insert_buffer->access->modes;
                config->watermark = min(config->watermark,
                                        insert_buffer->watermark);
        }

        /* Definitely possible for devices to support both of these. */
        if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
                config->mode = INDIO_BUFFER_TRIGGERED;
        } else if (modes & INDIO_BUFFER_HARDWARE) {
                /*
                 * Keep things simple for now and only allow a single buffer to
                 * be connected in hardware mode.
                 */
                if (insert_buffer && !list_empty(&indio_dev->buffer_list))
                        return -EINVAL;
                config->mode = INDIO_BUFFER_HARDWARE;
                strict_scanmask = true;
        } else if (modes & INDIO_BUFFER_SOFTWARE) {
                config->mode = INDIO_BUFFER_SOFTWARE;
        } else {
                /* Can only occur on first buffer */
                if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
                        dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
                return -EINVAL;
        }

        /* What scan mask do we actually have? */
        compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
                                sizeof(long), GFP_KERNEL);
        if (compound_mask == NULL)
                return -ENOMEM;

        scan_timestamp = false;

        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
                if (buffer == remove_buffer)
                        continue;
                bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
                          indio_dev->masklength);
                scan_timestamp |= buffer->scan_timestamp;
        }

        if (insert_buffer) {
                bitmap_or(compound_mask, compound_mask,
                          insert_buffer->scan_mask, indio_dev->masklength);
                scan_timestamp |= insert_buffer->scan_timestamp;
        }

        if (indio_dev->available_scan_masks) {
                scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
                                                indio_dev->masklength,
                                                compound_mask,
                                                strict_scanmask);
                kfree(compound_mask);
                if (scan_mask == NULL)
                        return -EINVAL;
        } else {
                scan_mask = compound_mask;
        }

        config->scan_bytes = iio_compute_scan_bytes(indio_dev,
                                                    scan_mask, scan_timestamp);
        config->scan_mask = scan_mask;
        config->scan_timestamp = scan_timestamp;

        return 0;
}

static int iio_enable_buffers(struct iio_dev *indio_dev,
                              struct iio_device_config *config)
{
        struct iio_buffer *buffer;
        int ret;

        indio_dev->active_scan_mask = config->scan_mask;
        indio_dev->scan_timestamp = config->scan_timestamp;
        indio_dev->scan_bytes = config->scan_bytes;

        iio_update_demux(indio_dev);

        /* Wind up again */
        if (indio_dev->setup_ops->preenable) {
                ret = indio_dev->setup_ops->preenable(indio_dev);
                if (ret) {
                        dev_dbg(&indio_dev->dev,
                                "Buffer not started: buffer preenable failed (%d)\n", ret);
                        goto err_undo_config;
                }
        }

        if (indio_dev->info->update_scan_mode) {
                ret = indio_dev->info
                        ->update_scan_mode(indio_dev,
                                           indio_dev->active_scan_mask);
                if (ret < 0) {
                        dev_dbg(&indio_dev->dev,
                                "Buffer not started: update scan mode failed (%d)\n",
                                ret);
                        goto err_run_postdisable;
                }
        }

        if (indio_dev->info->hwfifo_set_watermark)
                indio_dev->info->hwfifo_set_watermark(indio_dev,
                        config->watermark);

        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
                ret = iio_buffer_enable(buffer, indio_dev);
                if (ret)
                        goto err_disable_buffers;
        }

        indio_dev->currentmode = config->mode;

        if (indio_dev->setup_ops->postenable) {
                ret = indio_dev->setup_ops->postenable(indio_dev);
                if (ret) {
                        dev_dbg(&indio_dev->dev,
                                "Buffer not started: postenable failed (%d)\n", ret);
                        goto err_disable_buffers;
                }
        }

        return 0;

err_disable_buffers:
        list_for_each_entry_continue_reverse(buffer, &indio_dev->buffer_list,
                                             buffer_list)
                iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
        indio_dev->currentmode = INDIO_DIRECT_MODE;
        if (indio_dev->setup_ops->postdisable)
                indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
        indio_dev->active_scan_mask = NULL;

        return ret;
}

static int iio_disable_buffers(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer;
        int ret = 0;
        int ret2;

        /* Wind down existing buffers - iff there are any */
        if (list_empty(&indio_dev->buffer_list))
                return 0;

        /*
         * If things go wrong at some step in disable we still need to continue
         * to perform the other steps, otherwise we leave the device in an
         * inconsistent state. We return the error code for the first error we
         * encountered.
         */

        if (indio_dev->setup_ops->predisable) {
                ret2 = indio_dev->setup_ops->predisable(indio_dev);
                if (ret2 && !ret)
                        ret = ret2;
        }

        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
                ret2 = iio_buffer_disable(buffer, indio_dev);
                if (ret2 && !ret)
                        ret = ret2;
        }

        indio_dev->currentmode = INDIO_DIRECT_MODE;

        if (indio_dev->setup_ops->postdisable) {
                ret2 = indio_dev->setup_ops->postdisable(indio_dev);
                if (ret2 && !ret)
                        ret = ret2;
        }

        iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
        indio_dev->active_scan_mask = NULL;

        return ret;
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
                                struct iio_buffer *insert_buffer,
                                struct iio_buffer *remove_buffer)
{
        struct iio_device_config new_config;
        int ret;

        ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
                                &new_config);
        if (ret)
                return ret;

        if (insert_buffer) {
                ret = iio_buffer_request_update(indio_dev, insert_buffer);
                if (ret)
                        goto err_free_config;
        }

        ret = iio_disable_buffers(indio_dev);
        if (ret)
                goto err_deactivate_all;

        if (remove_buffer)
                iio_buffer_deactivate(remove_buffer);
        if (insert_buffer)
                iio_buffer_activate(indio_dev, insert_buffer);

        /* If no buffers in list, we are done */
        if (list_empty(&indio_dev->buffer_list))
                return 0;

        ret = iio_enable_buffers(indio_dev, &new_config);
        if (ret)
                goto err_deactivate_all;

        return 0;

err_deactivate_all:
        /*
         * We've already verified that the config is valid earlier. If things
         * go wrong in either enable or disable the most likely reason is an
         * IO error from the device. In this case there is no good recovery
         * strategy. Just make sure to disable everything and leave the device
         * in a sane state. With a bit of luck the device might come back to
         * life again later and userspace can try again.
         */
        iio_buffer_deactivate_all(indio_dev);

err_free_config:
        iio_free_scan_mask(indio_dev, new_config.scan_mask);
        return ret;
}

/**
 * iio_update_buffers() - add or remove buffer from the active list
 * @indio_dev:		device whose buffer list is updated
 * @insert_buffer:	buffer to insert, may be NULL
 * @remove_buffer:	buffer to remove, may be NULL
 *
 * Note this will tear down all existing buffering and build it up again.
 */
int iio_update_buffers(struct iio_dev *indio_dev,
                       struct iio_buffer *insert_buffer,
                       struct iio_buffer *remove_buffer)
{
        int ret;

        if (insert_buffer == remove_buffer)
                return 0;

        mutex_lock(&indio_dev->info_exist_lock);
        mutex_lock(&indio_dev->mlock);

        if (insert_buffer && iio_buffer_is_active(insert_buffer))
                insert_buffer = NULL;

        if (remove_buffer && !iio_buffer_is_active(remove_buffer))
                remove_buffer = NULL;

        if (!insert_buffer && !remove_buffer) {
                ret = 0;
                goto out_unlock;
        }

        if (indio_dev->info == NULL) {
                ret = -ENODEV;
                goto out_unlock;
        }

        ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
        mutex_unlock(&indio_dev->mlock);
        mutex_unlock(&indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
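
/*
 * Illustrative in-kernel consumer usage (a sketch; "cb_buff" is a
 * hypothetical buffer pointer, not defined in this file):
 *
 *	ret = iio_update_buffers(indio_dev, cb_buff, NULL);
 *	...
 *	ret = iio_update_buffers(indio_dev, NULL, cb_buff);
 *
 * The first call attaches the buffer, the second detaches it. The function
 * takes both info_exist_lock and mlock itself, so callers must not already
 * hold them.
 */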

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
        iio_disable_buffers(indio_dev);
        iio_buffer_deactivate_all(indio_dev);
}

static ssize_t iio_buffer_store_enable(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf,
                                       size_t len)
{
        int ret;
        bool requested_state;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        bool inlist;

        ret = strtobool(buf, &requested_state);
        if (ret < 0)
                return ret;

        mutex_lock(&indio_dev->mlock);

        /* Find out if it is in the list */
        inlist = iio_buffer_is_active(indio_dev->buffer);
        /* Already in desired state */
        if (inlist == requested_state)
                goto done;

        if (requested_state)
                ret = __iio_update_buffers(indio_dev,
                                           indio_dev->buffer, NULL);
        else
                ret = __iio_update_buffers(indio_dev,
                                           NULL, indio_dev->buffer);

done:
        mutex_unlock(&indio_dev->mlock);
        return (ret < 0) ? ret : len;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

static ssize_t iio_buffer_show_watermark(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        return sprintf(buf, "%u\n", buffer->watermark);
}

static ssize_t iio_buffer_store_watermark(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf,
                                          size_t len)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        unsigned int val;
        int ret;

        ret = kstrtouint(buf, 10, &val);
        if (ret)
                return ret;
        if (!val)
                return -EINVAL;

        mutex_lock(&indio_dev->mlock);

        if (val > buffer->length) {
                ret = -EINVAL;
                goto out;
        }

        if (iio_buffer_is_active(indio_dev->buffer)) {
                ret = -EBUSY;
                goto out;
        }

        buffer->watermark = val;
out:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
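
/*
 * From userspace (illustrative; deviceX stands for the actual device
 * number), the watermark must be configured while the buffer is disabled:
 * writing "64" to /sys/bus/iio/devices/iio:deviceX/buffer/watermark with
 * buffer/enable at 0 succeeds, fails with EBUSY while enabled, and with
 * EINVAL if the value is zero or exceeds buffer/length.
 */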

static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
                   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
        S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
                   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
                   iio_buffer_show_watermark, iio_buffer_store_watermark);
static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
        S_IRUGO, iio_buffer_show_watermark, NULL);

static struct attribute *iio_buffer_attrs[] = {
        &dev_attr_length.attr,
        &dev_attr_enable.attr,
        &dev_attr_watermark.attr,
};

int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
        struct iio_dev_attr *p;
        struct attribute **attr;
        struct iio_buffer *buffer = indio_dev->buffer;
        int ret, i, attrn, attrcount, attrcount_orig = 0;
        const struct iio_chan_spec *channels;

        channels = indio_dev->channels;
        if (channels) {
                int ml = indio_dev->masklength;

                for (i = 0; i < indio_dev->num_channels; i++)
                        ml = max(ml, channels[i].scan_index + 1);
                indio_dev->masklength = ml;
        }

        if (!buffer)
                return 0;

        attrcount = 0;
        if (buffer->attrs) {
                while (buffer->attrs[attrcount] != NULL)
                        attrcount++;
        }

        attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
                       sizeof(struct attribute *), GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
        if (!buffer->access->set_length)
                attr[0] = &dev_attr_length_ro.attr;

        if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
                attr[2] = &dev_attr_watermark_ro.attr;

        if (buffer->attrs)
                memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
                       sizeof(struct attribute *) * attrcount);

        attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

        buffer->buffer_group.name = "buffer";
        buffer->buffer_group.attrs = attr;

        indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

        if (buffer->scan_el_attrs != NULL) {
                attr = buffer->scan_el_attrs->attrs;
                while (*attr++ != NULL)
                        attrcount_orig++;
        }
        attrcount = attrcount_orig;
        INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
        channels = indio_dev->channels;
        if (channels) {
                /* Add the per-channel scan element sysfs attributes. */
                for (i = 0; i < indio_dev->num_channels; i++) {
                        if (channels[i].scan_index < 0)
                                continue;

                        ret = iio_buffer_add_channel_sysfs(indio_dev,
                                                           &channels[i]);
                        if (ret < 0)
                                goto error_cleanup_dynamic;
                        attrcount += ret;
                        if (channels[i].type == IIO_TIMESTAMP)
                                indio_dev->scan_index_timestamp =
                                        channels[i].scan_index;
                }
                if (indio_dev->masklength && buffer->scan_mask == NULL) {
                        buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
                                                    sizeof(*buffer->scan_mask),
                                                    GFP_KERNEL);
                        if (buffer->scan_mask == NULL) {
                                ret = -ENOMEM;
                                goto error_cleanup_dynamic;
                        }
                }
        }

        buffer->scan_el_group.name = iio_scan_elements_group_name;

        buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
                                              sizeof(buffer->scan_el_group.attrs[0]),
                                              GFP_KERNEL);
        if (buffer->scan_el_group.attrs == NULL) {
                ret = -ENOMEM;
                goto error_free_scan_mask;
        }
        if (buffer->scan_el_attrs)
                memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
                       sizeof(buffer->scan_el_group.attrs[0]) * attrcount_orig);
        attrn = attrcount_orig;

        list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
                buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
        indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

        return 0;

error_free_scan_mask:
        kfree(buffer->scan_mask);
error_cleanup_dynamic:
        iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
        kfree(indio_dev->buffer->buffer_group.attrs);

        return ret;
}

void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
        if (!indio_dev->buffer)
                return;

        kfree(indio_dev->buffer->scan_mask);
        kfree(indio_dev->buffer->buffer_group.attrs);
        kfree(indio_dev->buffer->scan_el_group.attrs);
        iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
                                   const unsigned long *mask)
{
        return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
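
/*
 * Illustrative driver hookup (a sketch; "my_setup_ops" is a hypothetical
 * name):
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 *
 * With this in place, iio_scan_mask_set() rejects any attempt to enable a
 * second channel while one is already selected.
 */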

/**
 * iio_scan_mask_query() - query state of a bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be queried
 *
 * Return: 1 if the bit is set, 0 if it is not, or a negative error code.
 */
int iio_scan_mask_query(struct iio_dev *indio_dev,
                        struct iio_buffer *buffer, int bit)
{
        if (bit > indio_dev->masklength)
                return -EINVAL;

        if (!buffer->scan_mask)
                return 0;

        /* Ensure return value is 0 or 1. */
        return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
        unsigned from;
        unsigned to;
        unsigned length;
        struct list_head l;
};

static const void *iio_demux(struct iio_buffer *buffer,
                             const void *datain)
{
        struct iio_demux_table *t;

        if (list_empty(&buffer->demux_list))
                return datain;
        list_for_each_entry(t, &buffer->demux_list, l)
                memcpy(buffer->demux_bounce + t->to,
                       datain + t->from, t->length);

        return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
        const void *dataout = iio_demux(buffer, data);
        int ret;

        ret = buffer->access->store_to(buffer, dataout);
        if (ret)
                return ret;

        /*
         * We can't just test for watermark to decide if we wake the poll
         * queue because read may request fewer samples than the watermark.
         */
        wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM);
        return 0;
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
        struct iio_demux_table *p, *q;

        list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
                list_del(&p->l);
                kfree(p);
        }
}

/**
 * iio_push_to_buffers() - push a full scan to all attached buffers
 * @indio_dev:	iio_dev structure for device
 * @data:	full scan, laid out per the active scan mask
 */
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
        int ret;
        struct iio_buffer *buf;

        list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
                ret = iio_push_to_buffer(buf, data);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
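
/*
 * Illustrative producer (a sketch; names other than iio_push_to_buffers are
 * hypothetical): a trigger handler typically fills a scan-sized array and
 * pushes it, e.g.
 *
 *	u8 scan[MY_SCAN_BYTES];
 *	my_read_sample_data(st, scan);
 *	iio_push_to_buffers(indio_dev, scan);
 *
 * The data must follow the active scan mask layout; demuxing to each
 * attached buffer's own mask happens internally via iio_demux().
 */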

static int iio_buffer_add_demux(struct iio_buffer *buffer,
        struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
        unsigned int length)
{
        if (*p && (*p)->from + (*p)->length == in_loc &&
            (*p)->to + (*p)->length == out_loc) {
                (*p)->length += length;
        } else {
                *p = kmalloc(sizeof(**p), GFP_KERNEL);
                if (*p == NULL)
                        return -ENOMEM;
                (*p)->from = in_loc;
                (*p)->to = out_loc;
                (*p)->length = length;
                list_add_tail(&(*p)->l, &buffer->demux_list);
        }

        return 0;
}

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
                                   struct iio_buffer *buffer)
{
        int ret, in_ind = -1, out_ind, length;
        unsigned in_loc = 0, out_loc = 0;
        struct iio_demux_table *p = NULL;

        /* Clear out any old demux */
        iio_buffer_demux_free(buffer);
        kfree(buffer->demux_bounce);
        buffer->demux_bounce = NULL;

        /* First work out which scan mode we will actually have */
        if (bitmap_equal(indio_dev->active_scan_mask,
                         buffer->scan_mask,
                         indio_dev->masklength))
                return 0;

        /* Now we have the two masks, work from least sig and build up sizes */
        for_each_set_bit(out_ind,
                         buffer->scan_mask,
                         indio_dev->masklength) {
                in_ind = find_next_bit(indio_dev->active_scan_mask,
                                       indio_dev->masklength,
                                       in_ind + 1);
                while (in_ind != out_ind) {
                        in_ind = find_next_bit(indio_dev->active_scan_mask,
                                               indio_dev->masklength,
                                               in_ind + 1);
                        length = iio_storage_bytes_for_si(indio_dev, in_ind);
                        /* Make sure we are aligned */
                        in_loc = roundup(in_loc, length) + length;
                }
                length = iio_storage_bytes_for_si(indio_dev, in_ind);
                out_loc = roundup(out_loc, length);
                in_loc = roundup(in_loc, length);
                ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
                if (ret)
                        goto error_clear_mux_table;
                out_loc += length;
                in_loc += length;
        }
        /* Relies on scan_timestamp being last */
        if (buffer->scan_timestamp) {
                length = iio_storage_bytes_for_timestamp(indio_dev);
                out_loc = roundup(out_loc, length);
                in_loc = roundup(in_loc, length);
                ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
                if (ret)
                        goto error_clear_mux_table;
                out_loc += length;
                in_loc += length;
        }
        buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
        if (buffer->demux_bounce == NULL) {
                ret = -ENOMEM;
                goto error_clear_mux_table;
        }
        return 0;

error_clear_mux_table:
        iio_buffer_demux_free(buffer);

        return ret;
}
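
/*
 * Worked example (illustrative): if the device captures channels 0, 1 and 2
 * at 2 bytes each (active scan mask 0b111) but this buffer only enabled
 * channels 0 and 2 (mask 0b101), the demux table copies bytes 0-1 to 0-1
 * and bytes 4-5 to 2-3, so the buffer receives a tightly packed 4-byte scan.
 */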

int iio_update_demux(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer;
        int ret;

        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
                ret = iio_buffer_update_demux(indio_dev, buffer);
                if (ret < 0)
                        goto error_clear_mux_table;
        }
        return 0;

error_clear_mux_table:
        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
                iio_buffer_demux_free(buffer);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do
 * not call this function manually, always use iio_buffer_put() when done
 * using a buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
        struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

        buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
        if (buffer)
                kref_get(&buffer->ref);

        return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
        if (buffer)
                kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);