]>
Commit | Line | Data |
---|---|---|
0a769a95 LPC |
1 | /* Industrial I/O event handling |
2 | * | |
3 | * Copyright (c) 2008 Jonathan Cameron | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms of the GNU General Public License version 2 as published by | |
7 | * the Free Software Foundation. | |
8 | * | |
9 | * Based on elements of hwmon and input subsystems. | |
10 | */ | |
11 | ||
12 | #include <linux/anon_inodes.h> | |
13 | #include <linux/device.h> | |
14 | #include <linux/fs.h> | |
15 | #include <linux/kernel.h> | |
2c00193f | 16 | #include <linux/kfifo.h> |
0a769a95 LPC |
17 | #include <linux/module.h> |
18 | #include <linux/sched.h> | |
19 | #include <linux/slab.h> | |
20 | #include <linux/uaccess.h> | |
21 | #include <linux/wait.h> | |
22 | #include "iio.h" | |
23 | #include "iio_core.h" | |
24 | #include "sysfs.h" | |
25 | #include "events.h" | |
26 | ||
0a769a95 LPC |
/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events; its
 *			internal spinlock (wait.lock) is also used to
 *			serialize access to @det_events and @flags
 * @det_events:		kfifo holding detected events ready to be read
 *			(fixed capacity of 16 struct iio_event_data)
 * @dev_attr_list:	list of event interface sysfs attribute
 * @flags:		file operations related flags including busy flag.
 * @group:		event interface sysfs attribute group
 */
struct iio_event_interface {
	wait_queue_head_t wait;
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);

	struct list_head dev_attr_list;
	unsigned long flags;
	struct attribute_group group;
};
44 | ||
45 | int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp) | |
46 | { | |
47 | struct iio_event_interface *ev_int = indio_dev->event_interface; | |
2c00193f LPC |
48 | struct iio_event_data ev; |
49 | int copied; | |
0a769a95 LPC |
50 | |
51 | /* Does anyone care? */ | |
43ba1100 | 52 | spin_lock(&ev_int->wait.lock); |
0a769a95 | 53 | if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { |
0a769a95 | 54 | |
2c00193f LPC |
55 | ev.id = ev_code; |
56 | ev.timestamp = timestamp; | |
57 | ||
58 | copied = kfifo_put(&ev_int->det_events, &ev); | |
2c00193f | 59 | if (copied != 0) |
43ba1100 LPC |
60 | wake_up_locked(&ev_int->wait); |
61 | } | |
62 | spin_unlock(&ev_int->wait.lock); | |
0a769a95 | 63 | |
2c00193f | 64 | return 0; |
0a769a95 LPC |
65 | } |
66 | EXPORT_SYMBOL(iio_push_event); | |
67 | ||
/*
 * Read whole struct iio_event_data records from the event kfifo.
 * Blocks (unless O_NONBLOCK) until at least one event is queued.
 * Returns the number of bytes copied, or a negative errno.
 */
static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_event_interface *ev_int = filep->private_data;
	unsigned int copied;
	int ret;

	/* Partial event records cannot be delivered. */
	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

	spin_lock(&ev_int->wait.lock);
	if (kfifo_is_empty(&ev_int->det_events)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_unlock;
		}
		/* Blocking on device; waiting for something to be there */
		/*
		 * The _locked variant drops wait.lock while sleeping and
		 * re-acquires it before returning.
		 */
		ret = wait_event_interruptible_locked(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events));
		if (ret)
			goto error_unlock;
		/* Single access device so no one else can get the data */
	}

	/*
	 * NOTE(review): kfifo_to_user copies to userspace while wait.lock
	 * is held; a page fault on @buf would sleep in atomic context —
	 * confirm whether this path can fault here.
	 */
	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);

error_unlock:
	spin_unlock(&ev_int->wait.lock);

	return ret ? ret : copied;
}
101 | ||
102 | static int iio_event_chrdev_release(struct inode *inode, struct file *filep) | |
103 | { | |
104 | struct iio_event_interface *ev_int = filep->private_data; | |
0a769a95 | 105 | |
43ba1100 | 106 | spin_lock(&ev_int->wait.lock); |
0a769a95 LPC |
107 | clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); |
108 | /* | |
109 | * In order to maintain a clean state for reopening, | |
110 | * clear out any awaiting events. The mask will prevent | |
111 | * any new __iio_push_event calls running. | |
112 | */ | |
2c00193f | 113 | kfifo_reset_out(&ev_int->det_events); |
43ba1100 | 114 | spin_unlock(&ev_int->wait.lock); |
0a769a95 LPC |
115 | |
116 | return 0; | |
117 | } | |
118 | ||
119 | static const struct file_operations iio_event_chrdev_fileops = { | |
120 | .read = iio_event_chrdev_read, | |
121 | .release = iio_event_chrdev_release, | |
122 | .owner = THIS_MODULE, | |
123 | .llseek = noop_llseek, | |
124 | }; | |
125 | ||
126 | int iio_event_getfd(struct iio_dev *indio_dev) | |
127 | { | |
128 | struct iio_event_interface *ev_int = indio_dev->event_interface; | |
129 | int fd; | |
130 | ||
131 | if (ev_int == NULL) | |
132 | return -ENODEV; | |
133 | ||
43ba1100 | 134 | spin_lock(&ev_int->wait.lock); |
0a769a95 | 135 | if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { |
43ba1100 | 136 | spin_unlock(&ev_int->wait.lock); |
0a769a95 LPC |
137 | return -EBUSY; |
138 | } | |
43ba1100 | 139 | spin_unlock(&ev_int->wait.lock); |
0a769a95 LPC |
140 | fd = anon_inode_getfd("iio:event", |
141 | &iio_event_chrdev_fileops, ev_int, O_RDONLY); | |
142 | if (fd < 0) { | |
43ba1100 | 143 | spin_lock(&ev_int->wait.lock); |
0a769a95 | 144 | clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); |
43ba1100 | 145 | spin_unlock(&ev_int->wait.lock); |
0a769a95 LPC |
146 | } |
147 | return fd; | |
148 | } | |
149 | ||
/* Event-type component of sysfs attribute names, indexed by IIO_EV_TYPE_*. */
static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};
157 | ||
/* Event-direction component of sysfs attribute names, indexed by IIO_EV_DIR_*. */
static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};
163 | ||
164 | static ssize_t iio_ev_state_store(struct device *dev, | |
165 | struct device_attribute *attr, | |
166 | const char *buf, | |
167 | size_t len) | |
168 | { | |
169 | struct iio_dev *indio_dev = dev_get_drvdata(dev); | |
170 | struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); | |
171 | int ret; | |
172 | bool val; | |
173 | ||
174 | ret = strtobool(buf, &val); | |
175 | if (ret < 0) | |
176 | return ret; | |
177 | ||
178 | ret = indio_dev->info->write_event_config(indio_dev, | |
179 | this_attr->address, | |
180 | val); | |
181 | return (ret < 0) ? ret : len; | |
182 | } | |
183 | ||
184 | static ssize_t iio_ev_state_show(struct device *dev, | |
185 | struct device_attribute *attr, | |
186 | char *buf) | |
187 | { | |
188 | struct iio_dev *indio_dev = dev_get_drvdata(dev); | |
189 | struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); | |
190 | int val = indio_dev->info->read_event_config(indio_dev, | |
191 | this_attr->address); | |
192 | ||
193 | if (val < 0) | |
194 | return val; | |
195 | else | |
196 | return sprintf(buf, "%d\n", val); | |
197 | } | |
198 | ||
199 | static ssize_t iio_ev_value_show(struct device *dev, | |
200 | struct device_attribute *attr, | |
201 | char *buf) | |
202 | { | |
203 | struct iio_dev *indio_dev = dev_get_drvdata(dev); | |
204 | struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); | |
205 | int val, ret; | |
206 | ||
207 | ret = indio_dev->info->read_event_value(indio_dev, | |
208 | this_attr->address, &val); | |
209 | if (ret < 0) | |
210 | return ret; | |
211 | ||
212 | return sprintf(buf, "%d\n", val); | |
213 | } | |
214 | ||
215 | static ssize_t iio_ev_value_store(struct device *dev, | |
216 | struct device_attribute *attr, | |
217 | const char *buf, | |
218 | size_t len) | |
219 | { | |
220 | struct iio_dev *indio_dev = dev_get_drvdata(dev); | |
221 | struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); | |
222 | unsigned long val; | |
223 | int ret; | |
224 | ||
225 | if (!indio_dev->info->write_event_value) | |
226 | return -EINVAL; | |
227 | ||
228 | ret = strict_strtoul(buf, 10, &val); | |
229 | if (ret) | |
230 | return ret; | |
231 | ||
232 | ret = indio_dev->info->write_event_value(indio_dev, this_attr->address, | |
233 | val); | |
234 | if (ret < 0) | |
235 | return ret; | |
236 | ||
237 | return len; | |
238 | } | |
239 | ||
/*
 * Create the per-channel event sysfs attributes ("<type>_<dir>_en" and
 * "<type>_<dir>_value") for every bit set in @chan->event_mask.
 * Returns the number of attributes added, or a negative errno.
 *
 * Each bit index i in event_mask encodes an (event type, direction)
 * pair: type = i / IIO_EV_DIR_MAX, direction = i % IIO_EV_DIR_MAX, as
 * used below to index iio_ev_type_text[] / iio_ev_dir_text[].
 */
static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
				      struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	u64 mask = 0;
	char *postfix;
	if (!chan->event_mask)
		return 0;

	for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
		postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		/* Pack the event code; the attribute address carries it. */
		if (chan->modified)
			mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
						  i/IIO_EV_DIR_MAX,
						  i%IIO_EV_DIR_MAX);
		else if (chan->differential)
			/*
			 * NOTE(review): here direction (i%IIO_EV_DIR_MAX) is
			 * passed before type (i/IIO_EV_DIR_MAX), the reverse
			 * of the other two branches — confirm against the
			 * IIO_EVENT_CODE() parameter order.
			 */
			mask = IIO_EVENT_CODE(chan->type,
					      0, 0,
					      i%IIO_EV_DIR_MAX,
					      i/IIO_EV_DIR_MAX,
					      0,
					      chan->channel,
					      chan->channel2);
		else
			mask = IIO_UNMOD_EVENT_CODE(chan->type,
						    chan->channel,
						    i/IIO_EV_DIR_MAX,
						    i%IIO_EV_DIR_MAX);

		/* "<type>_<dir>_en" — enable/disable control. */
		ret = __iio_add_chan_devattr(postfix,
					     chan,
					     &iio_ev_state_show,
					     iio_ev_state_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		/* __iio_add_chan_devattr duplicates the name; free ours. */
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
		/* "<type>_<dir>_value" — threshold value control. */
		postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		ret = __iio_add_chan_devattr(postfix, chan,
					     iio_ev_value_show,
					     iio_ev_value_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
	}
	ret = attrcount;
error_ret:
	return ret;
}
312 | ||
313 | static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev) | |
314 | { | |
315 | struct iio_dev_attr *p, *n; | |
316 | list_for_each_entry_safe(p, n, | |
317 | &indio_dev->event_interface-> | |
318 | dev_attr_list, l) { | |
319 | kfree(p->dev_attr.attr.name); | |
320 | kfree(p); | |
321 | } | |
322 | } | |
323 | ||
324 | static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev) | |
325 | { | |
326 | int j, ret, attrcount = 0; | |
327 | ||
328 | INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list); | |
329 | /* Dynically created from the channels array */ | |
330 | for (j = 0; j < indio_dev->num_channels; j++) { | |
331 | ret = iio_device_add_event_sysfs(indio_dev, | |
332 | &indio_dev->channels[j]); | |
333 | if (ret < 0) | |
334 | goto error_clear_attrs; | |
335 | attrcount += ret; | |
336 | } | |
337 | return attrcount; | |
338 | ||
339 | error_clear_attrs: | |
340 | __iio_remove_event_config_attrs(indio_dev); | |
341 | ||
342 | return ret; | |
343 | } | |
344 | ||
345 | static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev) | |
346 | { | |
347 | int j; | |
348 | ||
349 | for (j = 0; j < indio_dev->num_channels; j++) | |
350 | if (indio_dev->channels[j].event_mask != 0) | |
351 | return true; | |
352 | return false; | |
353 | } | |
354 | ||
355 | static void iio_setup_ev_int(struct iio_event_interface *ev_int) | |
356 | { | |
2c00193f | 357 | INIT_KFIFO(ev_int->det_events); |
0a769a95 LPC |
358 | init_waitqueue_head(&ev_int->wait); |
359 | } | |
360 | ||
/* Name of the sysfs group holding all event attributes. */
static const char *iio_event_group_name = "events";

/*
 * Allocate the device's event interface and populate its "events"
 * sysfs attribute group from the driver-supplied static attributes
 * plus the dynamically generated per-channel ones.  Returns 0 on
 * success (or when the device has no events at all), negative errno
 * on failure.
 */
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	/* Nothing to do if the driver declares no events of either kind. */
	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	indio_dev->event_interface =
		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (indio_dev->event_interface == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	iio_setup_ev_int(indio_dev->event_interface);
	/* Count the driver's static event attributes (NULL-terminated). */
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			/*
			 * NOTE(review): __iio_add_event_config_attrs() has
			 * already torn down its attributes on this path, yet
			 * the label below calls
			 * __iio_remove_event_config_attrs() again over the
			 * same (now freed) list entries — verify this is not
			 * a double free.
			 */
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	/* Flat, NULL-terminated attribute array: static first, then dynamic. */
	indio_dev->event_interface->group.name = iio_event_group_name;
	indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
		sizeof(indio_dev->event_interface->group.attrs[0]),
		GFP_KERNEL);
	if (indio_dev->event_interface->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(indio_dev->event_interface->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(indio_dev->event_interface->group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p,
			    &indio_dev->event_interface->dev_attr_list,
			    l)
		indio_dev->event_interface->group.attrs[attrn++] =
			&p->dev_attr.attr;
	/* Expose the group; device registration picks up indio_dev->groups. */
	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->event_interface->group;

	return 0;

error_free_setup_event_lines:
	__iio_remove_event_config_attrs(indio_dev);
	kfree(indio_dev->event_interface);
error_ret:

	return ret;
}
425 | ||
426 | void iio_device_unregister_eventset(struct iio_dev *indio_dev) | |
427 | { | |
428 | if (indio_dev->event_interface == NULL) | |
429 | return; | |
430 | __iio_remove_event_config_attrs(indio_dev); | |
431 | kfree(indio_dev->event_interface->group.attrs); | |
432 | kfree(indio_dev->event_interface); | |
433 | } |