]>
Commit | Line | Data |
---|---|---|
5eef597e MP |
1 | /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ |
2 | ||
3 | /*** | |
4 | This file is part of systemd. | |
5 | ||
6 | Copyright (C) 2014 David Herrmann <dh.herrmann@gmail.com> | |
7 | ||
8 | systemd is free software; you can redistribute it and/or modify it | |
9 | under the terms of the GNU Lesser General Public License as published by | |
10 | the Free Software Foundation; either version 2.1 of the License, or | |
11 | (at your option) any later version. | |
12 | ||
13 | systemd is distributed in the hope that it will be useful, but | |
14 | WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
16 | Lesser General Public License for more details. | |
17 | ||
18 | You should have received a copy of the GNU Lesser General Public License | |
19 | along with systemd; If not, see <http://www.gnu.org/licenses/>. | |
20 | ***/ | |
21 | ||
22 | #include <fcntl.h> | |
5eef597e MP |
23 | #include <libevdev/libevdev.h> |
24 | #include <libudev.h> | |
25 | #include <stdbool.h> | |
26 | #include <stdlib.h> | |
27 | #include <systemd/sd-bus.h> | |
28 | #include <systemd/sd-event.h> | |
5eef597e | 29 | #include "bus-util.h" |
5eef597e MP |
30 | #include "idev.h" |
31 | #include "idev-internal.h" | |
32 | #include "macro.h" | |
5eef597e MP |
33 | #include "util.h" |
34 | ||
/* Shorthand names for the three evdev element types implemented below. */
typedef struct idev_evdev idev_evdev;
typedef struct unmanaged_evdev unmanaged_evdev;
typedef struct managed_evdev managed_evdev;
/* Base object shared by all evdev elements: embeds the generic idev_element
 * and adds the libevdev context plus main-loop bookkeeping. */
struct idev_evdev {
        idev_element element;           /* base element; must be first for container_of() */
        struct libevdev *evdev;         /* libevdev context; NULL until first resume */
        int fd;                         /* device fd; -1 while paused/released */
        sd_event_source *fd_src;        /* I/O source watching @fd */
        sd_event_source *idle_src;      /* deferred source used to trigger re-syncs */

        bool unsync : 1; /* not in-sync with kernel */
        bool resync : 1; /* re-syncing with kernel */
        bool running : 1; /* event sources currently armed */
};
50 | ||
/* Evdev element that opens /dev/input/eventX directly (needs privileges). */
struct unmanaged_evdev {
        idev_evdev evdev;               /* base; must be first for container_of() */
        char *devnode;                  /* owned copy of the device-node path */
};
55 | ||
/* Evdev element that acquires its fd through logind's TakeDevice() API. */
struct managed_evdev {
        idev_evdev evdev;               /* base; must be first for container_of() */
        dev_t devnum;                   /* device number passed to logind */
        sd_bus_slot *slot_take_device;  /* pending async TakeDevice() call, if any */

        bool requested : 1; /* TakeDevice() was sent */
        bool acquired : 1; /* TakeDevice() was successful */
};
64 | ||
/* Downcast helpers: map an embedded idev_element back to its containing
 * evdev object. Only valid if the element really is of that type. */
#define idev_evdev_from_element(_e) container_of((_e), idev_evdev, element)
#define unmanaged_evdev_from_element(_e) \
        container_of(idev_evdev_from_element(_e), unmanaged_evdev, evdev)
#define managed_evdev_from_element(_e) \
        container_of(idev_evdev_from_element(_e), managed_evdev, evdev)
70 | ||
/* Static initializer for the embedded idev_evdev base; starts without an fd. */
#define IDEV_EVDEV_INIT(_vtable, _session) ((idev_evdev){ \
                .element = IDEV_ELEMENT_INIT((_vtable), (_session)), \
                .fd = -1, \
        })

/* Buffer size for "evdev/<major>:<minor>" element names, including NUL. */
#define IDEV_EVDEV_NAME_MAX (8 + DECIMAL_STR_MAX(unsigned) * 2)
77 | ||
78 | static const idev_element_vtable unmanaged_evdev_vtable; | |
79 | static const idev_element_vtable managed_evdev_vtable; | |
80 | ||
81 | static int idev_evdev_resume(idev_evdev *evdev, int dev_fd); | |
82 | static void idev_evdev_pause(idev_evdev *evdev, bool release); | |
83 | ||
84 | /* | |
85 | * Virtual Evdev Element | |
86 | * The virtual evdev element is the base class of all other evdev elements. It | |
87 | * uses libevdev to access the kernel evdev API. It supports asynchronous | |
88 | * access revocation, re-syncing if events got dropped and more. | |
89 | * This element cannot be used by itself. There must be a wrapper around it | |
90 | * which opens a file-descriptor and passes it to the virtual evdev element. | |
91 | */ | |
92 | ||
static void idev_evdev_name(char *out, dev_t devnum) {
        /* Format the canonical "evdev/major:minor" element name for @devnum.
         * @out must be at least of size IDEV_EVDEV_NAME_MAX. */
        unsigned maj = major(devnum), min = minor(devnum);

        sprintf(out, "evdev/%u:%u", maj, min);
}
97 | ||
98 | static int idev_evdev_feed_resync(idev_evdev *evdev) { | |
99 | idev_data data = { | |
100 | .type = IDEV_DATA_RESYNC, | |
101 | .resync = evdev->resync, | |
102 | }; | |
103 | ||
104 | return idev_element_feed(&evdev->element, &data); | |
105 | } | |
106 | ||
107 | static int idev_evdev_feed_evdev(idev_evdev *evdev, struct input_event *event) { | |
108 | idev_data data = { | |
109 | .type = IDEV_DATA_EVDEV, | |
110 | .resync = evdev->resync, | |
111 | .evdev = { | |
112 | .event = *event, | |
113 | }, | |
114 | }; | |
115 | ||
116 | return idev_element_feed(&evdev->element, &data); | |
117 | } | |
118 | ||
119 | static void idev_evdev_hup(idev_evdev *evdev) { | |
120 | /* | |
121 | * On HUP, we close the current fd via idev_evdev_pause(). This drops | |
122 | * the event-sources from the main-loop and effectively puts the | |
123 | * element asleep. If the HUP is part of a hotplug-event, a following | |
124 | * udev-notification will destroy the element. Otherwise, the HUP is | |
125 | * either result of access-revokation or a serious error. | |
126 | * For unmanaged devices, we should never receive HUP (except for | |
127 | * unplug-events). But if we do, something went seriously wrong and we | |
128 | * shouldn't try to be clever. | |
129 | * Instead, we simply stay asleep and wait for the device to be | |
130 | * disabled and then re-enabled (or closed and re-opened). This will | |
131 | * re-open the device node and restart the device. | |
132 | * For managed devices, a HUP usually means our device-access was | |
133 | * revoked. In that case, we simply put the device asleep and wait for | |
134 | * logind to notify us once the device is alive again. logind also | |
135 | * passes us a new fd. Hence, we don't have to re-enable the device. | |
136 | * | |
137 | * Long story short: The only thing we have to do here, is close() the | |
138 | * file-descriptor and remove it from the main-loop. Everything else is | |
139 | * handled via additional events we receive. | |
140 | */ | |
141 | ||
142 | idev_evdev_pause(evdev, true); | |
143 | } | |
144 | ||
/* Drain the kernel input queue via libevdev and dispatch each event to the
 * element's listeners. Handles forced re-sync (@unsync) and SYN_DROPPED
 * recovery (@resync). Returns 0 or a fatal feed error; read errors instead
 * put the element asleep via idev_evdev_hup() and return 0. */
static int idev_evdev_io(idev_evdev *evdev) {
        idev_element *e = &evdev->element;
        struct input_event ev;
        unsigned int flags;
        int r, error = 0;

        /*
         * Read input-events via libevdev until the input-queue is drained. In
         * case we're disabled, don't do anything. The input-queue might
         * overflow, but we don't care as we have to resync after wake-up,
         * anyway.
         * TODO: libevdev should give us a hint how many events to read. We
         * really want to avoid starvation, so we shouldn't read forever in
         * case we cannot keep up with the kernel.
         * TODO: Make sure libevdev always reports SYN_DROPPED to us, regardless
         * whether any event was synced afterwards.
         */

        flags = LIBEVDEV_READ_FLAG_NORMAL;
        while (e->enabled) {
                if (evdev->unsync) {
                        /* immediately resync, even if in sync right now */
                        evdev->unsync = false;
                        evdev->resync = false;
                        flags = LIBEVDEV_READ_FLAG_NORMAL;
                        r = libevdev_next_event(evdev->evdev, flags | LIBEVDEV_READ_FLAG_FORCE_SYNC, &ev);
                        if (r < 0 && r != -EAGAIN) {
                                /* fatal read error; put the element asleep */
                                r = 0;
                                goto error;
                        } else if (r != LIBEVDEV_READ_STATUS_SYNC) {
                                /* forced sync refused; log and continue normally */
                                log_debug("idev-evdev: %s/%s: cannot force resync: %d",
                                          e->session->name, e->name, r);
                        }
                } else {
                        r = libevdev_next_event(evdev->evdev, flags, &ev);
                }

                if (evdev->resync && r == -EAGAIN) {
                        /* end of re-sync */
                        evdev->resync = false;
                        flags = LIBEVDEV_READ_FLAG_NORMAL;
                } else if (r == -EAGAIN) {
                        /* no data available */
                        break;
                } else if (r < 0) {
                        /* read error */
                        goto error;
                } else if (r == LIBEVDEV_READ_STATUS_SYNC) {
                        if (evdev->resync) {
                                /* sync-event */
                                r = idev_evdev_feed_evdev(evdev, &ev);
                                if (r != 0) {
                                        error = r;
                                        break;
                                }
                        } else {
                                /* start of sync */
                                evdev->resync = true;
                                flags = LIBEVDEV_READ_FLAG_SYNC;
                                r = idev_evdev_feed_resync(evdev);
                                if (r != 0) {
                                        error = r;
                                        break;
                                }
                        }
                } else {
                        /* normal event */
                        r = idev_evdev_feed_evdev(evdev, &ev);
                        if (r != 0) {
                                error = r;
                                break;
                        }
                }
        }

        if (error < 0)
                log_debug_errno(error, "idev-evdev: %s/%s: error on data event: %m",
                                e->session->name, e->name);
        return error;

error:
        idev_evdev_hup(evdev);
        return 0; /* idev_evdev_hup() handles the error so discard it */
}
229 | ||
230 | static int idev_evdev_event_fn(sd_event_source *s, int fd, uint32_t revents, void *userdata) { | |
231 | idev_evdev *evdev = userdata; | |
232 | ||
233 | /* fetch data as long as EPOLLIN is signalled */ | |
234 | if (revents & EPOLLIN) | |
235 | return idev_evdev_io(evdev); | |
236 | ||
237 | if (revents & (EPOLLHUP | EPOLLERR)) | |
238 | idev_evdev_hup(evdev); | |
239 | ||
240 | return 0; | |
241 | } | |
242 | ||
243 | static int idev_evdev_idle_fn(sd_event_source *s, void *userdata) { | |
244 | idev_evdev *evdev = userdata; | |
245 | ||
246 | /* | |
247 | * The idle-event is raised whenever we have to re-sync the libevdev | |
248 | * state from the kernel. We simply call into idev_evdev_io() which | |
249 | * flushes the state and re-syncs it if @unsync is set. | |
250 | * State has to be synced whenever our view of the kernel device is | |
251 | * out of date. This is the case when we open the device, if the | |
252 | * kernel's receive buffer overflows, or on other exceptional | |
253 | * situations. Events during re-syncs must be forwarded to the upper | |
254 | * layers so they can update their view of the device. However, such | |
255 | * events must only be handled passively, as they might be out-of-order | |
256 | * and/or re-ordered. Therefore, we mark them as 'sync' events. | |
257 | */ | |
258 | ||
259 | if (!evdev->unsync) | |
260 | return 0; | |
261 | ||
262 | return idev_evdev_io(evdev); | |
263 | } | |
264 | ||
265 | static void idev_evdev_destroy(idev_evdev *evdev) { | |
266 | assert(evdev); | |
267 | assert(evdev->fd < 0); | |
268 | ||
269 | libevdev_free(evdev->evdev); | |
270 | evdev->evdev = NULL; | |
271 | } | |
272 | ||
273 | static void idev_evdev_enable(idev_evdev *evdev) { | |
274 | assert(evdev); | |
275 | assert(evdev->fd_src); | |
276 | assert(evdev->idle_src); | |
277 | ||
278 | if (evdev->running) | |
279 | return; | |
280 | if (evdev->fd < 0 || evdev->element.n_open < 1 || !evdev->element.enabled) | |
281 | return; | |
282 | ||
283 | evdev->running = true; | |
284 | sd_event_source_set_enabled(evdev->fd_src, SD_EVENT_ON); | |
285 | sd_event_source_set_enabled(evdev->idle_src, SD_EVENT_ONESHOT); | |
286 | } | |
287 | ||
288 | static void idev_evdev_disable(idev_evdev *evdev) { | |
289 | assert(evdev); | |
290 | assert(evdev->fd_src); | |
291 | assert(evdev->idle_src); | |
292 | ||
293 | if (!evdev->running) | |
294 | return; | |
295 | ||
296 | evdev->running = false; | |
297 | idev_evdev_feed_resync(evdev); | |
298 | sd_event_source_set_enabled(evdev->fd_src, SD_EVENT_OFF); | |
299 | sd_event_source_set_enabled(evdev->idle_src, SD_EVENT_OFF); | |
300 | } | |
301 | ||
302 | static int idev_evdev_resume(idev_evdev *evdev, int dev_fd) { | |
303 | idev_element *e = &evdev->element; | |
304 | _cleanup_close_ int fd = dev_fd; | |
305 | int r, flags; | |
306 | ||
307 | if (fd < 0 || evdev->fd == fd) { | |
308 | fd = -1; | |
309 | idev_evdev_enable(evdev); | |
310 | return 0; | |
311 | } | |
312 | ||
313 | idev_evdev_pause(evdev, true); | |
314 | log_debug("idev-evdev: %s/%s: resume", e->session->name, e->name); | |
315 | ||
316 | r = fd_nonblock(fd, true); | |
317 | if (r < 0) | |
318 | return r; | |
319 | ||
320 | r = fd_cloexec(fd, true); | |
321 | if (r < 0) | |
322 | return r; | |
323 | ||
324 | flags = fcntl(fd, F_GETFL, 0); | |
325 | if (flags < 0) | |
326 | return -errno; | |
327 | ||
328 | flags &= O_ACCMODE; | |
329 | if (flags == O_WRONLY) | |
330 | return -EACCES; | |
331 | ||
332 | evdev->element.readable = true; | |
333 | evdev->element.writable = !(flags & O_RDONLY); | |
334 | ||
335 | /* | |
336 | * TODO: We *MUST* re-sync the device so we get a delta of the changed | |
337 | * state while we didn't read events from the device. This works just | |
338 | * fine with libevdev_change_fd(), however, libevdev_new_from_fd() (or | |
339 | * libevdev_set_fd()) don't pass us events for the initial device | |
340 | * state. So even if we force a re-sync, we will not get the delta for | |
341 | * the initial device state. | |
342 | * We really need to fix libevdev to support that! | |
343 | */ | |
344 | if (evdev->evdev) | |
345 | r = libevdev_change_fd(evdev->evdev, fd); | |
346 | else | |
347 | r = libevdev_new_from_fd(fd, &evdev->evdev); | |
348 | ||
349 | if (r < 0) | |
350 | return r; | |
351 | ||
352 | r = sd_event_add_io(e->session->context->event, | |
353 | &evdev->fd_src, | |
354 | fd, | |
355 | EPOLLHUP | EPOLLERR | EPOLLIN, | |
356 | idev_evdev_event_fn, | |
357 | evdev); | |
358 | if (r < 0) | |
359 | return r; | |
360 | ||
361 | r = sd_event_add_defer(e->session->context->event, | |
362 | &evdev->idle_src, | |
363 | idev_evdev_idle_fn, | |
364 | evdev); | |
365 | if (r < 0) { | |
366 | evdev->fd_src = sd_event_source_unref(evdev->fd_src); | |
367 | return r; | |
368 | } | |
369 | ||
370 | sd_event_source_set_enabled(evdev->fd_src, SD_EVENT_OFF); | |
371 | sd_event_source_set_enabled(evdev->idle_src, SD_EVENT_OFF); | |
372 | ||
373 | evdev->unsync = true; | |
374 | evdev->fd = fd; | |
375 | fd = -1; | |
376 | ||
377 | idev_evdev_enable(evdev); | |
378 | return 0; | |
379 | } | |
380 | ||
381 | static void idev_evdev_pause(idev_evdev *evdev, bool release) { | |
382 | idev_element *e = &evdev->element; | |
383 | ||
384 | if (evdev->fd < 0) | |
385 | return; | |
386 | ||
387 | log_debug("idev-evdev: %s/%s: pause", e->session->name, e->name); | |
388 | ||
389 | idev_evdev_disable(evdev); | |
390 | if (release) { | |
391 | evdev->idle_src = sd_event_source_unref(evdev->idle_src); | |
392 | evdev->fd_src = sd_event_source_unref(evdev->fd_src); | |
393 | evdev->fd = safe_close(evdev->fd); | |
394 | } | |
395 | } | |
396 | ||
397 | /* | |
398 | * Unmanaged Evdev Element | |
399 | * The unmanaged evdev element opens the evdev node for a given input device | |
400 | * directly (/dev/input/eventX) and thus needs sufficient privileges. It opens | |
401 | * the device only if we really require it and releases it as soon as we're | |
402 | * disabled or closed. | |
403 | * The unmanaged element can be used in all situations where you have direct | |
404 | * access to input device nodes. Unlike managed evdev elements, it can be used | |
405 | * outside of user sessions and in emergency situations where logind is not | |
406 | * available. | |
407 | */ | |
408 | ||
/* Open the device node directly and resume the element with the resulting
 * fd. Tries read-write first and falls back to read-only on EACCES/EPERM. */
static void unmanaged_evdev_resume(idev_element *e) {
        unmanaged_evdev *eu = unmanaged_evdev_from_element(e);
        int r, fd;

        /*
         * Unmanaged devices can be acquired on-demand. Therefore, don't
         * acquire it unless someone opened the device *and* we're enabled.
         */
        if (e->n_open < 1 || !e->enabled)
                return;

        fd = eu->evdev.fd;
        if (fd < 0) {
                /* prefer full read-write access */
                fd = open(eu->devnode, O_RDWR | O_CLOEXEC | O_NOCTTY | O_NONBLOCK);
                if (fd < 0) {
                        if (errno != EACCES && errno != EPERM) {
                                log_debug_errno(errno, "idev-evdev: %s/%s: cannot open node %s: %m",
                                                e->session->name, e->name, eu->devnode);
                                return;
                        }

                        /* permission denied: retry read-only */
                        fd = open(eu->devnode, O_RDONLY | O_CLOEXEC | O_NOCTTY | O_NONBLOCK);
                        if (fd < 0) {
                                log_debug_errno(errno, "idev-evdev: %s/%s: cannot open node %s: %m",
                                                e->session->name, e->name, eu->devnode);
                                return;
                        }

                        e->readable = true;
                        e->writable = false;
                } else {
                        e->readable = true;
                        e->writable = true;
                }
        }

        /* idev_evdev_resume() takes ownership of @fd and closes it on error */
        r = idev_evdev_resume(&eu->evdev, fd);
        if (r < 0)
                log_debug_errno(r, "idev-evdev: %s/%s: cannot resume: %m",
                                e->session->name, e->name);
}
450 | ||
451 | static void unmanaged_evdev_pause(idev_element *e) { | |
452 | unmanaged_evdev *eu = unmanaged_evdev_from_element(e); | |
453 | ||
454 | /* | |
455 | * Release the device if the device is disabled or there is no-one who | |
456 | * opened it. This guarantees we stay only available if we're opened | |
457 | * *and* enabled. | |
458 | */ | |
459 | ||
460 | idev_evdev_pause(&eu->evdev, true); | |
461 | } | |
462 | ||
/* Allocate and register an unmanaged evdev element for udev device @ud.
 * Returns 0 and optionally the new element in @out, or a negative errno. */
static int unmanaged_evdev_new(idev_element **out, idev_session *s, struct udev_device *ud) {
        _cleanup_(idev_element_freep) idev_element *e = NULL;
        char name[IDEV_EVDEV_NAME_MAX];
        unmanaged_evdev *eu;
        const char *devnode;
        dev_t devnum;
        int r;

        assert_return(s, -EINVAL);
        assert_return(ud, -EINVAL);

        devnode = udev_device_get_devnode(ud);
        devnum = udev_device_get_devnum(ud);
        if (!devnode || devnum == 0)
                return -ENODEV;

        idev_evdev_name(name, devnum);

        eu = new0(unmanaged_evdev, 1);
        if (!eu)
                return -ENOMEM;

        /* From here on @e owns @eu: on error the cleanup handler invokes the
         * vtable's free() which releases @eu and its members. */
        e = &eu->evdev.element;
        eu->evdev = IDEV_EVDEV_INIT(&unmanaged_evdev_vtable, s);

        eu->devnode = strdup(devnode);
        if (!eu->devnode)
                return -ENOMEM;

        r = idev_element_add(e, name);
        if (r < 0)
                return r;

        /* success: hand the element to the caller and disarm the cleanup */
        if (out)
                *out = e;
        e = NULL;
        return 0;
}
501 | ||
502 | static void unmanaged_evdev_free(idev_element *e) { | |
503 | unmanaged_evdev *eu = unmanaged_evdev_from_element(e); | |
504 | ||
505 | idev_evdev_destroy(&eu->evdev); | |
506 | free(eu->devnode); | |
507 | free(eu); | |
508 | } | |
509 | ||
/* Unmanaged elements acquire the node on demand: enable/open resume the
 * device, disable/close release it again. No logind callbacks. */
static const idev_element_vtable unmanaged_evdev_vtable = {
        .free = unmanaged_evdev_free,
        .enable = unmanaged_evdev_resume,
        .disable = unmanaged_evdev_pause,
        .open = unmanaged_evdev_resume,
        .close = unmanaged_evdev_pause,
};
517 | ||
518 | /* | |
519 | * Managed Evdev Element | |
520 | * The managed evdev element uses systemd-logind to acquire evdev devices. This | |
521 | * means, we do not open the device node /dev/input/eventX directly. Instead, | |
522 | * logind passes us a file-descriptor whenever our session is activated. Thus, | |
523 | * we don't need access to the device node directly. | |
524 | * Furthermore, whenever the session is put asleep, logind revokes the | |
 * file-descriptor so we lose access to the device.
526 | * Managed evdev elements should be preferred over unmanaged elements whenever | |
527 | * you run inside a user session with exclusive device access. | |
528 | */ | |
529 | ||
/* Async reply handler for the TakeDevice() call sent by
 * managed_evdev_enable(). Extracts the device fd from logind's reply and
 * resumes the element with it. Always returns 0; failures are only logged. */
static int managed_evdev_take_device_fn(sd_bus_message *reply,
                                        void *userdata,
                                        sd_bus_error *ret_error) {
        managed_evdev *em = userdata;
        idev_element *e = &em->evdev.element;
        idev_session *s = e->session;
        int r, paused, fd;

        /* the call completed; drop the pending slot */
        em->slot_take_device = sd_bus_slot_unref(em->slot_take_device);

        if (sd_bus_message_is_method_error(reply, NULL)) {
                const sd_bus_error *error = sd_bus_message_get_error(reply);

                log_debug("idev-evdev: %s/%s: TakeDevice failed: %s: %s",
                          s->name, e->name, error->name, error->message);
                return 0;
        }

        em->acquired = true;

        r = sd_bus_message_read(reply, "hb", &fd, &paused);
        if (r < 0) {
                log_debug("idev-evdev: %s/%s: erroneous TakeDevice reply", s->name, e->name);
                return 0;
        }

        /* If the device is paused, ignore it; we will get the next fd via
         * ResumeDevice signals. */
        if (paused)
                return 0;

        /* the fd in the reply is owned by the message; duplicate it so it
         * outlives the reply */
        fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
        if (fd < 0) {
                log_debug_errno(errno, "idev-evdev: %s/%s: cannot duplicate evdev fd: %m", s->name, e->name);
                return 0;
        }

        r = idev_evdev_resume(&em->evdev, fd);
        if (r < 0)
                log_debug_errno(r, "idev-evdev: %s/%s: cannot resume: %m",
                                s->name, e->name);

        return 0;
}
574 | ||
/* Request device access from logind by sending an asynchronous TakeDevice()
 * call; the reply is handled in managed_evdev_take_device_fn(). */
static void managed_evdev_enable(idev_element *e) {
        _cleanup_bus_message_unref_ sd_bus_message *m = NULL;
        managed_evdev *em = managed_evdev_from_element(e);
        idev_session *s = e->session;
        idev_context *c = s->context;
        int r;

        /*
         * Acquiring managed devices is heavy, so do it only once we're
         * enabled *and* opened by someone.
         */
        if (e->n_open < 1 || !e->enabled)
                return;

        /* bail out if already pending */
        if (em->requested)
                return;

        r = sd_bus_message_new_method_call(c->sysbus,
                                           &m,
                                           "org.freedesktop.login1",
                                           s->path,
                                           "org.freedesktop.login1.Session",
                                           "TakeDevice");
        if (r < 0)
                goto error;

        /* TakeDevice takes the device's major/minor number */
        r = sd_bus_message_append(m, "uu", major(em->devnum), minor(em->devnum));
        if (r < 0)
                goto error;

        r = sd_bus_call_async(c->sysbus,
                              &em->slot_take_device,
                              m,
                              managed_evdev_take_device_fn,
                              em,
                              0);
        if (r < 0)
                goto error;

        em->requested = true;
        return;

error:
        log_debug_errno(r, "idev-evdev: %s/%s: cannot send TakeDevice request: %m",
                        s->name, e->name);
}
622 | ||
/* Pause the element and, once it is fully closed, return the device to
 * logind via a fire-and-forget ReleaseDevice() call. */
static void managed_evdev_disable(idev_element *e) {
        _cleanup_bus_message_unref_ sd_bus_message *m = NULL;
        managed_evdev *em = managed_evdev_from_element(e);
        idev_session *s = e->session;
        idev_context *c = s->context;
        int r;

        /*
         * Releasing managed devices is heavy. Once acquired, we get
         * notifications for sleep/wake-up events, so there's no reason to
         * release it if disabled but opened. However, if a device is closed,
         * we release it immediately as we don't care for sleep/wake-up events
         * then (even if we're actually enabled).
         */

        /* stop delivering events but keep fd and event sources around */
        idev_evdev_pause(&em->evdev, false);

        if (e->n_open > 0 || !em->requested)
                return;

        /*
         * If TakeDevice() is pending or was successful, make sure to
         * release the device again. We don't care for return-values,
         * so send it without waiting or callbacks.
         * If a failed TakeDevice() is pending, but someone else took
         * the device on the same bus-connection, we might incorrectly
         * release their device. This is an unlikely race, though.
         * Furthermore, you really shouldn't have two users of the
         * controller-API on the same session, on the same devices, *AND* on
         * the same bus-connection. So we don't care for that race..
         */

        /* fully release fd and event sources now */
        idev_evdev_pause(&em->evdev, true);
        em->requested = false;

        if (!em->acquired && !em->slot_take_device)
                return;

        /* cancel a still-pending TakeDevice() call, if any */
        em->slot_take_device = sd_bus_slot_unref(em->slot_take_device);
        em->acquired = false;

        r = sd_bus_message_new_method_call(c->sysbus,
                                           &m,
                                           "org.freedesktop.login1",
                                           s->path,
                                           "org.freedesktop.login1.Session",
                                           "ReleaseDevice");
        if (r >= 0) {
                r = sd_bus_message_append(m, "uu", major(em->devnum), minor(em->devnum));
                if (r >= 0)
                        r = sd_bus_send(c->sysbus, m, NULL);
        }

        /* -ENOTCONN just means the bus is gone; nothing left to release */
        if (r < 0 && r != -ENOTCONN)
                log_debug_errno(r, "idev-evdev: %s/%s: cannot send ReleaseDevice: %m",
                                s->name, e->name);
}
680 | ||
681 | static void managed_evdev_resume(idev_element *e, int fd) { | |
682 | managed_evdev *em = managed_evdev_from_element(e); | |
683 | idev_session *s = e->session; | |
684 | int r; | |
685 | ||
686 | /* | |
687 | * We get ResumeDevice signals whenever logind resumed a previously | |
688 | * paused device. The arguments contain the major/minor number of the | |
689 | * related device and a new file-descriptor for the freshly opened | |
690 | * device-node. We take the file-descriptor and immediately resume the | |
691 | * device. | |
692 | */ | |
693 | ||
694 | fd = fcntl(fd, F_DUPFD_CLOEXEC, 3); | |
695 | if (fd < 0) { | |
f47781d8 MP |
696 | log_debug_errno(errno, "idev-evdev: %s/%s: cannot duplicate evdev fd: %m", |
697 | s->name, e->name); | |
5eef597e MP |
698 | return; |
699 | } | |
700 | ||
701 | r = idev_evdev_resume(&em->evdev, fd); | |
702 | if (r < 0) | |
f47781d8 MP |
703 | log_debug_errno(r, "idev-evdev: %s/%s: cannot resume: %m", |
704 | s->name, e->name); | |
5eef597e MP |
705 | |
706 | return; | |
707 | } | |
708 | ||
/* Handle logind's PauseDevice signal: put the element asleep and, for mode
 * "pause", acknowledge with PauseDeviceComplete(). */
static void managed_evdev_pause(idev_element *e, const char *mode) {
        managed_evdev *em = managed_evdev_from_element(e);
        idev_session *s = e->session;
        idev_context *c = s->context;
        int r;

        /*
         * We get PauseDevice() signals from logind whenever a device we
         * requested was, or is about to be, paused. Arguments are major/minor
         * number of the device and the mode of the operation.
         * We treat it as asynchronous access-revocation (as if we got HUP on
         * the device fd). Note that we might have already treated the HUP
         * event via EPOLLHUP, whichever comes first.
         *
         * @mode can be one of the following:
         *   "pause": The device is about to be paused. We must react
         *            immediately and respond with PauseDeviceComplete(). Once
         *            we replied, logind will pause the device. Note that
         *            logind might apply any kind of timeout and force pause
         *            the device if we don't respond in a timely manner. In
         *            this case, we will receive a second PauseDevice event
         *            with @mode set to "force" (or similar).
         *   "force": The device was disabled forcefully by logind. Access is
         *            already revoked. This is just an asynchronous
         *            notification so we can put the device asleep (in case
         *            we didn't already notice the access revocation).
         *   "gone":  This is like "force" but is sent if the device was
         *            paused due to a device-removal event.
         *
         * We always handle PauseDevice signals as "force" as we properly
         * support asynchronous access revocation, anyway. But in case logind
         * sent mode "pause", we also call PauseDeviceComplete() to immediately
         * acknowledge the request.
         */

        idev_evdev_pause(&em->evdev, true);

        if (streq(mode, "pause")) {
                _cleanup_bus_message_unref_ sd_bus_message *m = NULL;

                /*
                 * Sending PauseDeviceComplete() is racy if logind triggers the
                 * timeout. That is, if we take too long and logind pauses the
                 * device by sending a forced PauseDevice, our
                 * PauseDeviceComplete call will be stray. That's fine, though.
                 * logind ignores such stray calls. Only if logind also sent a
                 * further PauseDevice() signal, it might match our call
                 * incorrectly to the newer PauseDevice(). That's fine, too, as
                 * we handle that event asynchronously, anyway. Therefore,
                 * whatever happens, we're fine. Yay!
                 */

                r = sd_bus_message_new_method_call(c->sysbus,
                                                   &m,
                                                   "org.freedesktop.login1",
                                                   s->path,
                                                   "org.freedesktop.login1.Session",
                                                   "PauseDeviceComplete");
                if (r >= 0) {
                        r = sd_bus_message_append(m, "uu", major(em->devnum), minor(em->devnum));
                        if (r >= 0)
                                r = sd_bus_send(c->sysbus, m, NULL);
                }

                if (r < 0)
                        log_debug_errno(r, "idev-evdev: %s/%s: cannot send PauseDeviceComplete: %m",
                                        s->name, e->name);
        }
}
778 | ||
/* Allocate and register a managed evdev element for udev device @ud. The
 * session must be managed and connected to the system bus. Returns 0 and
 * optionally the new element in @out, or a negative errno. */
static int managed_evdev_new(idev_element **out, idev_session *s, struct udev_device *ud) {
        _cleanup_(idev_element_freep) idev_element *e = NULL;
        char name[IDEV_EVDEV_NAME_MAX];
        managed_evdev *em;
        dev_t devnum;
        int r;

        assert_return(s, -EINVAL);
        assert_return(s->managed, -EINVAL);
        assert_return(s->context->sysbus, -EINVAL);
        assert_return(ud, -EINVAL);

        devnum = udev_device_get_devnum(ud);
        if (devnum == 0)
                return -ENODEV;

        idev_evdev_name(name, devnum);

        em = new0(managed_evdev, 1);
        if (!em)
                return -ENOMEM;

        /* From here on @e owns @em: on error the cleanup handler invokes the
         * vtable's free() which releases @em. */
        e = &em->evdev.element;
        em->evdev = IDEV_EVDEV_INIT(&managed_evdev_vtable, s);
        em->devnum = devnum;

        r = idev_element_add(e, name);
        if (r < 0)
                return r;

        /* success: hand the element to the caller and disarm the cleanup */
        if (out)
                *out = e;
        e = NULL;
        return 0;
}
814 | ||
815 | static void managed_evdev_free(idev_element *e) { | |
816 | managed_evdev *em = managed_evdev_from_element(e); | |
817 | ||
818 | idev_evdev_destroy(&em->evdev); | |
819 | free(em); | |
820 | } | |
821 | ||
/* Managed elements additionally react to logind's ResumeDevice/PauseDevice
 * signals via the .resume/.pause callbacks. */
static const idev_element_vtable managed_evdev_vtable = {
        .free = managed_evdev_free,
        .enable = managed_evdev_enable,
        .disable = managed_evdev_disable,
        .open = managed_evdev_enable,
        .close = managed_evdev_disable,
        .resume = managed_evdev_resume,
        .pause = managed_evdev_pause,
};
831 | ||
832 | /* | |
833 | * Generic Constructor | |
834 | * Instead of relying on the caller to choose between managed and unmanaged | |
835 | * evdev devices, the idev_evdev_new() constructor does that for you (by | |
836 | * looking at s->managed). | |
837 | */ | |
838 | ||
839 | bool idev_is_evdev(idev_element *e) { | |
840 | return e && (e->vtable == &unmanaged_evdev_vtable || | |
841 | e->vtable == &managed_evdev_vtable); | |
842 | } | |
843 | ||
844 | idev_element *idev_find_evdev(idev_session *s, dev_t devnum) { | |
845 | char name[IDEV_EVDEV_NAME_MAX]; | |
846 | ||
847 | assert_return(s, NULL); | |
848 | assert_return(devnum != 0, NULL); | |
849 | ||
850 | idev_evdev_name(name, devnum); | |
851 | return idev_find_element(s, name); | |
852 | } | |
853 | ||
854 | int idev_evdev_new(idev_element **out, idev_session *s, struct udev_device *ud) { | |
855 | assert_return(s, -EINVAL); | |
856 | assert_return(ud, -EINVAL); | |
857 | ||
858 | return s->managed ? managed_evdev_new(out, s, ud) : unmanaged_evdev_new(out, s, ud); | |
859 | } |