/*
 * Event char devices, giving access to raw input device events.
 *
 * Copyright (c) 1999-2002 Vojtech Pavlik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #define EVDEV_MINOR_BASE 64
14 #define EVDEV_MINORS 32
15 #define EVDEV_MIN_BUFFER_SIZE 64U
16 #define EVDEV_BUF_PACKETS 8
18 #include <linux/poll.h>
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
23 #include <linux/module.h>
24 #include <linux/init.h>
25 #include <linux/input/mt.h>
26 #include <linux/major.h>
27 #include <linux/device.h>
28 #include <linux/cdev.h>
29 #include "input-compat.h"
/* Clock domains a client may select for event timestamps. */
enum evdev_clock_type {
	EV_CLK_REAL = 0,	/* CLOCK_REALTIME */
	EV_CLK_MONO,		/* CLOCK_MONOTONIC */
	EV_CLK_BOOT,		/* CLOCK_BOOTTIME */
	EV_CLK_MAX
};
40 struct input_handle handle
;
41 wait_queue_head_t wait
;
42 struct evdev_client __rcu
*grab
;
43 struct list_head client_list
;
44 spinlock_t client_lock
; /* protects client_list */
54 unsigned int packet_head
; /* [future] position of the first element of next packet */
55 spinlock_t buffer_lock
; /* protects access to buffer, head and tail */
56 struct fasync_struct
*fasync
;
58 struct list_head node
;
59 unsigned int clk_type
;
61 unsigned long *evmasks
[EV_CNT
];
63 struct input_event buffer
[];
66 static size_t evdev_get_mask_cnt(unsigned int type
)
68 static const size_t counts
[EV_CNT
] = {
69 /* EV_SYN==0 is EV_CNT, _not_ SYN_CNT, see EVIOCGBIT */
81 return (type
< EV_CNT
) ? counts
[type
] : 0;
84 /* requires the buffer lock to be held */
85 static bool __evdev_is_filtered(struct evdev_client
*client
,
92 /* EV_SYN and unknown codes are never filtered */
93 if (type
== EV_SYN
|| type
>= EV_CNT
)
96 /* first test whether the type is filtered */
97 mask
= client
->evmasks
[0];
98 if (mask
&& !test_bit(type
, mask
))
101 /* unknown values are never filtered */
102 cnt
= evdev_get_mask_cnt(type
);
103 if (!cnt
|| code
>= cnt
)
106 mask
= client
->evmasks
[type
];
107 return mask
&& !test_bit(code
, mask
);
110 /* flush queued events of type @type, caller must hold client->buffer_lock */
111 static void __evdev_flush_queue(struct evdev_client
*client
, unsigned int type
)
113 unsigned int i
, head
, num
;
114 unsigned int mask
= client
->bufsize
- 1;
116 struct input_event
*ev
;
118 BUG_ON(type
== EV_SYN
);
121 client
->packet_head
= client
->tail
;
123 /* init to 1 so a leading SYN_REPORT will not be dropped */
126 for (i
= client
->tail
; i
!= client
->head
; i
= (i
+ 1) & mask
) {
127 ev
= &client
->buffer
[i
];
128 is_report
= ev
->type
== EV_SYN
&& ev
->code
== SYN_REPORT
;
130 if (ev
->type
== type
) {
131 /* drop matched entry */
133 } else if (is_report
&& !num
) {
134 /* drop empty SYN_REPORT groups */
136 } else if (head
!= i
) {
137 /* move entry to fill the gap */
138 client
->buffer
[head
] = *ev
;
142 head
= (head
+ 1) & mask
;
146 client
->packet_head
= head
;
153 static void __evdev_queue_syn_dropped(struct evdev_client
*client
)
155 struct input_event ev
;
157 struct timespec64 ts
;
159 time
= client
->clk_type
== EV_CLK_REAL
?
161 client
->clk_type
== EV_CLK_MONO
?
163 ktime_get_boottime();
165 ts
= ktime_to_timespec64(time
);
166 ev
.input_event_sec
= ts
.tv_sec
;
167 ev
.input_event_usec
= ts
.tv_nsec
/ NSEC_PER_USEC
;
169 ev
.code
= SYN_DROPPED
;
172 client
->buffer
[client
->head
++] = ev
;
173 client
->head
&= client
->bufsize
- 1;
175 if (unlikely(client
->head
== client
->tail
)) {
176 /* drop queue but keep our SYN_DROPPED event */
177 client
->tail
= (client
->head
- 1) & (client
->bufsize
- 1);
178 client
->packet_head
= client
->tail
;
182 static void evdev_queue_syn_dropped(struct evdev_client
*client
)
186 spin_lock_irqsave(&client
->buffer_lock
, flags
);
187 __evdev_queue_syn_dropped(client
);
188 spin_unlock_irqrestore(&client
->buffer_lock
, flags
);
191 static int evdev_set_clk_type(struct evdev_client
*client
, unsigned int clkid
)
194 unsigned int clk_type
;
199 clk_type
= EV_CLK_REAL
;
201 case CLOCK_MONOTONIC
:
202 clk_type
= EV_CLK_MONO
;
205 clk_type
= EV_CLK_BOOT
;
211 if (client
->clk_type
!= clk_type
) {
212 client
->clk_type
= clk_type
;
215 * Flush pending events and queue SYN_DROPPED event,
216 * but only if the queue is not empty.
218 spin_lock_irqsave(&client
->buffer_lock
, flags
);
220 if (client
->head
!= client
->tail
) {
221 client
->packet_head
= client
->head
= client
->tail
;
222 __evdev_queue_syn_dropped(client
);
225 spin_unlock_irqrestore(&client
->buffer_lock
, flags
);
231 static void __pass_event(struct evdev_client
*client
,
232 const struct input_event
*event
)
234 client
->buffer
[client
->head
++] = *event
;
235 client
->head
&= client
->bufsize
- 1;
237 if (unlikely(client
->head
== client
->tail
)) {
239 * This effectively "drops" all unconsumed events, leaving
240 * EV_SYN/SYN_DROPPED plus the newest event in the queue.
242 client
->tail
= (client
->head
- 2) & (client
->bufsize
- 1);
244 client
->buffer
[client
->tail
].input_event_sec
=
245 event
->input_event_sec
;
246 client
->buffer
[client
->tail
].input_event_usec
=
247 event
->input_event_usec
;
248 client
->buffer
[client
->tail
].type
= EV_SYN
;
249 client
->buffer
[client
->tail
].code
= SYN_DROPPED
;
250 client
->buffer
[client
->tail
].value
= 0;
252 client
->packet_head
= client
->tail
;
255 if (event
->type
== EV_SYN
&& event
->code
== SYN_REPORT
) {
256 client
->packet_head
= client
->head
;
257 kill_fasync(&client
->fasync
, SIGIO
, POLL_IN
);
261 static void evdev_pass_values(struct evdev_client
*client
,
262 const struct input_value
*vals
, unsigned int count
,
265 struct evdev
*evdev
= client
->evdev
;
266 const struct input_value
*v
;
267 struct input_event event
;
268 struct timespec64 ts
;
274 ts
= ktime_to_timespec64(ev_time
[client
->clk_type
]);
275 event
.input_event_sec
= ts
.tv_sec
;
276 event
.input_event_usec
= ts
.tv_nsec
/ NSEC_PER_USEC
;
278 /* Interrupts are disabled, just acquire the lock. */
279 spin_lock(&client
->buffer_lock
);
281 for (v
= vals
; v
!= vals
+ count
; v
++) {
282 if (__evdev_is_filtered(client
, v
->type
, v
->code
))
285 if (v
->type
== EV_SYN
&& v
->code
== SYN_REPORT
) {
286 /* drop empty SYN_REPORT */
287 if (client
->packet_head
== client
->head
)
293 event
.type
= v
->type
;
294 event
.code
= v
->code
;
295 event
.value
= v
->value
;
296 __pass_event(client
, &event
);
299 spin_unlock(&client
->buffer_lock
);
302 wake_up_interruptible(&evdev
->wait
);
306 * Pass incoming events to all connected clients.
308 static void evdev_events(struct input_handle
*handle
,
309 const struct input_value
*vals
, unsigned int count
)
311 struct evdev
*evdev
= handle
->private;
312 struct evdev_client
*client
;
313 ktime_t ev_time
[EV_CLK_MAX
];
315 ev_time
[EV_CLK_MONO
] = ktime_get();
316 ev_time
[EV_CLK_REAL
] = ktime_mono_to_real(ev_time
[EV_CLK_MONO
]);
317 ev_time
[EV_CLK_BOOT
] = ktime_mono_to_any(ev_time
[EV_CLK_MONO
],
322 client
= rcu_dereference(evdev
->grab
);
325 evdev_pass_values(client
, vals
, count
, ev_time
);
327 list_for_each_entry_rcu(client
, &evdev
->client_list
, node
)
328 evdev_pass_values(client
, vals
, count
, ev_time
);
334 * Pass incoming event to all connected clients.
336 static void evdev_event(struct input_handle
*handle
,
337 unsigned int type
, unsigned int code
, int value
)
339 struct input_value vals
[] = { { type
, code
, value
} };
341 evdev_events(handle
, vals
, 1);
344 static int evdev_fasync(int fd
, struct file
*file
, int on
)
346 struct evdev_client
*client
= file
->private_data
;
348 return fasync_helper(fd
, file
, on
, &client
->fasync
);
351 static int evdev_flush(struct file
*file
, fl_owner_t id
)
353 struct evdev_client
*client
= file
->private_data
;
354 struct evdev
*evdev
= client
->evdev
;
356 mutex_lock(&evdev
->mutex
);
358 if (evdev
->exist
&& !client
->revoked
)
359 input_flush_device(&evdev
->handle
, file
);
361 mutex_unlock(&evdev
->mutex
);
365 static void evdev_free(struct device
*dev
)
367 struct evdev
*evdev
= container_of(dev
, struct evdev
, dev
);
369 input_put_device(evdev
->handle
.dev
);
374 * Grabs an event device (along with underlying input device).
375 * This function is called with evdev->mutex taken.
377 static int evdev_grab(struct evdev
*evdev
, struct evdev_client
*client
)
384 error
= input_grab_device(&evdev
->handle
);
388 rcu_assign_pointer(evdev
->grab
, client
);
393 static int evdev_ungrab(struct evdev
*evdev
, struct evdev_client
*client
)
395 struct evdev_client
*grab
= rcu_dereference_protected(evdev
->grab
,
396 lockdep_is_held(&evdev
->mutex
));
401 rcu_assign_pointer(evdev
->grab
, NULL
);
403 input_release_device(&evdev
->handle
);
408 static void evdev_attach_client(struct evdev
*evdev
,
409 struct evdev_client
*client
)
411 spin_lock(&evdev
->client_lock
);
412 list_add_tail_rcu(&client
->node
, &evdev
->client_list
);
413 spin_unlock(&evdev
->client_lock
);
416 static void evdev_detach_client(struct evdev
*evdev
,
417 struct evdev_client
*client
)
419 spin_lock(&evdev
->client_lock
);
420 list_del_rcu(&client
->node
);
421 spin_unlock(&evdev
->client_lock
);
425 static int evdev_open_device(struct evdev
*evdev
)
429 retval
= mutex_lock_interruptible(&evdev
->mutex
);
435 else if (!evdev
->open
++) {
436 retval
= input_open_device(&evdev
->handle
);
441 mutex_unlock(&evdev
->mutex
);
445 static void evdev_close_device(struct evdev
*evdev
)
447 mutex_lock(&evdev
->mutex
);
449 if (evdev
->exist
&& !--evdev
->open
)
450 input_close_device(&evdev
->handle
);
452 mutex_unlock(&evdev
->mutex
);
456 * Wake up users waiting for IO so they can disconnect from
459 static void evdev_hangup(struct evdev
*evdev
)
461 struct evdev_client
*client
;
463 spin_lock(&evdev
->client_lock
);
464 list_for_each_entry(client
, &evdev
->client_list
, node
)
465 kill_fasync(&client
->fasync
, SIGIO
, POLL_HUP
);
466 spin_unlock(&evdev
->client_lock
);
468 wake_up_interruptible(&evdev
->wait
);
471 static int evdev_release(struct inode
*inode
, struct file
*file
)
473 struct evdev_client
*client
= file
->private_data
;
474 struct evdev
*evdev
= client
->evdev
;
477 mutex_lock(&evdev
->mutex
);
478 evdev_ungrab(evdev
, client
);
479 mutex_unlock(&evdev
->mutex
);
481 evdev_detach_client(evdev
, client
);
483 for (i
= 0; i
< EV_CNT
; ++i
)
484 bitmap_free(client
->evmasks
[i
]);
488 evdev_close_device(evdev
);
493 static unsigned int evdev_compute_buffer_size(struct input_dev
*dev
)
495 unsigned int n_events
=
496 max(dev
->hint_events_per_packet
* EVDEV_BUF_PACKETS
,
497 EVDEV_MIN_BUFFER_SIZE
);
499 return roundup_pow_of_two(n_events
);
502 static int evdev_open(struct inode
*inode
, struct file
*file
)
504 struct evdev
*evdev
= container_of(inode
->i_cdev
, struct evdev
, cdev
);
505 unsigned int bufsize
= evdev_compute_buffer_size(evdev
->handle
.dev
);
506 struct evdev_client
*client
;
509 client
= kzalloc(struct_size(client
, buffer
, bufsize
),
510 GFP_KERNEL
| __GFP_NOWARN
);
512 client
= vzalloc(struct_size(client
, buffer
, bufsize
));
516 client
->bufsize
= bufsize
;
517 spin_lock_init(&client
->buffer_lock
);
518 client
->evdev
= evdev
;
519 evdev_attach_client(evdev
, client
);
521 error
= evdev_open_device(evdev
);
523 goto err_free_client
;
525 file
->private_data
= client
;
526 stream_open(inode
, file
);
531 evdev_detach_client(evdev
, client
);
536 static ssize_t
evdev_write(struct file
*file
, const char __user
*buffer
,
537 size_t count
, loff_t
*ppos
)
539 struct evdev_client
*client
= file
->private_data
;
540 struct evdev
*evdev
= client
->evdev
;
541 struct input_event event
;
544 if (count
!= 0 && count
< input_event_size())
547 retval
= mutex_lock_interruptible(&evdev
->mutex
);
551 if (!evdev
->exist
|| client
->revoked
) {
556 while (retval
+ input_event_size() <= count
) {
558 if (input_event_from_user(buffer
+ retval
, &event
)) {
562 retval
+= input_event_size();
564 input_inject_event(&evdev
->handle
,
565 event
.type
, event
.code
, event
.value
);
570 mutex_unlock(&evdev
->mutex
);
574 static int evdev_fetch_next_event(struct evdev_client
*client
,
575 struct input_event
*event
)
579 spin_lock_irq(&client
->buffer_lock
);
581 have_event
= client
->packet_head
!= client
->tail
;
583 *event
= client
->buffer
[client
->tail
++];
584 client
->tail
&= client
->bufsize
- 1;
587 spin_unlock_irq(&client
->buffer_lock
);
592 static ssize_t
evdev_read(struct file
*file
, char __user
*buffer
,
593 size_t count
, loff_t
*ppos
)
595 struct evdev_client
*client
= file
->private_data
;
596 struct evdev
*evdev
= client
->evdev
;
597 struct input_event event
;
601 if (count
!= 0 && count
< input_event_size())
605 if (!evdev
->exist
|| client
->revoked
)
608 if (client
->packet_head
== client
->tail
&&
609 (file
->f_flags
& O_NONBLOCK
))
613 * count == 0 is special - no IO is done but we check
614 * for error conditions (see above).
619 while (read
+ input_event_size() <= count
&&
620 evdev_fetch_next_event(client
, &event
)) {
622 if (input_event_to_user(buffer
+ read
, &event
))
625 read
+= input_event_size();
631 if (!(file
->f_flags
& O_NONBLOCK
)) {
632 error
= wait_event_interruptible(evdev
->wait
,
633 client
->packet_head
!= client
->tail
||
634 !evdev
->exist
|| client
->revoked
);
643 /* No kernel lock - fine */
644 static __poll_t
evdev_poll(struct file
*file
, poll_table
*wait
)
646 struct evdev_client
*client
= file
->private_data
;
647 struct evdev
*evdev
= client
->evdev
;
650 poll_wait(file
, &evdev
->wait
, wait
);
652 if (evdev
->exist
&& !client
->revoked
)
653 mask
= EPOLLOUT
| EPOLLWRNORM
;
655 mask
= EPOLLHUP
| EPOLLERR
;
657 if (client
->packet_head
!= client
->tail
)
658 mask
|= EPOLLIN
| EPOLLRDNORM
;
665 #define BITS_PER_LONG_COMPAT (sizeof(compat_long_t) * 8)
666 #define BITS_TO_LONGS_COMPAT(x) ((((x) - 1) / BITS_PER_LONG_COMPAT) + 1)
669 static int bits_to_user(unsigned long *bits
, unsigned int maxbit
,
670 unsigned int maxlen
, void __user
*p
, int compat
)
675 len
= BITS_TO_LONGS_COMPAT(maxbit
) * sizeof(compat_long_t
);
679 for (i
= 0; i
< len
/ sizeof(compat_long_t
); i
++)
680 if (copy_to_user((compat_long_t __user
*) p
+ i
,
681 (compat_long_t
*) bits
+
682 i
+ 1 - ((i
% 2) << 1),
683 sizeof(compat_long_t
)))
686 len
= BITS_TO_LONGS(maxbit
) * sizeof(long);
690 if (copy_to_user(p
, bits
, len
))
697 static int bits_from_user(unsigned long *bits
, unsigned int maxbit
,
698 unsigned int maxlen
, const void __user
*p
, int compat
)
703 if (maxlen
% sizeof(compat_long_t
))
706 len
= BITS_TO_LONGS_COMPAT(maxbit
) * sizeof(compat_long_t
);
710 for (i
= 0; i
< len
/ sizeof(compat_long_t
); i
++)
711 if (copy_from_user((compat_long_t
*) bits
+
712 i
+ 1 - ((i
% 2) << 1),
713 (compat_long_t __user
*) p
+ i
,
714 sizeof(compat_long_t
)))
717 *((compat_long_t
*) bits
+ i
- 1) = 0;
720 if (maxlen
% sizeof(long))
723 len
= BITS_TO_LONGS(maxbit
) * sizeof(long);
727 if (copy_from_user(bits
, p
, len
))
736 static int bits_to_user(unsigned long *bits
, unsigned int maxbit
,
737 unsigned int maxlen
, void __user
*p
, int compat
)
740 BITS_TO_LONGS_COMPAT(maxbit
) * sizeof(compat_long_t
) :
741 BITS_TO_LONGS(maxbit
) * sizeof(long);
746 return copy_to_user(p
, bits
, len
) ? -EFAULT
: len
;
749 static int bits_from_user(unsigned long *bits
, unsigned int maxbit
,
750 unsigned int maxlen
, const void __user
*p
, int compat
)
752 size_t chunk_size
= compat
? sizeof(compat_long_t
) : sizeof(long);
755 if (maxlen
% chunk_size
)
758 len
= compat
? BITS_TO_LONGS_COMPAT(maxbit
) : BITS_TO_LONGS(maxbit
);
763 return copy_from_user(bits
, p
, len
) ? -EFAULT
: len
;
766 #endif /* __BIG_ENDIAN */
770 static int bits_to_user(unsigned long *bits
, unsigned int maxbit
,
771 unsigned int maxlen
, void __user
*p
, int compat
)
773 int len
= BITS_TO_LONGS(maxbit
) * sizeof(long);
778 return copy_to_user(p
, bits
, len
) ? -EFAULT
: len
;
781 static int bits_from_user(unsigned long *bits
, unsigned int maxbit
,
782 unsigned int maxlen
, const void __user
*p
, int compat
)
786 if (maxlen
% sizeof(long))
789 len
= BITS_TO_LONGS(maxbit
) * sizeof(long);
793 return copy_from_user(bits
, p
, len
) ? -EFAULT
: len
;
796 #endif /* CONFIG_COMPAT */
798 static int str_to_user(const char *str
, unsigned int maxlen
, void __user
*p
)
805 len
= strlen(str
) + 1;
809 return copy_to_user(p
, str
, len
) ? -EFAULT
: len
;
812 static int handle_eviocgbit(struct input_dev
*dev
,
813 unsigned int type
, unsigned int size
,
814 void __user
*p
, int compat_mode
)
821 case 0: bits
= dev
->evbit
; len
= EV_MAX
; break;
822 case EV_KEY
: bits
= dev
->keybit
; len
= KEY_MAX
; break;
823 case EV_REL
: bits
= dev
->relbit
; len
= REL_MAX
; break;
824 case EV_ABS
: bits
= dev
->absbit
; len
= ABS_MAX
; break;
825 case EV_MSC
: bits
= dev
->mscbit
; len
= MSC_MAX
; break;
826 case EV_LED
: bits
= dev
->ledbit
; len
= LED_MAX
; break;
827 case EV_SND
: bits
= dev
->sndbit
; len
= SND_MAX
; break;
828 case EV_FF
: bits
= dev
->ffbit
; len
= FF_MAX
; break;
829 case EV_SW
: bits
= dev
->swbit
; len
= SW_MAX
; break;
830 default: return -EINVAL
;
833 return bits_to_user(bits
, len
, size
, p
, compat_mode
);
836 static int evdev_handle_get_keycode(struct input_dev
*dev
, void __user
*p
)
838 struct input_keymap_entry ke
= {
839 .len
= sizeof(unsigned int),
842 int __user
*ip
= (int __user
*)p
;
846 if (copy_from_user(ke
.scancode
, p
, sizeof(unsigned int)))
849 error
= input_get_keycode(dev
, &ke
);
853 if (put_user(ke
.keycode
, ip
+ 1))
859 static int evdev_handle_get_keycode_v2(struct input_dev
*dev
, void __user
*p
)
861 struct input_keymap_entry ke
;
864 if (copy_from_user(&ke
, p
, sizeof(ke
)))
867 error
= input_get_keycode(dev
, &ke
);
871 if (copy_to_user(p
, &ke
, sizeof(ke
)))
877 static int evdev_handle_set_keycode(struct input_dev
*dev
, void __user
*p
)
879 struct input_keymap_entry ke
= {
880 .len
= sizeof(unsigned int),
883 int __user
*ip
= (int __user
*)p
;
885 if (copy_from_user(ke
.scancode
, p
, sizeof(unsigned int)))
888 if (get_user(ke
.keycode
, ip
+ 1))
891 return input_set_keycode(dev
, &ke
);
894 static int evdev_handle_set_keycode_v2(struct input_dev
*dev
, void __user
*p
)
896 struct input_keymap_entry ke
;
898 if (copy_from_user(&ke
, p
, sizeof(ke
)))
901 if (ke
.len
> sizeof(ke
.scancode
))
904 return input_set_keycode(dev
, &ke
);
908 * If we transfer state to the user, we should flush all pending events
909 * of the same type from the client's queue. Otherwise, they might end up
910 * with duplicate events, which can screw up client's state tracking.
911 * If bits_to_user fails after flushing the queue, we queue a SYN_DROPPED
912 * event so user-space will notice missing events.
915 * We need to take event_lock before buffer_lock to avoid dead-locks. But we
916 * need the even_lock only to guarantee consistent state. We can safely release
917 * it while flushing the queue. This allows input-core to handle filters while
918 * we flush the queue.
920 static int evdev_handle_get_val(struct evdev_client
*client
,
921 struct input_dev
*dev
, unsigned int type
,
922 unsigned long *bits
, unsigned int maxbit
,
923 unsigned int maxlen
, void __user
*p
,
929 mem
= bitmap_alloc(maxbit
, GFP_KERNEL
);
933 spin_lock_irq(&dev
->event_lock
);
934 spin_lock(&client
->buffer_lock
);
936 bitmap_copy(mem
, bits
, maxbit
);
938 spin_unlock(&dev
->event_lock
);
940 __evdev_flush_queue(client
, type
);
942 spin_unlock_irq(&client
->buffer_lock
);
944 ret
= bits_to_user(mem
, maxbit
, maxlen
, p
, compat
);
946 evdev_queue_syn_dropped(client
);
953 static int evdev_handle_mt_request(struct input_dev
*dev
,
957 const struct input_mt
*mt
= dev
->mt
;
962 if (get_user(code
, &ip
[0]))
964 if (!mt
|| !input_is_mt_value(code
))
967 max_slots
= (size
- sizeof(__u32
)) / sizeof(__s32
);
968 for (i
= 0; i
< mt
->num_slots
&& i
< max_slots
; i
++) {
969 int value
= input_mt_get_value(&mt
->slots
[i
], code
);
970 if (put_user(value
, &ip
[1 + i
]))
977 static int evdev_revoke(struct evdev
*evdev
, struct evdev_client
*client
,
980 client
->revoked
= true;
981 evdev_ungrab(evdev
, client
);
982 input_flush_device(&evdev
->handle
, file
);
983 wake_up_interruptible(&evdev
->wait
);
988 /* must be called with evdev-mutex held */
989 static int evdev_set_mask(struct evdev_client
*client
,
991 const void __user
*codes
,
995 unsigned long flags
, *mask
, *oldmask
;
999 /* we allow unknown types and 'codes_size > size' for forward-compat */
1000 cnt
= evdev_get_mask_cnt(type
);
1004 mask
= bitmap_zalloc(cnt
, GFP_KERNEL
);
1008 error
= bits_from_user(mask
, cnt
- 1, codes_size
, codes
, compat
);
1014 spin_lock_irqsave(&client
->buffer_lock
, flags
);
1015 oldmask
= client
->evmasks
[type
];
1016 client
->evmasks
[type
] = mask
;
1017 spin_unlock_irqrestore(&client
->buffer_lock
, flags
);
1019 bitmap_free(oldmask
);
1024 /* must be called with evdev-mutex held */
1025 static int evdev_get_mask(struct evdev_client
*client
,
1031 unsigned long *mask
;
1032 size_t cnt
, size
, xfer_size
;
1036 /* we allow unknown types and 'codes_size > size' for forward-compat */
1037 cnt
= evdev_get_mask_cnt(type
);
1038 size
= sizeof(unsigned long) * BITS_TO_LONGS(cnt
);
1039 xfer_size
= min_t(size_t, codes_size
, size
);
1042 mask
= client
->evmasks
[type
];
1044 error
= bits_to_user(mask
, cnt
- 1,
1045 xfer_size
, codes
, compat
);
1049 /* fake mask with all bits set */
1050 for (i
= 0; i
< xfer_size
; i
++)
1051 if (put_user(0xffU
, (u8 __user
*)codes
+ i
))
1056 if (xfer_size
< codes_size
)
1057 if (clear_user(codes
+ xfer_size
, codes_size
- xfer_size
))
1063 static long evdev_do_ioctl(struct file
*file
, unsigned int cmd
,
1064 void __user
*p
, int compat_mode
)
1066 struct evdev_client
*client
= file
->private_data
;
1067 struct evdev
*evdev
= client
->evdev
;
1068 struct input_dev
*dev
= evdev
->handle
.dev
;
1069 struct input_absinfo abs
;
1070 struct input_mask mask
;
1071 struct ff_effect effect
;
1072 int __user
*ip
= (int __user
*)p
;
1073 unsigned int i
, t
, u
, v
;
1077 /* First we check for fixed-length commands */
1081 return put_user(EV_VERSION
, ip
);
1084 if (copy_to_user(p
, &dev
->id
, sizeof(struct input_id
)))
1089 if (!test_bit(EV_REP
, dev
->evbit
))
1091 if (put_user(dev
->rep
[REP_DELAY
], ip
))
1093 if (put_user(dev
->rep
[REP_PERIOD
], ip
+ 1))
1098 if (!test_bit(EV_REP
, dev
->evbit
))
1100 if (get_user(u
, ip
))
1102 if (get_user(v
, ip
+ 1))
1105 input_inject_event(&evdev
->handle
, EV_REP
, REP_DELAY
, u
);
1106 input_inject_event(&evdev
->handle
, EV_REP
, REP_PERIOD
, v
);
1111 return input_ff_erase(dev
, (int)(unsigned long) p
, file
);
1114 i
= test_bit(EV_FF
, dev
->evbit
) ?
1115 dev
->ff
->max_effects
: 0;
1116 if (put_user(i
, ip
))
1122 return evdev_grab(evdev
, client
);
1124 return evdev_ungrab(evdev
, client
);
1130 return evdev_revoke(evdev
, client
, file
);
1133 void __user
*codes_ptr
;
1135 if (copy_from_user(&mask
, p
, sizeof(mask
)))
1138 codes_ptr
= (void __user
*)(unsigned long)mask
.codes_ptr
;
1139 return evdev_get_mask(client
,
1140 mask
.type
, codes_ptr
, mask
.codes_size
,
1145 const void __user
*codes_ptr
;
1147 if (copy_from_user(&mask
, p
, sizeof(mask
)))
1150 codes_ptr
= (const void __user
*)(unsigned long)mask
.codes_ptr
;
1151 return evdev_set_mask(client
,
1152 mask
.type
, codes_ptr
, mask
.codes_size
,
1157 if (copy_from_user(&i
, p
, sizeof(unsigned int)))
1160 return evdev_set_clk_type(client
, i
);
1163 return evdev_handle_get_keycode(dev
, p
);
1166 return evdev_handle_set_keycode(dev
, p
);
1168 case EVIOCGKEYCODE_V2
:
1169 return evdev_handle_get_keycode_v2(dev
, p
);
1171 case EVIOCSKEYCODE_V2
:
1172 return evdev_handle_set_keycode_v2(dev
, p
);
1175 size
= _IOC_SIZE(cmd
);
1177 /* Now check variable-length commands */
1178 #define EVIOC_MASK_SIZE(nr) ((nr) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
1179 switch (EVIOC_MASK_SIZE(cmd
)) {
1182 return bits_to_user(dev
->propbit
, INPUT_PROP_MAX
,
1183 size
, p
, compat_mode
);
1185 case EVIOCGMTSLOTS(0):
1186 return evdev_handle_mt_request(dev
, size
, ip
);
1189 return evdev_handle_get_val(client
, dev
, EV_KEY
, dev
->key
,
1190 KEY_MAX
, size
, p
, compat_mode
);
1193 return evdev_handle_get_val(client
, dev
, EV_LED
, dev
->led
,
1194 LED_MAX
, size
, p
, compat_mode
);
1197 return evdev_handle_get_val(client
, dev
, EV_SND
, dev
->snd
,
1198 SND_MAX
, size
, p
, compat_mode
);
1201 return evdev_handle_get_val(client
, dev
, EV_SW
, dev
->sw
,
1202 SW_MAX
, size
, p
, compat_mode
);
1205 return str_to_user(dev
->name
, size
, p
);
1208 return str_to_user(dev
->phys
, size
, p
);
1211 return str_to_user(dev
->uniq
, size
, p
);
1213 case EVIOC_MASK_SIZE(EVIOCSFF
):
1214 if (input_ff_effect_from_user(p
, size
, &effect
))
1217 error
= input_ff_upload(dev
, &effect
, file
);
1221 if (put_user(effect
.id
, &(((struct ff_effect __user
*)p
)->id
)))
1227 /* Multi-number variable-length handlers */
1228 if (_IOC_TYPE(cmd
) != 'E')
1231 if (_IOC_DIR(cmd
) == _IOC_READ
) {
1233 if ((_IOC_NR(cmd
) & ~EV_MAX
) == _IOC_NR(EVIOCGBIT(0, 0)))
1234 return handle_eviocgbit(dev
,
1235 _IOC_NR(cmd
) & EV_MAX
, size
,
1238 if ((_IOC_NR(cmd
) & ~ABS_MAX
) == _IOC_NR(EVIOCGABS(0))) {
1243 t
= _IOC_NR(cmd
) & ABS_MAX
;
1244 abs
= dev
->absinfo
[t
];
1246 if (copy_to_user(p
, &abs
, min_t(size_t,
1247 size
, sizeof(struct input_absinfo
))))
1254 if (_IOC_DIR(cmd
) == _IOC_WRITE
) {
1256 if ((_IOC_NR(cmd
) & ~ABS_MAX
) == _IOC_NR(EVIOCSABS(0))) {
1261 t
= _IOC_NR(cmd
) & ABS_MAX
;
1263 if (copy_from_user(&abs
, p
, min_t(size_t,
1264 size
, sizeof(struct input_absinfo
))))
1267 if (size
< sizeof(struct input_absinfo
))
1270 /* We can't change number of reserved MT slots */
1271 if (t
== ABS_MT_SLOT
)
1275 * Take event lock to ensure that we are not
1276 * changing device parameters in the middle
1279 spin_lock_irq(&dev
->event_lock
);
1280 dev
->absinfo
[t
] = abs
;
1281 spin_unlock_irq(&dev
->event_lock
);
1290 static long evdev_ioctl_handler(struct file
*file
, unsigned int cmd
,
1291 void __user
*p
, int compat_mode
)
1293 struct evdev_client
*client
= file
->private_data
;
1294 struct evdev
*evdev
= client
->evdev
;
1297 retval
= mutex_lock_interruptible(&evdev
->mutex
);
1301 if (!evdev
->exist
|| client
->revoked
) {
1306 retval
= evdev_do_ioctl(file
, cmd
, p
, compat_mode
);
1309 mutex_unlock(&evdev
->mutex
);
1313 static long evdev_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
1315 return evdev_ioctl_handler(file
, cmd
, (void __user
*)arg
, 0);
#ifdef CONFIG_COMPAT
/* fops->compat_ioctl: 32-bit clients on a 64-bit kernel. */
static long evdev_ioctl_compat(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	return evdev_ioctl_handler(file, cmd, compat_ptr(arg), 1);
}
#endif
1326 static const struct file_operations evdev_fops
= {
1327 .owner
= THIS_MODULE
,
1329 .write
= evdev_write
,
1332 .release
= evdev_release
,
1333 .unlocked_ioctl
= evdev_ioctl
,
1334 #ifdef CONFIG_COMPAT
1335 .compat_ioctl
= evdev_ioctl_compat
,
1337 .fasync
= evdev_fasync
,
1338 .flush
= evdev_flush
,
1339 .llseek
= no_llseek
,
1343 * Mark device non-existent. This disables writes, ioctls and
1344 * prevents new users from opening the device. Already posted
1345 * blocking reads will stay, however new ones will fail.
1347 static void evdev_mark_dead(struct evdev
*evdev
)
1349 mutex_lock(&evdev
->mutex
);
1350 evdev
->exist
= false;
1351 mutex_unlock(&evdev
->mutex
);
1354 static void evdev_cleanup(struct evdev
*evdev
)
1356 struct input_handle
*handle
= &evdev
->handle
;
1358 evdev_mark_dead(evdev
);
1359 evdev_hangup(evdev
);
1361 /* evdev is marked dead so no one else accesses evdev->open */
1363 input_flush_device(handle
, NULL
);
1364 input_close_device(handle
);
1369 * Create new evdev device. Note that input core serializes calls
1370 * to connect and disconnect.
1372 static int evdev_connect(struct input_handler
*handler
, struct input_dev
*dev
,
1373 const struct input_device_id
*id
)
1375 struct evdev
*evdev
;
1380 minor
= input_get_new_minor(EVDEV_MINOR_BASE
, EVDEV_MINORS
, true);
1383 pr_err("failed to reserve new minor: %d\n", error
);
1387 evdev
= kzalloc(sizeof(struct evdev
), GFP_KERNEL
);
1390 goto err_free_minor
;
1393 INIT_LIST_HEAD(&evdev
->client_list
);
1394 spin_lock_init(&evdev
->client_lock
);
1395 mutex_init(&evdev
->mutex
);
1396 init_waitqueue_head(&evdev
->wait
);
1397 evdev
->exist
= true;
1400 /* Normalize device number if it falls into legacy range */
1401 if (dev_no
< EVDEV_MINOR_BASE
+ EVDEV_MINORS
)
1402 dev_no
-= EVDEV_MINOR_BASE
;
1403 dev_set_name(&evdev
->dev
, "event%d", dev_no
);
1405 evdev
->handle
.dev
= input_get_device(dev
);
1406 evdev
->handle
.name
= dev_name(&evdev
->dev
);
1407 evdev
->handle
.handler
= handler
;
1408 evdev
->handle
.private = evdev
;
1410 evdev
->dev
.devt
= MKDEV(INPUT_MAJOR
, minor
);
1411 evdev
->dev
.class = &input_class
;
1412 evdev
->dev
.parent
= &dev
->dev
;
1413 evdev
->dev
.release
= evdev_free
;
1414 device_initialize(&evdev
->dev
);
1416 error
= input_register_handle(&evdev
->handle
);
1418 goto err_free_evdev
;
1420 cdev_init(&evdev
->cdev
, &evdev_fops
);
1422 error
= cdev_device_add(&evdev
->cdev
, &evdev
->dev
);
1424 goto err_cleanup_evdev
;
1429 evdev_cleanup(evdev
);
1430 input_unregister_handle(&evdev
->handle
);
1432 put_device(&evdev
->dev
);
1434 input_free_minor(minor
);
1438 static void evdev_disconnect(struct input_handle
*handle
)
1440 struct evdev
*evdev
= handle
->private;
1442 cdev_device_del(&evdev
->cdev
, &evdev
->dev
);
1443 evdev_cleanup(evdev
);
1444 input_free_minor(MINOR(evdev
->dev
.devt
));
1445 input_unregister_handle(handle
);
1446 put_device(&evdev
->dev
);
1449 static const struct input_device_id evdev_ids
[] = {
1450 { .driver_info
= 1 }, /* Matches all devices */
1451 { }, /* Terminating zero entry */
1454 MODULE_DEVICE_TABLE(input
, evdev_ids
);
1456 static struct input_handler evdev_handler
= {
1457 .event
= evdev_event
,
1458 .events
= evdev_events
,
1459 .connect
= evdev_connect
,
1460 .disconnect
= evdev_disconnect
,
1461 .legacy_minors
= true,
1462 .minor
= EVDEV_MINOR_BASE
,
1464 .id_table
= evdev_ids
,
1467 static int __init
evdev_init(void)
1469 return input_register_handler(&evdev_handler
);
1472 static void __exit
evdev_exit(void)
1474 input_unregister_handler(&evdev_handler
);
1477 module_init(evdev_init
);
1478 module_exit(evdev_exit
);
1480 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
1481 MODULE_DESCRIPTION("Input driver event char devices");
1482 MODULE_LICENSE("GPL");