drivers/input/input.c
1 /*
2 * The input core
3 *
4 * Copyright (c) 1999-2002 Vojtech Pavlik
5 */
6
7 /*
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published by
10 * the Free Software Foundation.
11 */
12
13 #define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
14
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/idr.h>
18 #include <linux/input/mt.h>
19 #include <linux/module.h>
20 #include <linux/slab.h>
21 #include <linux/random.h>
22 #include <linux/major.h>
23 #include <linux/proc_fs.h>
24 #include <linux/sched.h>
25 #include <linux/seq_file.h>
26 #include <linux/poll.h>
27 #include <linux/device.h>
28 #include <linux/mutex.h>
29 #include <linux/rcupdate.h>
30 #include "input-compat.h"
31
32 MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
33 MODULE_DESCRIPTION("Input core");
34 MODULE_LICENSE("GPL");
35
36 #define INPUT_MAX_CHAR_DEVICES 1024
37 #define INPUT_FIRST_DYNAMIC_DEV 256
38 static DEFINE_IDA(input_ida);
39
40 static LIST_HEAD(input_dev_list);
41 static LIST_HEAD(input_handler_list);
42
43 /*
44 * input_mutex protects access to both input_dev_list and input_handler_list.
45 * This also causes input_[un]register_device and input_[un]register_handler
46 * to be mutually exclusive, which simplifies locking in drivers implementing
47 * input handlers.
48 */
49 static DEFINE_MUTEX(input_mutex);
50
51 static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
52
53 static inline int is_event_supported(unsigned int code,
54 unsigned long *bm, unsigned int max)
55 {
56 return code <= max && test_bit(code, bm);
57 }
58
59 static int input_defuzz_abs_event(int value, int old_val, int fuzz)
60 {
61 if (fuzz) {
62 if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2)
63 return old_val;
64
65 if (value > old_val - fuzz && value < old_val + fuzz)
66 return (old_val * 3 + value) / 4;
67
68 if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2)
69 return (old_val + value) / 2;
70 }
71
72 return value;
73 }
74
75 static void input_start_autorepeat(struct input_dev *dev, int code)
76 {
77 if (test_bit(EV_REP, dev->evbit) &&
78 dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] &&
79 dev->timer.data) {
80 dev->repeat_key = code;
81 mod_timer(&dev->timer,
82 jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
83 }
84 }
85
86 static void input_stop_autorepeat(struct input_dev *dev)
87 {
88 del_timer(&dev->timer);
89 }
90
91 /*
92 * Pass the event first through all filters and then, if it has not been
93 * filtered out, through all open handles. This function is called with
94 * dev->event_lock held and interrupts disabled.
95 */
96 static unsigned int input_to_handler(struct input_handle *handle,
97 struct input_value *vals, unsigned int count)
98 {
99 struct input_handler *handler = handle->handler;
100 struct input_value *end = vals;
101 struct input_value *v;
102
103 if (handler->filter) {
104 for (v = vals; v != vals + count; v++) {
105 if (handler->filter(handle, v->type, v->code, v->value))
106 continue;
107 if (end != v)
108 *end = *v;
109 end++;
110 }
111 count = end - vals;
112 }
113
114 if (!count)
115 return 0;
116
117 if (handler->events)
118 handler->events(handle, vals, count);
119 else if (handler->event)
120 for (v = vals; v != vals + count; v++)
121 handler->event(handle, v->type, v->code, v->value);
122
123 return count;
124 }
125
126 /*
127 * Pass the values first through all filters and then, if they have not been
128 * filtered out, through all open handles. This function is called with
129 * dev->event_lock held and interrupts disabled.
130 */
131 static void input_pass_values(struct input_dev *dev,
132 struct input_value *vals, unsigned int count)
133 {
134 struct input_handle *handle;
135 struct input_value *v;
136
137 if (!count)
138 return;
139
140 rcu_read_lock();
141
142 handle = rcu_dereference(dev->grab);
143 if (handle) {
144 count = input_to_handler(handle, vals, count);
145 } else {
146 list_for_each_entry_rcu(handle, &dev->h_list, d_node)
147 if (handle->open) {
148 count = input_to_handler(handle, vals, count);
149 if (!count)
150 break;
151 }
152 }
153
154 rcu_read_unlock();
155
156 /* trigger auto repeat for key events */
157 if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
158 for (v = vals; v != vals + count; v++) {
159 if (v->type == EV_KEY && v->value != 2) {
160 if (v->value)
161 input_start_autorepeat(dev, v->code);
162 else
163 input_stop_autorepeat(dev);
164 }
165 }
166 }
167 }
168
169 static void input_pass_event(struct input_dev *dev,
170 unsigned int type, unsigned int code, int value)
171 {
172 struct input_value vals[] = { { type, code, value } };
173
174 input_pass_values(dev, vals, ARRAY_SIZE(vals));
175 }
176
177 /*
178 * Generate software autorepeat event. Note that we take
179 * dev->event_lock here to avoid racing with input_event
180 * which may cause keys to get "stuck".
181 */
182 static void input_repeat_key(unsigned long data)
183 {
184 struct input_dev *dev = (void *) data;
185 unsigned long flags;
186
187 spin_lock_irqsave(&dev->event_lock, flags);
188
189 if (test_bit(dev->repeat_key, dev->key) &&
190 is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
191 struct input_value vals[] = {
192 { EV_KEY, dev->repeat_key, 2 },
193 input_value_sync
194 };
195
196 input_pass_values(dev, vals, ARRAY_SIZE(vals));
197
198 if (dev->rep[REP_PERIOD])
199 mod_timer(&dev->timer, jiffies +
200 msecs_to_jiffies(dev->rep[REP_PERIOD]));
201 }
202
203 spin_unlock_irqrestore(&dev->event_lock, flags);
204 }
205
206 #define INPUT_IGNORE_EVENT 0
207 #define INPUT_PASS_TO_HANDLERS 1
208 #define INPUT_PASS_TO_DEVICE 2
209 #define INPUT_SLOT 4
210 #define INPUT_FLUSH 8
211 #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
212
213 static int input_handle_abs_event(struct input_dev *dev,
214 unsigned int code, int *pval)
215 {
216 struct input_mt *mt = dev->mt;
217 bool is_mt_event;
218 int *pold;
219
220 if (code == ABS_MT_SLOT) {
221 /*
222 * "Stage" the event; we'll flush it later, when we
223 * get actual touch data.
224 */
225 if (mt && *pval >= 0 && *pval < mt->num_slots)
226 mt->slot = *pval;
227
228 return INPUT_IGNORE_EVENT;
229 }
230
231 is_mt_event = input_is_mt_value(code);
232
233 if (!is_mt_event) {
234 pold = &dev->absinfo[code].value;
235 } else if (mt) {
236 pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST];
237 } else {
238 /*
239 * Bypass filtering for multi-touch events when
240 * not employing slots.
241 */
242 pold = NULL;
243 }
244
245 if (pold) {
246 *pval = input_defuzz_abs_event(*pval, *pold,
247 dev->absinfo[code].fuzz);
248 if (*pold == *pval)
249 return INPUT_IGNORE_EVENT;
250
251 *pold = *pval;
252 }
253
254 /* Flush pending "slot" event */
255 if (is_mt_event && mt && mt->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
256 input_abs_set_val(dev, ABS_MT_SLOT, mt->slot);
257 return INPUT_PASS_TO_HANDLERS | INPUT_SLOT;
258 }
259
260 return INPUT_PASS_TO_HANDLERS;
261 }
262
263 static int input_get_disposition(struct input_dev *dev,
264 unsigned int type, unsigned int code, int *pval)
265 {
266 int disposition = INPUT_IGNORE_EVENT;
267 int value = *pval;
268
269 switch (type) {
270
271 case EV_SYN:
272 switch (code) {
273 case SYN_CONFIG:
274 disposition = INPUT_PASS_TO_ALL;
275 break;
276
277 case SYN_REPORT:
278 disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH;
279 break;
280 case SYN_MT_REPORT:
281 disposition = INPUT_PASS_TO_HANDLERS;
282 break;
283 }
284 break;
285
286 case EV_KEY:
287 if (is_event_supported(code, dev->keybit, KEY_MAX)) {
288
289 /* auto-repeat bypasses state updates */
290 if (value == 2) {
291 disposition = INPUT_PASS_TO_HANDLERS;
292 break;
293 }
294
295 if (!!test_bit(code, dev->key) != !!value) {
296
297 __change_bit(code, dev->key);
298 disposition = INPUT_PASS_TO_HANDLERS;
299 }
300 }
301 break;
302
303 case EV_SW:
304 if (is_event_supported(code, dev->swbit, SW_MAX) &&
305 !!test_bit(code, dev->sw) != !!value) {
306
307 __change_bit(code, dev->sw);
308 disposition = INPUT_PASS_TO_HANDLERS;
309 }
310 break;
311
312 case EV_ABS:
313 if (is_event_supported(code, dev->absbit, ABS_MAX))
314 disposition = input_handle_abs_event(dev, code, &value);
315
316 break;
317
318 case EV_REL:
319 if (is_event_supported(code, dev->relbit, REL_MAX) && value)
320 disposition = INPUT_PASS_TO_HANDLERS;
321
322 break;
323
324 case EV_MSC:
325 if (is_event_supported(code, dev->mscbit, MSC_MAX))
326 disposition = INPUT_PASS_TO_ALL;
327
328 break;
329
330 case EV_LED:
331 if (is_event_supported(code, dev->ledbit, LED_MAX) &&
332 !!test_bit(code, dev->led) != !!value) {
333
334 __change_bit(code, dev->led);
335 disposition = INPUT_PASS_TO_ALL;
336 }
337 break;
338
339 case EV_SND:
340 if (is_event_supported(code, dev->sndbit, SND_MAX)) {
341
342 if (!!test_bit(code, dev->snd) != !!value)
343 __change_bit(code, dev->snd);
344 disposition = INPUT_PASS_TO_ALL;
345 }
346 break;
347
348 case EV_REP:
349 if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) {
350 dev->rep[code] = value;
351 disposition = INPUT_PASS_TO_ALL;
352 }
353 break;
354
355 case EV_FF:
356 if (value >= 0)
357 disposition = INPUT_PASS_TO_ALL;
358 break;
359
360 case EV_PWR:
361 disposition = INPUT_PASS_TO_ALL;
362 break;
363 }
364
365 *pval = value;
366 return disposition;
367 }
368
369 static void input_handle_event(struct input_dev *dev,
370 unsigned int type, unsigned int code, int value)
371 {
372 int disposition = input_get_disposition(dev, type, code, &value);
373
374 if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN)
375 add_input_randomness(type, code, value);
376
377 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
378 dev->event(dev, type, code, value);
379
380 if (!dev->vals)
381 return;
382
383 if (disposition & INPUT_PASS_TO_HANDLERS) {
384 struct input_value *v;
385
386 if (disposition & INPUT_SLOT) {
387 v = &dev->vals[dev->num_vals++];
388 v->type = EV_ABS;
389 v->code = ABS_MT_SLOT;
390 v->value = dev->mt->slot;
391 }
392
393 v = &dev->vals[dev->num_vals++];
394 v->type = type;
395 v->code = code;
396 v->value = value;
397 }
398
399 if (disposition & INPUT_FLUSH) {
400 if (dev->num_vals >= 2)
401 input_pass_values(dev, dev->vals, dev->num_vals);
402 dev->num_vals = 0;
403 } else if (dev->num_vals >= dev->max_vals - 2) {
404 dev->vals[dev->num_vals++] = input_value_sync;
405 input_pass_values(dev, dev->vals, dev->num_vals);
406 dev->num_vals = 0;
407 }
408
409 }
410
411 /**
412 * input_event() - report new input event
413 * @dev: device that generated the event
414 * @type: type of the event
415 * @code: event code
416 * @value: value of the event
417 *
418 * This function should be used by drivers implementing various input
419 * devices to report input events. See also input_inject_event().
420 *
421 * NOTE: input_event() may be safely used right after an input device has
422 * been allocated with input_allocate_device(), even before it is registered
423 * with input_register_device(), but the event will not reach any of the
424 * input handlers. Such early invocation of input_event() may be used to
425 * 'seed' the initial state of a switch or the initial position of an
426 * absolute axis, etc.
427 */
428 void input_event(struct input_dev *dev,
429 unsigned int type, unsigned int code, int value)
430 {
431 unsigned long flags;
432
433 if (is_event_supported(type, dev->evbit, EV_MAX)) {
434
435 spin_lock_irqsave(&dev->event_lock, flags);
436 input_handle_event(dev, type, code, value);
437 spin_unlock_irqrestore(&dev->event_lock, flags);
438 }
439 }
440 EXPORT_SYMBOL(input_event);
441
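/*
 * Illustrative sketch (hypothetical driver code; 'btn_dev' is an assumed
 * name): drivers usually report events through the inline wrappers from
 * <linux/input.h>, which expand to input_event() calls. A button press and
 * release on an already registered device might look like:
 *
 *	input_report_key(btn_dev, KEY_POWER, 1);
 *	input_sync(btn_dev);
 *	input_report_key(btn_dev, KEY_POWER, 0);
 *	input_sync(btn_dev);
 */
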
442 /**
443 * input_inject_event() - send input event from input handler
444 * @handle: input handle to send event through
445 * @type: type of the event
446 * @code: event code
447 * @value: value of the event
448 *
449 * Similar to input_event() but will ignore the event if the device is
450 * "grabbed" and the handle injecting the event is not the one that owns
451 * the device.
452 */
453 void input_inject_event(struct input_handle *handle,
454 unsigned int type, unsigned int code, int value)
455 {
456 struct input_dev *dev = handle->dev;
457 struct input_handle *grab;
458 unsigned long flags;
459
460 if (is_event_supported(type, dev->evbit, EV_MAX)) {
461 spin_lock_irqsave(&dev->event_lock, flags);
462
463 rcu_read_lock();
464 grab = rcu_dereference(dev->grab);
465 if (!grab || grab == handle)
466 input_handle_event(dev, type, code, value);
467 rcu_read_unlock();
468
469 spin_unlock_irqrestore(&dev->event_lock, flags);
470 }
471 }
472 EXPORT_SYMBOL(input_inject_event);
473
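/*
 * Illustrative sketch (hypothetical handler code; 'handle' is an assumed
 * input_handle owned by the caller): a handler such as the console keyboard
 * driver can feed state back to the device this way, e.g. to light an LED:
 *
 *	input_inject_event(handle, EV_LED, LED_CAPSL, 1);
 *	input_inject_event(handle, EV_SYN, SYN_REPORT, 0);
 */
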
474 /**
475 * input_alloc_absinfo - allocates array of input_absinfo structs
476 * @dev: the input device emitting absolute events
477 *
478 * If the absinfo struct the caller asked for is already allocated, this
479 * function will not do anything.
480 */
481 void input_alloc_absinfo(struct input_dev *dev)
482 {
483 if (!dev->absinfo)
484 dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo),
485 GFP_KERNEL);
486
487 WARN(!dev->absinfo, "%s(): kcalloc() failed?\n", __func__);
488 }
489 EXPORT_SYMBOL(input_alloc_absinfo);
490
491 void input_set_abs_params(struct input_dev *dev, unsigned int axis,
492 int min, int max, int fuzz, int flat)
493 {
494 struct input_absinfo *absinfo;
495
496 input_alloc_absinfo(dev);
497 if (!dev->absinfo)
498 return;
499
500 absinfo = &dev->absinfo[axis];
501 absinfo->minimum = min;
502 absinfo->maximum = max;
503 absinfo->fuzz = fuzz;
504 absinfo->flat = flat;
505
506 __set_bit(EV_ABS, dev->evbit);
507 __set_bit(axis, dev->absbit);
508 }
509 EXPORT_SYMBOL(input_set_abs_params);
510
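/*
 * Illustrative sketch (hypothetical touchscreen driver; 'ts_dev' and the
 * ranges are assumed values): declaring a 1024x600 absolute touch surface
 * with a little fuzz and flat filtering:
 *
 *	input_set_abs_params(ts_dev, ABS_X, 0, 1023, 4, 8);
 *	input_set_abs_params(ts_dev, ABS_Y, 0, 599, 4, 8);
 *	input_set_abs_params(ts_dev, ABS_PRESSURE, 0, 255, 0, 0);
 *
 * Each call allocates dev->absinfo on first use and sets EV_ABS plus the
 * per-axis bit, so no separate input_set_capability() call is needed.
 */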
511
512 /**
513 * input_grab_device - grabs device for exclusive use
514 * @handle: input handle that wants to own the device
515 *
516 * When a device is grabbed by an input handle, all events generated by
517 * the device are delivered only to this handle. Also, events injected
518 * by other input handles are ignored while the device is grabbed.
519 */
520 int input_grab_device(struct input_handle *handle)
521 {
522 struct input_dev *dev = handle->dev;
523 int retval;
524
525 retval = mutex_lock_interruptible(&dev->mutex);
526 if (retval)
527 return retval;
528
529 if (dev->grab) {
530 retval = -EBUSY;
531 goto out;
532 }
533
534 rcu_assign_pointer(dev->grab, handle);
535
536 out:
537 mutex_unlock(&dev->mutex);
538 return retval;
539 }
540 EXPORT_SYMBOL(input_grab_device);
541
542 static void __input_release_device(struct input_handle *handle)
543 {
544 struct input_dev *dev = handle->dev;
545 struct input_handle *grabber;
546
547 grabber = rcu_dereference_protected(dev->grab,
548 lockdep_is_held(&dev->mutex));
549 if (grabber == handle) {
550 rcu_assign_pointer(dev->grab, NULL);
551 /* Make sure input_pass_event() notices that grab is gone */
552 synchronize_rcu();
553
554 list_for_each_entry(handle, &dev->h_list, d_node)
555 if (handle->open && handle->handler->start)
556 handle->handler->start(handle);
557 }
558 }
559
560 /**
561 * input_release_device - release previously grabbed device
562 * @handle: input handle that owns the device
563 *
564 * Releases previously grabbed device so that other input handles can
565 * start receiving input events. Upon release all handlers attached
566 * to the device have their start() method called so they have a chance
567 * to synchronize device state with the rest of the system.
568 */
569 void input_release_device(struct input_handle *handle)
570 {
571 struct input_dev *dev = handle->dev;
572
573 mutex_lock(&dev->mutex);
574 __input_release_device(handle);
575 mutex_unlock(&dev->mutex);
576 }
577 EXPORT_SYMBOL(input_release_device);
578
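/*
 * Illustrative sketch (hypothetical handler code; 'handle' and 'error' are
 * assumed names): the evdev handler, for example, implements EVIOCGRAB on
 * top of this pair:
 *
 *	error = input_grab_device(handle);
 *	if (error)
 *		return error;
 *	...
 *	input_release_device(handle);
 */
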
579 /**
580 * input_open_device - open input device
581 * @handle: handle through which device is being accessed
582 *
583 * This function should be called by input handlers when they
584 * want to start receiving events from a given input device.
585 */
586 int input_open_device(struct input_handle *handle)
587 {
588 struct input_dev *dev = handle->dev;
589 int retval;
590
591 retval = mutex_lock_interruptible(&dev->mutex);
592 if (retval)
593 return retval;
594
595 if (dev->going_away) {
596 retval = -ENODEV;
597 goto out;
598 }
599
600 handle->open++;
601
602 if (!dev->users++ && dev->open)
603 retval = dev->open(dev);
604
605 if (retval) {
606 dev->users--;
607 if (!--handle->open) {
608 /*
609 * Make sure we are not delivering any more events
610 * through this handle
611 */
612 synchronize_rcu();
613 }
614 }
615
616 out:
617 mutex_unlock(&dev->mutex);
618 return retval;
619 }
620 EXPORT_SYMBOL(input_open_device);
621
622 int input_flush_device(struct input_handle *handle, struct file *file)
623 {
624 struct input_dev *dev = handle->dev;
625 int retval;
626
627 retval = mutex_lock_interruptible(&dev->mutex);
628 if (retval)
629 return retval;
630
631 if (dev->flush)
632 retval = dev->flush(dev, file);
633
634 mutex_unlock(&dev->mutex);
635 return retval;
636 }
637 EXPORT_SYMBOL(input_flush_device);
638
639 /**
640 * input_close_device - close input device
641 * @handle: handle through which device is being accessed
642 *
643 * This function should be called by input handlers when they
644 * want to stop receiving events from a given input device.
645 */
646 void input_close_device(struct input_handle *handle)
647 {
648 struct input_dev *dev = handle->dev;
649
650 mutex_lock(&dev->mutex);
651
652 __input_release_device(handle);
653
654 if (!--dev->users && dev->close)
655 dev->close(dev);
656
657 if (!--handle->open) {
658 /*
659 * synchronize_rcu() makes sure that input_pass_event()
660 * completed and that no more input events are delivered
661 * through this handle
662 */
663 synchronize_rcu();
664 }
665
666 mutex_unlock(&dev->mutex);
667 }
668 EXPORT_SYMBOL(input_close_device);
669
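/*
 * Illustrative sketch (hypothetical handler code; 'example_handler' and the
 * error labels are assumed names): a handler's connect() method typically
 * allocates a handle, registers it and then opens the device:
 *
 *	handle->dev = dev;
 *	handle->handler = &example_handler;
 *	handle->name = "example";
 *
 *	error = input_register_handle(handle);
 *	if (error)
 *		goto err_free_handle;
 *
 *	error = input_open_device(handle);
 *	if (error)
 *		goto err_unregister_handle;
 *
 * The matching disconnect() method calls input_close_device() and
 * input_unregister_handle() in reverse order.
 */
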
670 /*
671 * Simulate keyup events for all keys that are marked as pressed.
672 * The function must be called with dev->event_lock held.
673 */
674 static void input_dev_release_keys(struct input_dev *dev)
675 {
676 bool need_sync = false;
677 int code;
678
679 if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
680 for_each_set_bit(code, dev->key, KEY_CNT) {
681 input_pass_event(dev, EV_KEY, code, 0);
682 need_sync = true;
683 }
684
685 if (need_sync)
686 input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
687
688 memset(dev->key, 0, sizeof(dev->key));
689 }
690 }
691
692 /*
693 * Prepare device for unregistering
694 */
695 static void input_disconnect_device(struct input_dev *dev)
696 {
697 struct input_handle *handle;
698
699 /*
700 * Mark device as going away. Note that we take dev->mutex here
701 * not to protect access to dev->going_away but rather to ensure
702 * that there are no threads in the middle of input_open_device()
703 */
704 mutex_lock(&dev->mutex);
705 dev->going_away = true;
706 mutex_unlock(&dev->mutex);
707
708 spin_lock_irq(&dev->event_lock);
709
710 /*
711 * Simulate keyup events for all pressed keys so that handlers
712 * are not left with "stuck" keys. The driver may continue to
713 * generate events even after we are done here, but they will not
714 * reach any handlers.
715 */
716 input_dev_release_keys(dev);
717
718 list_for_each_entry(handle, &dev->h_list, d_node)
719 handle->open = 0;
720
721 spin_unlock_irq(&dev->event_lock);
722 }
723
724 /**
725 * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
726 * @ke: keymap entry containing scancode to be converted.
727 * @scancode: pointer to the location where converted scancode should
728 * be stored.
729 *
730 * This function is used to convert a scancode stored in &struct input_keymap_entry
731 * into scalar form understood by legacy keymap handling methods. These
732 * methods expect scancodes to be represented as 'unsigned int'.
733 */
734 int input_scancode_to_scalar(const struct input_keymap_entry *ke,
735 unsigned int *scancode)
736 {
737 switch (ke->len) {
738 case 1:
739 *scancode = *((u8 *)ke->scancode);
740 break;
741
742 case 2:
743 *scancode = *((u16 *)ke->scancode);
744 break;
745
746 case 4:
747 *scancode = *((u32 *)ke->scancode);
748 break;
749
750 default:
751 return -EINVAL;
752 }
753
754 return 0;
755 }
756 EXPORT_SYMBOL(input_scancode_to_scalar);
757
758 /*
759 * Those routines handle the default case where no [gs]etkeycode() is
760 * defined. In this case, an array indexed by the scancode is used.
761 */
762
763 static unsigned int input_fetch_keycode(struct input_dev *dev,
764 unsigned int index)
765 {
766 switch (dev->keycodesize) {
767 case 1:
768 return ((u8 *)dev->keycode)[index];
769
770 case 2:
771 return ((u16 *)dev->keycode)[index];
772
773 default:
774 return ((u32 *)dev->keycode)[index];
775 }
776 }
777
778 static int input_default_getkeycode(struct input_dev *dev,
779 struct input_keymap_entry *ke)
780 {
781 unsigned int index;
782 int error;
783
784 if (!dev->keycodesize)
785 return -EINVAL;
786
787 if (ke->flags & INPUT_KEYMAP_BY_INDEX)
788 index = ke->index;
789 else {
790 error = input_scancode_to_scalar(ke, &index);
791 if (error)
792 return error;
793 }
794
795 if (index >= dev->keycodemax)
796 return -EINVAL;
797
798 ke->keycode = input_fetch_keycode(dev, index);
799 ke->index = index;
800 ke->len = sizeof(index);
801 memcpy(ke->scancode, &index, sizeof(index));
802
803 return 0;
804 }
805
806 static int input_default_setkeycode(struct input_dev *dev,
807 const struct input_keymap_entry *ke,
808 unsigned int *old_keycode)
809 {
810 unsigned int index;
811 int error;
812 int i;
813
814 if (!dev->keycodesize)
815 return -EINVAL;
816
817 if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
818 index = ke->index;
819 } else {
820 error = input_scancode_to_scalar(ke, &index);
821 if (error)
822 return error;
823 }
824
825 if (index >= dev->keycodemax)
826 return -EINVAL;
827
828 if (dev->keycodesize < sizeof(ke->keycode) &&
829 (ke->keycode >> (dev->keycodesize * 8)))
830 return -EINVAL;
831
832 switch (dev->keycodesize) {
833 case 1: {
834 u8 *k = (u8 *)dev->keycode;
835 *old_keycode = k[index];
836 k[index] = ke->keycode;
837 break;
838 }
839 case 2: {
840 u16 *k = (u16 *)dev->keycode;
841 *old_keycode = k[index];
842 k[index] = ke->keycode;
843 break;
844 }
845 default: {
846 u32 *k = (u32 *)dev->keycode;
847 *old_keycode = k[index];
848 k[index] = ke->keycode;
849 break;
850 }
851 }
852
853 __clear_bit(*old_keycode, dev->keybit);
854 __set_bit(ke->keycode, dev->keybit);
855
856 for (i = 0; i < dev->keycodemax; i++) {
857 if (input_fetch_keycode(dev, i) == *old_keycode) {
858 __set_bit(*old_keycode, dev->keybit);
859 break; /* Setting the bit twice is useless, so break */
860 }
861 }
862
863 return 0;
864 }
865
866 /**
867 * input_get_keycode - retrieve keycode currently mapped to a given scancode
868 * @dev: input device whose keymap is being queried
869 * @ke: keymap entry
870 *
871 * This function should be called by anyone interested in retrieving the current
872 * keymap. Presently evdev handlers use it.
873 */
874 int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
875 {
876 unsigned long flags;
877 int retval;
878
879 spin_lock_irqsave(&dev->event_lock, flags);
880 retval = dev->getkeycode(dev, ke);
881 spin_unlock_irqrestore(&dev->event_lock, flags);
882
883 return retval;
884 }
885 EXPORT_SYMBOL(input_get_keycode);
886
887 /**
888 * input_set_keycode - assign a keycode to a given scancode
889 * @dev: input device whose keymap is being updated
890 * @ke: new keymap entry
891 *
892 * This function should be called by anyone needing to update the current
893 * keymap. Presently keyboard and evdev handlers use it.
894 */
895 int input_set_keycode(struct input_dev *dev,
896 const struct input_keymap_entry *ke)
897 {
898 unsigned long flags;
899 unsigned int old_keycode;
900 int retval;
901
902 if (ke->keycode > KEY_MAX)
903 return -EINVAL;
904
905 spin_lock_irqsave(&dev->event_lock, flags);
906
907 retval = dev->setkeycode(dev, ke, &old_keycode);
908 if (retval)
909 goto out;
910
911 /* Make sure KEY_RESERVED did not get enabled. */
912 __clear_bit(KEY_RESERVED, dev->keybit);
913
914 /*
915 * Simulate keyup event if keycode is not present
916 * in the keymap anymore
917 */
918 if (test_bit(EV_KEY, dev->evbit) &&
919 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
920 __test_and_clear_bit(old_keycode, dev->key)) {
921 struct input_value vals[] = {
922 { EV_KEY, old_keycode, 0 },
923 input_value_sync
924 };
925
926 input_pass_values(dev, vals, ARRAY_SIZE(vals));
927 }
928
929 out:
930 spin_unlock_irqrestore(&dev->event_lock, flags);
931
932 return retval;
933 }
934 EXPORT_SYMBOL(input_set_keycode);
935
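/*
 * Illustrative sketch (hypothetical values; 'dev' and 'error' are assumed
 * names): remapping a one-byte scancode 0x1e to KEY_A through the keymap
 * interface used by evdev's EVIOCSKEYCODE_V2:
 *
 *	struct input_keymap_entry ke = {
 *		.len = sizeof(u8),
 *		.keycode = KEY_A,
 *	};
 *	u8 scancode = 0x1e;
 *
 *	memcpy(ke.scancode, &scancode, sizeof(scancode));
 *	error = input_set_keycode(dev, &ke);
 *
 * input_scancode_to_scalar() above is what turns ke.scancode back into a
 * plain unsigned int for drivers using the default keymap handlers.
 */
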
936 static const struct input_device_id *input_match_device(struct input_handler *handler,
937 struct input_dev *dev)
938 {
939 const struct input_device_id *id;
940
941 for (id = handler->id_table; id->flags || id->driver_info; id++) {
942
943 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
944 if (id->bustype != dev->id.bustype)
945 continue;
946
947 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
948 if (id->vendor != dev->id.vendor)
949 continue;
950
951 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
952 if (id->product != dev->id.product)
953 continue;
954
955 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
956 if (id->version != dev->id.version)
957 continue;
958
959 if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX))
960 continue;
961
962 if (!bitmap_subset(id->keybit, dev->keybit, KEY_MAX))
963 continue;
964
965 if (!bitmap_subset(id->relbit, dev->relbit, REL_MAX))
966 continue;
967
968 if (!bitmap_subset(id->absbit, dev->absbit, ABS_MAX))
969 continue;
970
971 if (!bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX))
972 continue;
973
974 if (!bitmap_subset(id->ledbit, dev->ledbit, LED_MAX))
975 continue;
976
977 if (!bitmap_subset(id->sndbit, dev->sndbit, SND_MAX))
978 continue;
979
980 if (!bitmap_subset(id->ffbit, dev->ffbit, FF_MAX))
981 continue;
982
983 if (!bitmap_subset(id->swbit, dev->swbit, SW_MAX))
984 continue;
985
986 if (!handler->match || handler->match(handler, dev))
987 return id;
988 }
989
990 return NULL;
991 }
992
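/*
 * Illustrative sketch (hypothetical handler; 'example_ids' is an assumed
 * name): an id_table that matches every device, in the style used by evdev,
 * looks like the snippet below. The .driver_info value is arbitrary;
 * input_match_device() only requires flags or driver_info to be non-zero
 * for an entry to be considered, and an all-zero entry terminates the table.
 *
 *	static const struct input_device_id example_ids[] = {
 *		{ .driver_info = 1 },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(input, example_ids);
 */
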
993 static int input_attach_handler(struct input_dev *dev, struct input_handler *handler)
994 {
995 const struct input_device_id *id;
996 int error;
997
998 id = input_match_device(handler, dev);
999 if (!id)
1000 return -ENODEV;
1001
1002 error = handler->connect(handler, dev, id);
1003 if (error && error != -ENODEV)
1004 pr_err("failed to attach handler %s to device %s, error: %d\n",
1005 handler->name, kobject_name(&dev->dev.kobj), error);
1006
1007 return error;
1008 }
1009
1010 #ifdef CONFIG_COMPAT
1011
1012 static int input_bits_to_string(char *buf, int buf_size,
1013 unsigned long bits, bool skip_empty)
1014 {
1015 int len = 0;
1016
1017 if (in_compat_syscall()) {
1018 u32 dword = bits >> 32;
1019 if (dword || !skip_empty)
1020 len += snprintf(buf, buf_size, "%x ", dword);
1021
1022 dword = bits & 0xffffffffUL;
1023 if (dword || !skip_empty || len)
1024 len += snprintf(buf + len, max(buf_size - len, 0),
1025 "%x", dword);
1026 } else {
1027 if (bits || !skip_empty)
1028 len += snprintf(buf, buf_size, "%lx", bits);
1029 }
1030
1031 return len;
1032 }
1033
1034 #else /* !CONFIG_COMPAT */
1035
1036 static int input_bits_to_string(char *buf, int buf_size,
1037 unsigned long bits, bool skip_empty)
1038 {
1039 return bits || !skip_empty ?
1040 snprintf(buf, buf_size, "%lx", bits) : 0;
1041 }
1042
1043 #endif
1044
1045 #ifdef CONFIG_PROC_FS
1046
1047 static struct proc_dir_entry *proc_bus_input_dir;
1048 static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait);
1049 static int input_devices_state;
1050
1051 static inline void input_wakeup_procfs_readers(void)
1052 {
1053 input_devices_state++;
1054 wake_up(&input_devices_poll_wait);
1055 }
1056
1057 static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
1058 {
1059 poll_wait(file, &input_devices_poll_wait, wait);
1060 if (file->f_version != input_devices_state) {
1061 file->f_version = input_devices_state;
1062 return POLLIN | POLLRDNORM;
1063 }
1064
1065 return 0;
1066 }
1067
1068 union input_seq_state {
1069 struct {
1070 unsigned short pos;
1071 bool mutex_acquired;
1072 };
1073 void *p;
1074 };
1075
1076 static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
1077 {
1078 union input_seq_state *state = (union input_seq_state *)&seq->private;
1079 int error;
1080
1081 /* We need to fit into seq->private pointer */
1082 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
1083
1084 error = mutex_lock_interruptible(&input_mutex);
1085 if (error) {
1086 state->mutex_acquired = false;
1087 return ERR_PTR(error);
1088 }
1089
1090 state->mutex_acquired = true;
1091
1092 return seq_list_start(&input_dev_list, *pos);
1093 }
1094
1095 static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1096 {
1097 return seq_list_next(v, &input_dev_list, pos);
1098 }
1099
1100 static void input_seq_stop(struct seq_file *seq, void *v)
1101 {
1102 union input_seq_state *state = (union input_seq_state *)&seq->private;
1103
1104 if (state->mutex_acquired)
1105 mutex_unlock(&input_mutex);
1106 }
1107
1108 static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
1109 unsigned long *bitmap, int max)
1110 {
1111 int i;
1112 bool skip_empty = true;
1113 char buf[18];
1114
1115 seq_printf(seq, "B: %s=", name);
1116
1117 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1118 if (input_bits_to_string(buf, sizeof(buf),
1119 bitmap[i], skip_empty)) {
1120 skip_empty = false;
1121 seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
1122 }
1123 }
1124
1125 /*
1126 * If no output was produced print a single 0.
1127 */
1128 if (skip_empty)
1129 seq_putc(seq, '0');
1130
1131 seq_putc(seq, '\n');
1132 }
1133
1134 static int input_devices_seq_show(struct seq_file *seq, void *v)
1135 {
1136 struct input_dev *dev = container_of(v, struct input_dev, node);
1137 const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
1138 struct input_handle *handle;
1139
1140 seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n",
1141 dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version);
1142
1143 seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : "");
1144 seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : "");
1145 seq_printf(seq, "S: Sysfs=%s\n", path ? path : "");
1146 seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : "");
1147 seq_puts(seq, "H: Handlers=");
1148
1149 list_for_each_entry(handle, &dev->h_list, d_node)
1150 seq_printf(seq, "%s ", handle->name);
1151 seq_putc(seq, '\n');
1152
1153 input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX);
1154
1155 input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX);
1156 if (test_bit(EV_KEY, dev->evbit))
1157 input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX);
1158 if (test_bit(EV_REL, dev->evbit))
1159 input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX);
1160 if (test_bit(EV_ABS, dev->evbit))
1161 input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX);
1162 if (test_bit(EV_MSC, dev->evbit))
1163 input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX);
1164 if (test_bit(EV_LED, dev->evbit))
1165 input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX);
1166 if (test_bit(EV_SND, dev->evbit))
1167 input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX);
1168 if (test_bit(EV_FF, dev->evbit))
1169 input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX);
1170 if (test_bit(EV_SW, dev->evbit))
1171 input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX);
1172
1173 seq_putc(seq, '\n');
1174
1175 kfree(path);
1176 return 0;
1177 }
1178
1179 static const struct seq_operations input_devices_seq_ops = {
1180 .start = input_devices_seq_start,
1181 .next = input_devices_seq_next,
1182 .stop = input_seq_stop,
1183 .show = input_devices_seq_show,
1184 };
1185
1186 static int input_proc_devices_open(struct inode *inode, struct file *file)
1187 {
1188 return seq_open(file, &input_devices_seq_ops);
1189 }
1190
1191 static const struct file_operations input_devices_fileops = {
1192 .owner = THIS_MODULE,
1193 .open = input_proc_devices_open,
1194 .poll = input_proc_devices_poll,
1195 .read = seq_read,
1196 .llseek = seq_lseek,
1197 .release = seq_release,
1198 };
1199
1200 static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
1201 {
1202 union input_seq_state *state = (union input_seq_state *)&seq->private;
1203 int error;
1204
1205 /* We need to fit into seq->private pointer */
1206 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
1207
1208 error = mutex_lock_interruptible(&input_mutex);
1209 if (error) {
1210 state->mutex_acquired = false;
1211 return ERR_PTR(error);
1212 }
1213
1214 state->mutex_acquired = true;
1215 state->pos = *pos;
1216
1217 return seq_list_start(&input_handler_list, *pos);
1218 }
1219
1220 static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1221 {
1222 union input_seq_state *state = (union input_seq_state *)&seq->private;
1223
1224 state->pos = *pos + 1;
1225 return seq_list_next(v, &input_handler_list, pos);
1226 }
1227
1228 static int input_handlers_seq_show(struct seq_file *seq, void *v)
1229 {
1230 struct input_handler *handler = container_of(v, struct input_handler, node);
1231 union input_seq_state *state = (union input_seq_state *)&seq->private;
1232
1233 seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
1234 if (handler->filter)
1235 seq_puts(seq, " (filter)");
1236 if (handler->legacy_minors)
1237 seq_printf(seq, " Minor=%d", handler->minor);
1238 seq_putc(seq, '\n');
1239
1240 return 0;
1241 }
1242
1243 static const struct seq_operations input_handlers_seq_ops = {
1244 .start = input_handlers_seq_start,
1245 .next = input_handlers_seq_next,
1246 .stop = input_seq_stop,
1247 .show = input_handlers_seq_show,
1248 };
1249
1250 static int input_proc_handlers_open(struct inode *inode, struct file *file)
1251 {
1252 return seq_open(file, &input_handlers_seq_ops);
1253 }
1254
1255 static const struct file_operations input_handlers_fileops = {
1256 .owner = THIS_MODULE,
1257 .open = input_proc_handlers_open,
1258 .read = seq_read,
1259 .llseek = seq_lseek,
1260 .release = seq_release,
1261 };
1262
1263 static int __init input_proc_init(void)
1264 {
1265 struct proc_dir_entry *entry;
1266
1267 proc_bus_input_dir = proc_mkdir("bus/input", NULL);
1268 if (!proc_bus_input_dir)
1269 return -ENOMEM;
1270
1271 entry = proc_create("devices", 0, proc_bus_input_dir,
1272 &input_devices_fileops);
1273 if (!entry)
1274 goto fail1;
1275
1276 entry = proc_create("handlers", 0, proc_bus_input_dir,
1277 &input_handlers_fileops);
1278 if (!entry)
1279 goto fail2;
1280
1281 return 0;
1282
1283 fail2: remove_proc_entry("devices", proc_bus_input_dir);
1284 fail1: remove_proc_entry("bus/input", NULL);
1285 return -ENOMEM;
1286 }
1287
1288 static void input_proc_exit(void)
1289 {
1290 remove_proc_entry("devices", proc_bus_input_dir);
1291 remove_proc_entry("handlers", proc_bus_input_dir);
1292 remove_proc_entry("bus/input", NULL);
1293 }
1294
1295 #else /* !CONFIG_PROC_FS */
1296 static inline void input_wakeup_procfs_readers(void) { }
1297 static inline int input_proc_init(void) { return 0; }
1298 static inline void input_proc_exit(void) { }
1299 #endif
1300
1301 #define INPUT_DEV_STRING_ATTR_SHOW(name) \
1302 static ssize_t input_dev_show_##name(struct device *dev, \
1303 struct device_attribute *attr, \
1304 char *buf) \
1305 { \
1306 struct input_dev *input_dev = to_input_dev(dev); \
1307 \
1308 return scnprintf(buf, PAGE_SIZE, "%s\n", \
1309 input_dev->name ? input_dev->name : ""); \
1310 } \
1311 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL)
1312
1313 INPUT_DEV_STRING_ATTR_SHOW(name);
1314 INPUT_DEV_STRING_ATTR_SHOW(phys);
1315 INPUT_DEV_STRING_ATTR_SHOW(uniq);
1316
1317 static int input_print_modalias_bits(char *buf, int size,
1318 char name, unsigned long *bm,
1319 unsigned int min_bit, unsigned int max_bit)
1320 {
1321 int len = 0, i;
1322
1323 len += snprintf(buf, max(size, 0), "%c", name);
1324 for (i = min_bit; i < max_bit; i++)
1325 if (bm[BIT_WORD(i)] & BIT_MASK(i))
1326 len += snprintf(buf + len, max(size - len, 0), "%X,", i);
1327 return len;
1328 }
1329
1330 static int input_print_modalias(char *buf, int size, struct input_dev *id,
1331 int add_cr)
1332 {
1333 int len;
1334
1335 len = snprintf(buf, max(size, 0),
1336 "input:b%04Xv%04Xp%04Xe%04X-",
1337 id->id.bustype, id->id.vendor,
1338 id->id.product, id->id.version);
1339
1340 len += input_print_modalias_bits(buf + len, size - len,
1341 'e', id->evbit, 0, EV_MAX);
1342 len += input_print_modalias_bits(buf + len, size - len,
1343 'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
1344 len += input_print_modalias_bits(buf + len, size - len,
1345 'r', id->relbit, 0, REL_MAX);
1346 len += input_print_modalias_bits(buf + len, size - len,
1347 'a', id->absbit, 0, ABS_MAX);
1348 len += input_print_modalias_bits(buf + len, size - len,
1349 'm', id->mscbit, 0, MSC_MAX);
1350 len += input_print_modalias_bits(buf + len, size - len,
1351 'l', id->ledbit, 0, LED_MAX);
1352 len += input_print_modalias_bits(buf + len, size - len,
1353 's', id->sndbit, 0, SND_MAX);
1354 len += input_print_modalias_bits(buf + len, size - len,
1355 'f', id->ffbit, 0, FF_MAX);
1356 len += input_print_modalias_bits(buf + len, size - len,
1357 'w', id->swbit, 0, SW_MAX);
1358
1359 if (add_cr)
1360 len += snprintf(buf + len, max(size - len, 0), "\n");
1361
1362 return len;
1363 }
1364
1365 static ssize_t input_dev_show_modalias(struct device *dev,
1366 struct device_attribute *attr,
1367 char *buf)
1368 {
1369 struct input_dev *id = to_input_dev(dev);
1370 ssize_t len;
1371
1372 len = input_print_modalias(buf, PAGE_SIZE, id, 1);
1373
1374 return min_t(int, len, PAGE_SIZE);
1375 }
1376 static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);
1377
1378 static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
1379 int max, int add_cr);
1380
1381 static ssize_t input_dev_show_properties(struct device *dev,
1382 struct device_attribute *attr,
1383 char *buf)
1384 {
1385 struct input_dev *input_dev = to_input_dev(dev);
1386 int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit,
1387 INPUT_PROP_MAX, true);
1388 return min_t(int, len, PAGE_SIZE);
1389 }
1390 static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL);
1391
1392 static struct attribute *input_dev_attrs[] = {
1393 &dev_attr_name.attr,
1394 &dev_attr_phys.attr,
1395 &dev_attr_uniq.attr,
1396 &dev_attr_modalias.attr,
1397 &dev_attr_properties.attr,
1398 NULL
1399 };
1400
1401 static struct attribute_group input_dev_attr_group = {
1402 .attrs = input_dev_attrs,
1403 };
1404
1405 #define INPUT_DEV_ID_ATTR(name) \
1406 static ssize_t input_dev_show_id_##name(struct device *dev, \
1407 struct device_attribute *attr, \
1408 char *buf) \
1409 { \
1410 struct input_dev *input_dev = to_input_dev(dev); \
1411 return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name); \
1412 } \
1413 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL)
1414
1415 INPUT_DEV_ID_ATTR(bustype);
1416 INPUT_DEV_ID_ATTR(vendor);
1417 INPUT_DEV_ID_ATTR(product);
1418 INPUT_DEV_ID_ATTR(version);
1419
1420 static struct attribute *input_dev_id_attrs[] = {
1421 &dev_attr_bustype.attr,
1422 &dev_attr_vendor.attr,
1423 &dev_attr_product.attr,
1424 &dev_attr_version.attr,
1425 NULL
1426 };
1427
1428 static struct attribute_group input_dev_id_attr_group = {
1429 .name = "id",
1430 .attrs = input_dev_id_attrs,
1431 };
1432
1433 static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
1434 int max, int add_cr)
1435 {
1436 int i;
1437 int len = 0;
1438 bool skip_empty = true;
1439
1440 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1441 len += input_bits_to_string(buf + len, max(buf_size - len, 0),
1442 bitmap[i], skip_empty);
1443 if (len) {
1444 skip_empty = false;
1445 if (i > 0)
1446 len += snprintf(buf + len, max(buf_size - len, 0), " ");
1447 }
1448 }
1449
1450 /*
1451 * If no output was produced print a single 0.
1452 */
1453 if (len == 0)
1454 len = snprintf(buf, buf_size, "%d", 0);
1455
1456 if (add_cr)
1457 len += snprintf(buf + len, max(buf_size - len, 0), "\n");
1458
1459 return len;
1460 }
1461
1462 #define INPUT_DEV_CAP_ATTR(ev, bm) \
1463 static ssize_t input_dev_show_cap_##bm(struct device *dev, \
1464 struct device_attribute *attr, \
1465 char *buf) \
1466 { \
1467 struct input_dev *input_dev = to_input_dev(dev); \
1468 int len = input_print_bitmap(buf, PAGE_SIZE, \
1469 input_dev->bm##bit, ev##_MAX, \
1470 true); \
1471 return min_t(int, len, PAGE_SIZE); \
1472 } \
1473 static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)
1474
1475 INPUT_DEV_CAP_ATTR(EV, ev);
1476 INPUT_DEV_CAP_ATTR(KEY, key);
1477 INPUT_DEV_CAP_ATTR(REL, rel);
1478 INPUT_DEV_CAP_ATTR(ABS, abs);
1479 INPUT_DEV_CAP_ATTR(MSC, msc);
1480 INPUT_DEV_CAP_ATTR(LED, led);
1481 INPUT_DEV_CAP_ATTR(SND, snd);
1482 INPUT_DEV_CAP_ATTR(FF, ff);
1483 INPUT_DEV_CAP_ATTR(SW, sw);
1484
1485 static struct attribute *input_dev_caps_attrs[] = {
1486 &dev_attr_ev.attr,
1487 &dev_attr_key.attr,
1488 &dev_attr_rel.attr,
1489 &dev_attr_abs.attr,
1490 &dev_attr_msc.attr,
1491 &dev_attr_led.attr,
1492 &dev_attr_snd.attr,
1493 &dev_attr_ff.attr,
1494 &dev_attr_sw.attr,
1495 NULL
1496 };
1497
1498 static struct attribute_group input_dev_caps_attr_group = {
1499 .name = "capabilities",
1500 .attrs = input_dev_caps_attrs,
1501 };
1502
1503 static const struct attribute_group *input_dev_attr_groups[] = {
1504 &input_dev_attr_group,
1505 &input_dev_id_attr_group,
1506 &input_dev_caps_attr_group,
1507 NULL
1508 };
1509
1510 static void input_dev_release(struct device *device)
1511 {
1512 struct input_dev *dev = to_input_dev(device);
1513
1514 input_ff_destroy(dev);
1515 input_mt_destroy_slots(dev);
1516 kfree(dev->absinfo);
1517 kfree(dev->vals);
1518 kfree(dev);
1519
1520 module_put(THIS_MODULE);
1521 }
1522
1523 /*
1524 * Input uevent interface - loading event handlers based on
1525 * device bitfields.
1526 */
1527 static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
1528 const char *name, unsigned long *bitmap, int max)
1529 {
1530 int len;
1531
1532 if (add_uevent_var(env, "%s", name))
1533 return -ENOMEM;
1534
1535 len = input_print_bitmap(&env->buf[env->buflen - 1],
1536 sizeof(env->buf) - env->buflen,
1537 bitmap, max, false);
1538 if (len >= (sizeof(env->buf) - env->buflen))
1539 return -ENOMEM;
1540
1541 env->buflen += len;
1542 return 0;
1543 }
1544
1545 static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
1546 struct input_dev *dev)
1547 {
1548 int len;
1549
1550 if (add_uevent_var(env, "MODALIAS="))
1551 return -ENOMEM;
1552
1553 len = input_print_modalias(&env->buf[env->buflen - 1],
1554 sizeof(env->buf) - env->buflen,
1555 dev, 0);
1556 if (len >= (sizeof(env->buf) - env->buflen))
1557 return -ENOMEM;
1558
1559 env->buflen += len;
1560 return 0;
1561 }
1562
1563 #define INPUT_ADD_HOTPLUG_VAR(fmt, val...) \
1564 do { \
1565 int err = add_uevent_var(env, fmt, val); \
1566 if (err) \
1567 return err; \
1568 } while (0)
1569
1570 #define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max) \
1571 do { \
1572 int err = input_add_uevent_bm_var(env, name, bm, max); \
1573 if (err) \
1574 return err; \
1575 } while (0)
1576
1577 #define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev) \
1578 do { \
1579 int err = input_add_uevent_modalias_var(env, dev); \
1580 if (err) \
1581 return err; \
1582 } while (0)
1583
1584 static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
1585 {
1586 struct input_dev *dev = to_input_dev(device);
1587
1588 INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x",
1589 dev->id.bustype, dev->id.vendor,
1590 dev->id.product, dev->id.version);
1591 if (dev->name)
1592 INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name);
1593 if (dev->phys)
1594 INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys);
1595 if (dev->uniq)
1596 INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq);
1597
1598 INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX);
1599
1600 INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX);
1601 if (test_bit(EV_KEY, dev->evbit))
1602 INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX);
1603 if (test_bit(EV_REL, dev->evbit))
1604 INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX);
1605 if (test_bit(EV_ABS, dev->evbit))
1606 INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX);
1607 if (test_bit(EV_MSC, dev->evbit))
1608 INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX);
1609 if (test_bit(EV_LED, dev->evbit))
1610 INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX);
1611 if (test_bit(EV_SND, dev->evbit))
1612 INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX);
1613 if (test_bit(EV_FF, dev->evbit))
1614 INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX);
1615 if (test_bit(EV_SW, dev->evbit))
1616 INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX);
1617
1618 INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev);
1619
1620 return 0;
1621 }
1622
1623 #define INPUT_DO_TOGGLE(dev, type, bits, on) \
1624 do { \
1625 int i; \
1626 bool active; \
1627 \
1628 if (!test_bit(EV_##type, dev->evbit)) \
1629 break; \
1630 \
1631 for_each_set_bit(i, dev->bits##bit, type##_CNT) { \
1632 active = test_bit(i, dev->bits); \
1633 if (!active && !on) \
1634 continue; \
1635 \
1636 dev->event(dev, EV_##type, i, on ? active : 0); \
1637 } \
1638 } while (0)
1639
1640 static void input_dev_toggle(struct input_dev *dev, bool activate)
1641 {
1642 if (!dev->event)
1643 return;
1644
1645 INPUT_DO_TOGGLE(dev, LED, led, activate);
1646 INPUT_DO_TOGGLE(dev, SND, snd, activate);
1647
1648 if (activate && test_bit(EV_REP, dev->evbit)) {
1649 dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]);
1650 dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]);
1651 }
1652 }
1653
1654 /**
1655 * input_reset_device() - reset/restore the state of input device
1656 * @dev: input device whose state needs to be reset
1657 *
1658 * This function tries to reset the state of an opened input device and
1659 * bring the internal state and the state of the hardware in sync with each other.
1660 * We mark all keys as released, restore LED state, repeat rate, etc.
1661 */
1662 void input_reset_device(struct input_dev *dev)
1663 {
1664 unsigned long flags;
1665
1666 mutex_lock(&dev->mutex);
1667 spin_lock_irqsave(&dev->event_lock, flags);
1668
1669 input_dev_toggle(dev, true);
1670 input_dev_release_keys(dev);
1671
1672 spin_unlock_irqrestore(&dev->event_lock, flags);
1673 mutex_unlock(&dev->mutex);
1674 }
1675 EXPORT_SYMBOL(input_reset_device);
1676
1677 #ifdef CONFIG_PM_SLEEP
1678 static int input_dev_suspend(struct device *dev)
1679 {
1680 struct input_dev *input_dev = to_input_dev(dev);
1681
1682 spin_lock_irq(&input_dev->event_lock);
1683
1684 /*
1685 * Keys that are pressed now are unlikely to be
1686 * still pressed when we resume.
1687 */
1688 input_dev_release_keys(input_dev);
1689
1690 /* Turn off LEDs and sounds, if any are active. */
1691 input_dev_toggle(input_dev, false);
1692
1693 spin_unlock_irq(&input_dev->event_lock);
1694
1695 return 0;
1696 }
1697
1698 static int input_dev_resume(struct device *dev)
1699 {
1700 struct input_dev *input_dev = to_input_dev(dev);
1701
1702 spin_lock_irq(&input_dev->event_lock);
1703
1704 /* Restore state of LEDs and sounds, if any were active. */
1705 input_dev_toggle(input_dev, true);
1706
1707 spin_unlock_irq(&input_dev->event_lock);
1708
1709 return 0;
1710 }
1711
1712 static int input_dev_freeze(struct device *dev)
1713 {
1714 struct input_dev *input_dev = to_input_dev(dev);
1715
1716 spin_lock_irq(&input_dev->event_lock);
1717
1718 /*
1719 * Keys that are pressed now are unlikely to be
1720 * still pressed when we resume.
1721 */
1722 input_dev_release_keys(input_dev);
1723
1724 spin_unlock_irq(&input_dev->event_lock);
1725
1726 return 0;
1727 }
1728
1729 static int input_dev_poweroff(struct device *dev)
1730 {
1731 struct input_dev *input_dev = to_input_dev(dev);
1732
1733 spin_lock_irq(&input_dev->event_lock);
1734
1735 /* Turn off LEDs and sounds, if any are active. */
1736 input_dev_toggle(input_dev, false);
1737
1738 spin_unlock_irq(&input_dev->event_lock);
1739
1740 return 0;
1741 }
1742
1743 static const struct dev_pm_ops input_dev_pm_ops = {
1744 .suspend = input_dev_suspend,
1745 .resume = input_dev_resume,
1746 .freeze = input_dev_freeze,
1747 .poweroff = input_dev_poweroff,
1748 .restore = input_dev_resume,
1749 };
1750 #endif /* CONFIG_PM_SLEEP */
1751
1752 static const struct device_type input_dev_type = {
1753 .groups = input_dev_attr_groups,
1754 .release = input_dev_release,
1755 .uevent = input_dev_uevent,
1756 #ifdef CONFIG_PM_SLEEP
1757 .pm = &input_dev_pm_ops,
1758 #endif
1759 };
1760
1761 static char *input_devnode(struct device *dev, umode_t *mode)
1762 {
1763 return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
1764 }
1765
1766 struct class input_class = {
1767 .name = "input",
1768 .devnode = input_devnode,
1769 };
1770 EXPORT_SYMBOL_GPL(input_class);
1771
1772 /**
1773 * input_allocate_device - allocate memory for new input device
1774 *
1775 * Returns prepared struct input_dev or %NULL.
1776 *
1777 * NOTE: Use input_free_device() to free devices that have not been
1778 * registered; input_unregister_device() should be used for already
1779 * registered devices.
1780 */
1781 struct input_dev *input_allocate_device(void)
1782 {
1783 static atomic_t input_no = ATOMIC_INIT(-1);
1784 struct input_dev *dev;
1785
1786 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1787 if (dev) {
1788 dev->dev.type = &input_dev_type;
1789 dev->dev.class = &input_class;
1790 device_initialize(&dev->dev);
1791 mutex_init(&dev->mutex);
1792 spin_lock_init(&dev->event_lock);
1793 init_timer(&dev->timer);
1794 INIT_LIST_HEAD(&dev->h_list);
1795 INIT_LIST_HEAD(&dev->node);
1796
1797 dev_set_name(&dev->dev, "input%lu",
1798 (unsigned long)atomic_inc_return(&input_no));
1799
1800 __module_get(THIS_MODULE);
1801 }
1802
1803 return dev;
1804 }
1805 EXPORT_SYMBOL(input_allocate_device);
1806
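/*
 * Illustrative sketch (hypothetical driver; all names assumed): the classic
 * non-managed life cycle pairs input_allocate_device() with either
 * input_free_device() on failure or input_unregister_device() after a
 * successful registration:
 *
 *	struct input_dev *dev;
 *	int error;
 *
 *	dev = input_allocate_device();
 *	if (!dev)
 *		return -ENOMEM;
 *
 *	dev->name = "Example Button";
 *	input_set_capability(dev, EV_KEY, KEY_POWER);
 *
 *	error = input_register_device(dev);
 *	if (error) {
 *		input_free_device(dev);
 *		return error;
 *	}
 *
 *	return 0;
 */
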
1807 struct input_devres {
1808 struct input_dev *input;
1809 };
1810
1811 static int devm_input_device_match(struct device *dev, void *res, void *data)
1812 {
1813 struct input_devres *devres = res;
1814
1815 return devres->input == data;
1816 }
1817
1818 static void devm_input_device_release(struct device *dev, void *res)
1819 {
1820 struct input_devres *devres = res;
1821 struct input_dev *input = devres->input;
1822
1823 dev_dbg(dev, "%s: dropping reference to %s\n",
1824 __func__, dev_name(&input->dev));
1825 input_put_device(input);
1826 }
1827
1828 /**
1829 * devm_input_allocate_device - allocate managed input device
1830 * @dev: device owning the input device being created
1831 *
1832 * Returns prepared struct input_dev or %NULL.
1833 *
1834 * Managed input devices do not need to be explicitly unregistered or
1835 * freed as that is done automatically when the owner device unbinds from
1836 * its driver (or binding fails). Once a managed input device is allocated,
1837 * it is ready to be set up and registered in the same fashion as a regular
1838 * input device. There are no special devm_input_device_[un]register()
1839 * variants; the regular ones work with both managed and unmanaged devices,
1840 * should you need them. In most cases, however, a managed input device need
1841 * not be explicitly unregistered or freed.
1842 *
1843 * NOTE: the owner device is set up as the parent of the input device and
1844 * users should not override it.
1845 */
1846 struct input_dev *devm_input_allocate_device(struct device *dev)
1847 {
1848 struct input_dev *input;
1849 struct input_devres *devres;
1850
1851 devres = devres_alloc(devm_input_device_release,
1852 sizeof(*devres), GFP_KERNEL);
1853 if (!devres)
1854 return NULL;
1855
1856 input = input_allocate_device();
1857 if (!input) {
1858 devres_free(devres);
1859 return NULL;
1860 }
1861
1862 input->dev.parent = dev;
1863 input->devres_managed = true;
1864
1865 devres->input = input;
1866 devres_add(dev, devres);
1867
1868 return input;
1869 }
1870 EXPORT_SYMBOL(devm_input_allocate_device);
1871
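/*
 * Illustrative sketch (hypothetical probe function; 'pdev' and 'input' are
 * assumed names): with the managed variant the error path needs no explicit
 * free, since devres drops the reference when the owner device unbinds:
 *
 *	input = devm_input_allocate_device(&pdev->dev);
 *	if (!input)
 *		return -ENOMEM;
 *
 *	input->name = "Example Button";
 *	input_set_capability(input, EV_KEY, KEY_POWER);
 *
 *	return input_register_device(input);
 */
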
1872 /**
1873 * input_free_device - free memory occupied by input_dev structure
1874 * @dev: input device to free
1875 *
1876 * This function should only be used if input_register_device()
1877 * was not called yet or if it failed. Once a device has been registered,
1878 * use input_unregister_device(); memory will be freed once the last
1879 * reference to the device is dropped.
1880 *
1881 * The device should be allocated by input_allocate_device().
1882 *
1883 * NOTE: If there are references to the input device then memory
1884 * will not be freed until the last reference is dropped.
1885 */
1886 void input_free_device(struct input_dev *dev)
1887 {
1888 if (dev) {
1889 if (dev->devres_managed)
1890 WARN_ON(devres_destroy(dev->dev.parent,
1891 devm_input_device_release,
1892 devm_input_device_match,
1893 dev));
1894 input_put_device(dev);
1895 }
1896 }
1897 EXPORT_SYMBOL(input_free_device);
1898
1899 /**
1900 * input_set_capability - mark device as capable of a certain event
1901 * @dev: device that is capable of emitting or accepting event
1902 * @type: type of the event (EV_KEY, EV_REL, etc...)
1903 * @code: event code
1904 *
1905 * In addition to setting up the corresponding bit in the appropriate
1906 * capability bitmap, the function also adjusts dev->evbit.
1907 */
1908 void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
1909 {
1910 switch (type) {
1911 case EV_KEY:
1912 __set_bit(code, dev->keybit);
1913 break;
1914
1915 case EV_REL:
1916 __set_bit(code, dev->relbit);
1917 break;
1918
1919 case EV_ABS:
1920 input_alloc_absinfo(dev);
1921 if (!dev->absinfo)
1922 return;
1923
1924 __set_bit(code, dev->absbit);
1925 break;
1926
1927 case EV_MSC:
1928 __set_bit(code, dev->mscbit);
1929 break;
1930
1931 case EV_SW:
1932 __set_bit(code, dev->swbit);
1933 break;
1934
1935 case EV_LED:
1936 __set_bit(code, dev->ledbit);
1937 break;
1938
1939 case EV_SND:
1940 __set_bit(code, dev->sndbit);
1941 break;
1942
1943 case EV_FF:
1944 __set_bit(code, dev->ffbit);
1945 break;
1946
1947 case EV_PWR:
1948 /* do nothing */
1949 break;
1950
1951 default:
1952 pr_err("input_set_capability: unknown type %u (code %u)\n",
1953 type, code);
1954 dump_stack();
1955 return;
1956 }
1957
1958 __set_bit(type, dev->evbit);
1959 }
1960 EXPORT_SYMBOL(input_set_capability);
1961
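/*
 * Illustrative sketch (hypothetical device; 'dev' is an assumed name):
 * declaring that a device emits a couple of key and relative events. Each
 * call sets both the per-code bit and the corresponding EV_* bit in
 * dev->evbit:
 *
 *	input_set_capability(dev, EV_KEY, BTN_LEFT);
 *	input_set_capability(dev, EV_KEY, BTN_RIGHT);
 *	input_set_capability(dev, EV_REL, REL_X);
 *	input_set_capability(dev, EV_REL, REL_Y);
 */
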
1962 static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
1963 {
1964 int mt_slots;
1965 int i;
1966 unsigned int events;
1967
1968 if (dev->mt) {
1969 mt_slots = dev->mt->num_slots;
1970 } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
1971 mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
1972 dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
1973 mt_slots = clamp(mt_slots, 2, 32);
1974 } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
1975 mt_slots = 2;
1976 } else {
1977 mt_slots = 0;
1978 }
1979
1980 events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
1981
1982 if (test_bit(EV_ABS, dev->evbit))
1983 for_each_set_bit(i, dev->absbit, ABS_CNT)
1984 events += input_is_mt_axis(i) ? mt_slots : 1;
1985
1986 if (test_bit(EV_REL, dev->evbit))
1987 events += bitmap_weight(dev->relbit, REL_CNT);
1988
1989 /* Make room for KEY and MSC events */
1990 events += 7;
1991
1992 return events;
1993 }
1994
1995 #define INPUT_CLEANSE_BITMASK(dev, type, bits) \
1996 do { \
1997 if (!test_bit(EV_##type, dev->evbit)) \
1998 memset(dev->bits##bit, 0, \
1999 sizeof(dev->bits##bit)); \
2000 } while (0)
2001
2002 static void input_cleanse_bitmasks(struct input_dev *dev)
2003 {
2004 INPUT_CLEANSE_BITMASK(dev, KEY, key);
2005 INPUT_CLEANSE_BITMASK(dev, REL, rel);
2006 INPUT_CLEANSE_BITMASK(dev, ABS, abs);
2007 INPUT_CLEANSE_BITMASK(dev, MSC, msc);
2008 INPUT_CLEANSE_BITMASK(dev, LED, led);
2009 INPUT_CLEANSE_BITMASK(dev, SND, snd);
2010 INPUT_CLEANSE_BITMASK(dev, FF, ff);
2011 INPUT_CLEANSE_BITMASK(dev, SW, sw);
2012 }
2013
2014 static void __input_unregister_device(struct input_dev *dev)
2015 {
2016 struct input_handle *handle, *next;
2017
2018 input_disconnect_device(dev);
2019
2020 mutex_lock(&input_mutex);
2021
2022 list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
2023 handle->handler->disconnect(handle);
2024 WARN_ON(!list_empty(&dev->h_list));
2025
2026 del_timer_sync(&dev->timer);
2027 list_del_init(&dev->node);
2028
2029 input_wakeup_procfs_readers();
2030
2031 mutex_unlock(&input_mutex);
2032
2033 device_del(&dev->dev);
2034 }
2035
2036 static void devm_input_device_unregister(struct device *dev, void *res)
2037 {
2038 struct input_devres *devres = res;
2039 struct input_dev *input = devres->input;
2040
2041 dev_dbg(dev, "%s: unregistering device %s\n",
2042 __func__, dev_name(&input->dev));
2043 __input_unregister_device(input);
2044 }
2045
2046 /**
2047 * input_enable_softrepeat - enable software autorepeat
2048 * @dev: input device
2049 * @delay: repeat delay
2050 * @period: repeat period
2051 *
2052 * Enable software autorepeat on the input device.
2053 */
2054 void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
2055 {
2056 dev->timer.data = (unsigned long) dev;
2057 dev->timer.function = input_repeat_key;
2058 dev->rep[REP_DELAY] = delay;
2059 dev->rep[REP_PERIOD] = period;
2060 }
2061 EXPORT_SYMBOL(input_enable_softrepeat);
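/*
 * Usage sketch: a hypothetical driver requesting software autorepeat with
 * custom timings before registration (mybtn_* is illustrative).  If
 * dev->rep[] is left at zero, input_register_device() installs the default
 * 250 ms delay / 33 ms period instead.
 */
#if 0	/* illustrative only, not built */
static void mybtn_setup_autorepeat(struct input_dev *input)
{
	__set_bit(EV_REP, input->evbit);		/* device autorepeats */
	input_enable_softrepeat(input, 400, 100);	/* 400 ms delay, 100 ms period */
}
#endif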
2062
2063 /**
2064 * input_register_device - register device with input core
2065 * @dev: device to be registered
2066 *
2067 * This function registers the device with the input core. The device must be
2068 * allocated with input_allocate_device() and have all of its capabilities
2069 * set up before registering.
2070 * If this function fails, the device must be freed with input_free_device().
2071 * Once the device has been successfully registered it can be unregistered
2072 * with input_unregister_device(); input_free_device() should not be
2073 * called in this case.
2074 *
2075 * Note that this function is also used to register managed input devices
2076 * (ones allocated with devm_input_allocate_device()). Such managed input
2077 * devices need not be explicitly unregistered or freed; their teardown
2078 * is controlled by the devres infrastructure. It is also worth noting
2079 * that teardown of managed input devices is internally a 2-step process:
2080 * the registered managed input device is first unregistered, but stays in
2081 * memory and can still handle input_event() calls (although events will
2082 * not be delivered anywhere). The freeing of the managed input device will
2083 * happen later, when the devres stack is unwound to the point where the
2084 * device allocation was made.
2085 */
2086 int input_register_device(struct input_dev *dev)
2087 {
2088 struct input_devres *devres = NULL;
2089 struct input_handler *handler;
2090 unsigned int packet_size;
2091 const char *path;
2092 int error;
2093
2094 if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
2095 dev_err(&dev->dev,
2096 "Absolute device without dev->absinfo, refusing to register\n");
2097 return -EINVAL;
2098 }
2099
2100 if (dev->devres_managed) {
2101 devres = devres_alloc(devm_input_device_unregister,
2102 sizeof(*devres), GFP_KERNEL);
2103 if (!devres)
2104 return -ENOMEM;
2105
2106 devres->input = dev;
2107 }
2108
2109 /* Every input device generates EV_SYN/SYN_REPORT events. */
2110 __set_bit(EV_SYN, dev->evbit);
2111
2112 /* KEY_RESERVED is not supposed to be transmitted to userspace. */
2113 __clear_bit(KEY_RESERVED, dev->keybit);
2114
2115 /* Make sure that bitmasks not mentioned in dev->evbit are clean. */
2116 input_cleanse_bitmasks(dev);
2117
2118 packet_size = input_estimate_events_per_packet(dev);
2119 if (dev->hint_events_per_packet < packet_size)
2120 dev->hint_events_per_packet = packet_size;
2121
2122 dev->max_vals = dev->hint_events_per_packet + 2;
2123 dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
2124 if (!dev->vals) {
2125 error = -ENOMEM;
2126 goto err_devres_free;
2127 }
2128
2129 /*
2130 * If delay and period are pre-set by the driver, then autorepeating
2131 * is handled by the driver itself and we don't do it in input.c.
2132 */
2133 if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD])
2134 input_enable_softrepeat(dev, 250, 33);
2135
2136 if (!dev->getkeycode)
2137 dev->getkeycode = input_default_getkeycode;
2138
2139 if (!dev->setkeycode)
2140 dev->setkeycode = input_default_setkeycode;
2141
2142 error = device_add(&dev->dev);
2143 if (error)
2144 goto err_free_vals;
2145
2146 path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
2147 pr_info("%s as %s\n",
2148 dev->name ? dev->name : "Unspecified device",
2149 path ? path : "N/A");
2150 kfree(path);
2151
2152 error = mutex_lock_interruptible(&input_mutex);
2153 if (error)
2154 goto err_device_del;
2155
2156 list_add_tail(&dev->node, &input_dev_list);
2157
2158 list_for_each_entry(handler, &input_handler_list, node)
2159 input_attach_handler(dev, handler);
2160
2161 input_wakeup_procfs_readers();
2162
2163 mutex_unlock(&input_mutex);
2164
2165 if (dev->devres_managed) {
2166 dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
2167 __func__, dev_name(&dev->dev));
2168 devres_add(dev->dev.parent, devres);
2169 }
2170 return 0;
2171
2172 err_device_del:
2173 device_del(&dev->dev);
2174 err_free_vals:
2175 kfree(dev->vals);
2176 dev->vals = NULL;
2177 err_devres_free:
2178 devres_free(devres);
2179 return error;
2180 }
2181 EXPORT_SYMBOL(input_register_device);
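/*
 * Usage sketch: a hypothetical, non-managed registration path showing the
 * error-handling convention described above (mybtn_* is illustrative):
 * input_free_device() is used only when registration never succeeded.
 */
#if 0	/* illustrative only, not built */
static int mybtn_setup(struct device *parent, struct input_dev **out)
{
	struct input_dev *input;
	int error;

	input = input_allocate_device();
	if (!input)
		return -ENOMEM;

	input->name = "Example Button";
	input->dev.parent = parent;
	input_set_capability(input, EV_KEY, KEY_POWER);

	error = input_register_device(input);
	if (error) {
		/* never registered, so we free it ourselves */
		input_free_device(input);
		return error;
	}

	*out = input;
	return 0;
}
#endif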
2182
2183 /**
2184 * input_unregister_device - unregister previously registered device
2185 * @dev: device to be unregistered
2186 *
2187 * This function unregisters an input device. Once the device is unregistered
2188 * the caller should not try to access it, as it may be freed at any moment.
2189 */
2190 void input_unregister_device(struct input_dev *dev)
2191 {
2192 if (dev->devres_managed) {
2193 WARN_ON(devres_destroy(dev->dev.parent,
2194 devm_input_device_unregister,
2195 devm_input_device_match,
2196 dev));
2197 __input_unregister_device(dev);
2198 /*
2199 * We do not do input_put_device() here because it will be done
2200 * when 2nd devres fires up.
2201 */
2202 } else {
2203 __input_unregister_device(dev);
2204 input_put_device(dev);
2205 }
2206 }
2207 EXPORT_SYMBOL(input_unregister_device);
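/*
 * Usage sketch: teardown of the non-managed device registered above
 * (mybtn_* is illustrative).
 */
#if 0	/* illustrative only, not built */
static void mybtn_teardown(struct input_dev *input)
{
	/*
	 * This single call is the whole teardown for a successfully
	 * registered, non-managed device; input_free_device() must not be
	 * called and "input" must not be touched afterwards.
	 */
	input_unregister_device(input);
}
#endif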
2208
2209 /**
2210 * input_register_handler - register a new input handler
2211 * @handler: handler to be registered
2212 *
2213 * This function registers a new input handler (interface) for input
2214 * devices in the system and attaches it to all input devices that
2215 * are compatible with the handler.
2216 */
2217 int input_register_handler(struct input_handler *handler)
2218 {
2219 struct input_dev *dev;
2220 int error;
2221
2222 error = mutex_lock_interruptible(&input_mutex);
2223 if (error)
2224 return error;
2225
2226 INIT_LIST_HEAD(&handler->h_list);
2227
2228 list_add_tail(&handler->node, &input_handler_list);
2229
2230 list_for_each_entry(dev, &input_dev_list, node)
2231 input_attach_handler(dev, handler);
2232
2233 input_wakeup_procfs_readers();
2234
2235 mutex_unlock(&input_mutex);
2236 return 0;
2237 }
2238 EXPORT_SYMBOL(input_register_handler);
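/*
 * Usage sketch: a minimal, hypothetical handler ("myhandler", illustrative
 * name) that matches every device and logs key events.  Its connect() and
 * disconnect() methods are sketched after input_register_handle() and
 * input_unregister_handle() below.
 */
#if 0	/* illustrative only, not built */
static int myhandler_connect(struct input_handler *handler,
			     struct input_dev *dev,
			     const struct input_device_id *id);
static void myhandler_disconnect(struct input_handle *handle);

static void myhandler_event(struct input_handle *handle,
			    unsigned int type, unsigned int code, int value)
{
	if (type == EV_KEY)
		pr_debug("%s: key %u -> %d\n", handle->dev->name, code, value);
}

static const struct input_device_id myhandler_ids[] = {
	{ .driver_info = 1 },	/* matches every input device */
	{ },			/* terminating empty entry */
};

static struct input_handler myhandler = {
	.event		= myhandler_event,
	.connect	= myhandler_connect,
	.disconnect	= myhandler_disconnect,
	.name		= "myhandler",
	.id_table	= myhandler_ids,
};

static int __init myhandler_init(void)
{
	return input_register_handler(&myhandler);
}

static void __exit myhandler_exit(void)
{
	input_unregister_handler(&myhandler);
}
#endif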
2239
2240 /**
2241 * input_unregister_handler - unregisters an input handler
2242 * @handler: handler to be unregistered
2243 *
2244 * This function disconnects a handler from its input devices and
2245 * removes it from the list of known handlers.
2246 */
2247 void input_unregister_handler(struct input_handler *handler)
2248 {
2249 struct input_handle *handle, *next;
2250
2251 mutex_lock(&input_mutex);
2252
2253 list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
2254 handler->disconnect(handle);
2255 WARN_ON(!list_empty(&handler->h_list));
2256
2257 list_del_init(&handler->node);
2258
2259 input_wakeup_procfs_readers();
2260
2261 mutex_unlock(&input_mutex);
2262 }
2263 EXPORT_SYMBOL(input_unregister_handler);
2264
2265 /**
2266 * input_handler_for_each_handle - handle iterator
2267 * @handler: input handler to iterate
2268 * @data: data for the callback
2269 * @fn: function to be called for each handle
2270 *
2271 * Iterate over @handler's list of handles and call @fn for each, passing
2272 * it @data; stop when @fn returns a non-zero value. The function uses
2273 * RCU to traverse the list and therefore may be used in atomic
2274 * contexts. The @fn callback is invoked from within an RCU critical
2275 * section and thus must not sleep.
2276 */
2277 int input_handler_for_each_handle(struct input_handler *handler, void *data,
2278 int (*fn)(struct input_handle *, void *))
2279 {
2280 struct input_handle *handle;
2281 int retval = 0;
2282
2283 rcu_read_lock();
2284
2285 list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
2286 retval = fn(handle, data);
2287 if (retval)
2288 break;
2289 }
2290
2291 rcu_read_unlock();
2292
2293 return retval;
2294 }
2295 EXPORT_SYMBOL(input_handler_for_each_handle);
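/*
 * Usage sketch, continuing the hypothetical "myhandler" example: count the
 * currently open handles.  The callback runs under rcu_read_lock() and
 * therefore must not sleep.
 */
#if 0	/* illustrative only, not built */
static int myhandler_count_open(struct input_handle *handle, void *data)
{
	unsigned int *count = data;

	if (handle->open)
		(*count)++;

	return 0;	/* zero means "keep iterating" */
}

static unsigned int myhandler_open_handles(void)
{
	unsigned int count = 0;

	input_handler_for_each_handle(&myhandler, &count, myhandler_count_open);
	return count;
}
#endif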
2296
2297 /**
2298 * input_register_handle - register a new input handle
2299 * @handle: handle to register
2300 *
2301 * This function puts a new input handle onto the device's
2302 * and handler's lists so that events can flow through
2303 * it once it is opened using input_open_device().
2304 *
2305 * This function is supposed to be called from handler's
2306 * connect() method.
2307 */
2308 int input_register_handle(struct input_handle *handle)
2309 {
2310 struct input_handler *handler = handle->handler;
2311 struct input_dev *dev = handle->dev;
2312 int error;
2313
2314 /*
2315 * We take dev->mutex here to prevent race with
2316 * input_release_device().
2317 */
2318 error = mutex_lock_interruptible(&dev->mutex);
2319 if (error)
2320 return error;
2321
2322 /*
2323 * Filters go to the head of the list, normal handlers
2324 * to the tail.
2325 */
2326 if (handler->filter)
2327 list_add_rcu(&handle->d_node, &dev->h_list);
2328 else
2329 list_add_tail_rcu(&handle->d_node, &dev->h_list);
2330
2331 mutex_unlock(&dev->mutex);
2332
2333 /*
2334 * Since we are supposed to be called from ->connect()
2335 * which is mutually exclusive with ->disconnect()
2336 * we can't be racing with input_unregister_handle()
2337 * and so separate lock is not needed here.
2338 */
2339 list_add_tail_rcu(&handle->h_node, &handler->h_list);
2340
2341 if (handler->start)
2342 handler->start(handle);
2343
2344 return 0;
2345 }
2346 EXPORT_SYMBOL(input_register_handle);
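/*
 * Usage sketch, continuing the hypothetical "myhandler" example: a typical
 * connect() method that allocates a handle, registers it and opens the
 * device so events start flowing.
 */
#if 0	/* illustrative only, not built */
static int myhandler_connect(struct input_handler *handler,
			     struct input_dev *dev,
			     const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "myhandler";

	error = input_register_handle(handle);
	if (error)
		goto err_free_handle;

	error = input_open_device(handle);
	if (error)
		goto err_unregister_handle;

	return 0;

 err_unregister_handle:
	input_unregister_handle(handle);
 err_free_handle:
	kfree(handle);
	return error;
}
#endif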
2347
2348 /**
2349 * input_unregister_handle - unregister an input handle
2350 * @handle: handle to unregister
2351 *
2352 * This function removes the input handle from the device's
2353 * and handler's lists.
2354 *
2355 * This function is supposed to be called from handler's
2356 * disconnect() method.
2357 */
2358 void input_unregister_handle(struct input_handle *handle)
2359 {
2360 struct input_dev *dev = handle->dev;
2361
2362 list_del_rcu(&handle->h_node);
2363
2364 /*
2365 * Take dev->mutex to prevent race with input_release_device().
2366 */
2367 mutex_lock(&dev->mutex);
2368 list_del_rcu(&handle->d_node);
2369 mutex_unlock(&dev->mutex);
2370
2371 synchronize_rcu();
2372 }
2373 EXPORT_SYMBOL(input_unregister_handle);
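/*
 * Usage sketch, continuing the hypothetical "myhandler" example: the
 * matching disconnect() method undoing connect() above.
 */
#if 0	/* illustrative only, not built */
static void myhandler_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
#endif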
2374
2375 /**
2376 * input_get_new_minor - allocates a new input minor number
2377 * @legacy_base: beginning of the legacy range to be searched
2378 * @legacy_num: size of legacy range
2379 * @allow_dynamic: whether we can also take ID from the dynamic range
2380 *
2381 * This function allocates a new device minor from the input major namespace.
2382 * The caller can request a legacy minor by specifying the @legacy_base and
2383 * @legacy_num parameters, and whether an ID can be allocated from the dynamic
2384 * range if there are no free IDs in the legacy range.
2385 */
2386 int input_get_new_minor(int legacy_base, unsigned int legacy_num,
2387 bool allow_dynamic)
2388 {
2389 /*
2390 * This function should be called from input handler's ->connect()
2391 * methods, which are serialized with input_mutex, so no additional
2392 * locking is needed here.
2393 */
2394 if (legacy_base >= 0) {
2395 int minor = ida_simple_get(&input_ida,
2396 legacy_base,
2397 legacy_base + legacy_num,
2398 GFP_KERNEL);
2399 if (minor >= 0 || !allow_dynamic)
2400 return minor;
2401 }
2402
2403 return ida_simple_get(&input_ida,
2404 INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES,
2405 GFP_KERNEL);
2406 }
2407 EXPORT_SYMBOL(input_get_new_minor);
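/*
 * Usage sketch: a hypothetical character-device handler reserving a minor
 * from a made-up legacy range, falling back to the dynamic range
 * (MYDEV_* names are illustrative).
 */
#if 0	/* illustrative only, not built */
#define MYDEV_MINOR_BASE	32	/* assumed legacy base */
#define MYDEV_MINORS		32	/* assumed legacy range size */

static int mydev_reserve_minor(void)
{
	int minor;

	minor = input_get_new_minor(MYDEV_MINOR_BASE, MYDEV_MINORS, true);
	if (minor < 0) {
		pr_err("failed to reserve new minor: %d\n", minor);
		return minor;
	}

	/*
	 * The character device would be created here for
	 * MKDEV(INPUT_MAJOR, minor); if that fails, the minor has to be
	 * returned with input_free_minor(minor).
	 */
	return minor;
}
#endif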
2408
2409 /**
2410 * input_free_minor - release previously allocated minor
2411 * @minor: minor to be released
2412 *
2413 * This function releases previously allocated input minor so that it can be
2414 * reused later.
2415 */
2416 void input_free_minor(unsigned int minor)
2417 {
2418 ida_simple_remove(&input_ida, minor);
2419 }
2420 EXPORT_SYMBOL(input_free_minor);
2421
2422 static int __init input_init(void)
2423 {
2424 int err;
2425
2426 err = class_register(&input_class);
2427 if (err) {
2428 pr_err("unable to register input_dev class\n");
2429 return err;
2430 }
2431
2432 err = input_proc_init();
2433 if (err)
2434 goto fail1;
2435
2436 err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
2437 INPUT_MAX_CHAR_DEVICES, "input");
2438 if (err) {
2439 pr_err("unable to register char major %d\n", INPUT_MAJOR);
2440 goto fail2;
2441 }
2442
2443 return 0;
2444
2445 fail2: input_proc_exit();
2446 fail1: class_unregister(&input_class);
2447 return err;
2448 }
2449
2450 static void __exit input_exit(void)
2451 {
2452 input_proc_exit();
2453 unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
2454 INPUT_MAX_CHAR_DEVICES);
2455 class_unregister(&input_class);
2456 }
2457
2458 subsys_initcall(input_init);
2459 module_exit(input_exit);