/* rc-ir-raw.c - handle IR pulse/space events
 *
 * Copyright (C) 2010 by Mauro Carvalho Chehab
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
15 #include <linux/export.h>
16 #include <linux/kthread.h>
17 #include <linux/mutex.h>
18 #include <linux/kmod.h>
19 #include <linux/sched.h>
20 #include "rc-core-priv.h"
22 /* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
23 static LIST_HEAD(ir_raw_client_list
);
25 /* Used to handle IR raw handler extensions */
26 static DEFINE_MUTEX(ir_raw_handler_lock
);
27 static LIST_HEAD(ir_raw_handler_list
);
28 static atomic64_t available_protocols
= ATOMIC64_INIT(0);
30 static int ir_raw_event_thread(void *data
)
32 struct ir_raw_event ev
;
33 struct ir_raw_handler
*handler
;
34 struct ir_raw_event_ctrl
*raw
= (struct ir_raw_event_ctrl
*)data
;
37 mutex_lock(&ir_raw_handler_lock
);
38 while (kfifo_out(&raw
->kfifo
, &ev
, 1)) {
39 list_for_each_entry(handler
, &ir_raw_handler_list
, list
)
40 if (raw
->dev
->enabled_protocols
&
41 handler
->protocols
|| !handler
->protocols
)
42 handler
->decode(raw
->dev
, ev
);
45 mutex_unlock(&ir_raw_handler_lock
);
47 set_current_state(TASK_INTERRUPTIBLE
);
49 if (kthread_should_stop()) {
50 __set_current_state(TASK_RUNNING
);
52 } else if (!kfifo_is_empty(&raw
->kfifo
))
53 set_current_state(TASK_RUNNING
);
62 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
63 * @dev: the struct rc_dev device descriptor
64 * @ev: the struct ir_raw_event descriptor of the pulse/space
66 * This routine (which may be called from an interrupt context) stores a
67 * pulse/space duration for the raw ir decoding state machines. Pulses are
68 * signalled as positive values and spaces as negative values. A zero value
69 * will reset the decoding state machines.
71 int ir_raw_event_store(struct rc_dev
*dev
, struct ir_raw_event
*ev
)
76 IR_dprintk(2, "sample: (%05dus %s)\n",
77 TO_US(ev
->duration
), TO_STR(ev
->pulse
));
79 if (!kfifo_put(&dev
->raw
->kfifo
, *ev
)) {
80 dev_err(&dev
->dev
, "IR event FIFO is full!\n");
86 EXPORT_SYMBOL_GPL(ir_raw_event_store
);
89 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
90 * @dev: the struct rc_dev device descriptor
91 * @pulse: true for pulse, false for space
93 * This routine (which may be called from an interrupt context) is used to
94 * store the beginning of an ir pulse or space (or the start/end of ir
95 * reception) for the raw ir decoding state machines. This is used by
96 * hardware which does not provide durations directly but only interrupts
97 * (or similar events) on state change.
99 int ir_raw_event_store_edge(struct rc_dev
*dev
, bool pulse
)
102 DEFINE_IR_RAW_EVENT(ev
);
109 ev
.duration
= ktime_to_ns(ktime_sub(now
, dev
->raw
->last_event
));
112 rc
= ir_raw_event_store(dev
, &ev
);
114 dev
->raw
->last_event
= now
;
116 /* timer could be set to timeout (125ms by default) */
117 if (!timer_pending(&dev
->raw
->edge_handle
) ||
118 time_after(dev
->raw
->edge_handle
.expires
,
119 jiffies
+ msecs_to_jiffies(15))) {
120 mod_timer(&dev
->raw
->edge_handle
,
121 jiffies
+ msecs_to_jiffies(15));
126 EXPORT_SYMBOL_GPL(ir_raw_event_store_edge
);
129 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
130 * @dev: the struct rc_dev device descriptor
131 * @ev: the event that has occurred
133 * This routine (which may be called from an interrupt context) works
134 * in similar manner to ir_raw_event_store_edge.
135 * This routine is intended for devices with limited internal buffer
136 * It automerges samples of same type, and handles timeouts. Returns non-zero
137 * if the event was added, and zero if the event was ignored due to idle
140 int ir_raw_event_store_with_filter(struct rc_dev
*dev
, struct ir_raw_event
*ev
)
145 /* Ignore spaces in idle mode */
146 if (dev
->idle
&& !ev
->pulse
)
149 ir_raw_event_set_idle(dev
, false);
151 if (!dev
->raw
->this_ev
.duration
)
152 dev
->raw
->this_ev
= *ev
;
153 else if (ev
->pulse
== dev
->raw
->this_ev
.pulse
)
154 dev
->raw
->this_ev
.duration
+= ev
->duration
;
156 ir_raw_event_store(dev
, &dev
->raw
->this_ev
);
157 dev
->raw
->this_ev
= *ev
;
160 /* Enter idle mode if nessesary */
161 if (!ev
->pulse
&& dev
->timeout
&&
162 dev
->raw
->this_ev
.duration
>= dev
->timeout
)
163 ir_raw_event_set_idle(dev
, true);
167 EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter
);
170 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
171 * @dev: the struct rc_dev device descriptor
172 * @idle: whether the device is idle or not
174 void ir_raw_event_set_idle(struct rc_dev
*dev
, bool idle
)
179 IR_dprintk(2, "%s idle mode\n", idle
? "enter" : "leave");
182 dev
->raw
->this_ev
.timeout
= true;
183 ir_raw_event_store(dev
, &dev
->raw
->this_ev
);
184 init_ir_raw_event(&dev
->raw
->this_ev
);
188 dev
->s_idle(dev
, idle
);
192 EXPORT_SYMBOL_GPL(ir_raw_event_set_idle
);
195 * ir_raw_event_handle() - schedules the decoding of stored ir data
196 * @dev: the struct rc_dev device descriptor
198 * This routine will tell rc-core to start decoding stored ir data.
200 void ir_raw_event_handle(struct rc_dev
*dev
)
202 if (!dev
->raw
|| !dev
->raw
->thread
)
205 wake_up_process(dev
->raw
->thread
);
207 EXPORT_SYMBOL_GPL(ir_raw_event_handle
);
209 /* used internally by the sysfs interface */
211 ir_raw_get_allowed_protocols(void)
213 return atomic64_read(&available_protocols
);
216 static int change_protocol(struct rc_dev
*dev
, u64
*rc_proto
)
218 /* the caller will update dev->enabled_protocols */
222 static void ir_raw_disable_protocols(struct rc_dev
*dev
, u64 protocols
)
224 mutex_lock(&dev
->lock
);
225 dev
->enabled_protocols
&= ~protocols
;
226 mutex_unlock(&dev
->lock
);
230 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
231 * @ev: Pointer to pointer to next free event. *@ev is incremented for
232 * each raw event filled.
233 * @max: Maximum number of raw events to fill.
234 * @timings: Manchester modulation timings.
235 * @n: Number of bits of data.
236 * @data: Data bits to encode.
238 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
239 * modulation with the timing characteristics described by @timings, writing up
240 * to @max raw IR events using the *@ev pointer.
242 * Returns: 0 on success.
243 * -ENOBUFS if there isn't enough space in the array to fit the
244 * full encoded data. In this case all @max events will have been
247 int ir_raw_gen_manchester(struct ir_raw_event
**ev
, unsigned int max
,
248 const struct ir_raw_timings_manchester
*timings
,
249 unsigned int n
, u64 data
)
257 if (timings
->leader
) {
260 if (timings
->pulse_space_start
) {
261 init_ir_raw_event_duration((*ev
)++, 1, timings
->leader
);
265 init_ir_raw_event_duration((*ev
), 0, timings
->leader
);
267 init_ir_raw_event_duration((*ev
), 1, timings
->leader
);
271 /* continue existing signal */
274 /* from here on *ev will point to the last event rather than the next */
277 need_pulse
= !(data
& i
);
279 need_pulse
= !need_pulse
;
280 if (need_pulse
== !!(*ev
)->pulse
) {
281 (*ev
)->duration
+= timings
->clock
;
285 init_ir_raw_event_duration(++(*ev
), need_pulse
,
291 init_ir_raw_event_duration(++(*ev
), !need_pulse
,
296 if (timings
->trailer_space
) {
298 (*ev
)->duration
+= timings
->trailer_space
;
302 init_ir_raw_event_duration(++(*ev
), 0,
303 timings
->trailer_space
);
308 /* point to the next event rather than last event before returning */
312 EXPORT_SYMBOL(ir_raw_gen_manchester
);
315 * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
316 * @ev: Pointer to pointer to next free event. *@ev is incremented for
317 * each raw event filled.
318 * @max: Maximum number of raw events to fill.
319 * @timings: Pulse distance modulation timings.
320 * @n: Number of bits of data.
321 * @data: Data bits to encode.
323 * Encodes the @n least significant bits of @data using pulse-distance
324 * modulation with the timing characteristics described by @timings, writing up
325 * to @max raw IR events using the *@ev pointer.
327 * Returns: 0 on success.
328 * -ENOBUFS if there isn't enough space in the array to fit the
329 * full encoded data. In this case all @max events will have been
332 int ir_raw_gen_pd(struct ir_raw_event
**ev
, unsigned int max
,
333 const struct ir_raw_timings_pd
*timings
,
334 unsigned int n
, u64 data
)
340 if (timings
->header_pulse
) {
341 ret
= ir_raw_gen_pulse_space(ev
, &max
, timings
->header_pulse
,
342 timings
->header_space
);
347 if (timings
->msb_first
) {
348 for (i
= n
- 1; i
>= 0; --i
) {
349 space
= timings
->bit_space
[(data
>> i
) & 1];
350 ret
= ir_raw_gen_pulse_space(ev
, &max
,
357 for (i
= 0; i
< n
; ++i
, data
>>= 1) {
358 space
= timings
->bit_space
[data
& 1];
359 ret
= ir_raw_gen_pulse_space(ev
, &max
,
367 ret
= ir_raw_gen_pulse_space(ev
, &max
, timings
->trailer_pulse
,
368 timings
->trailer_space
);
371 EXPORT_SYMBOL(ir_raw_gen_pd
);
374 * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
375 * @ev: Pointer to pointer to next free event. *@ev is incremented for
376 * each raw event filled.
377 * @max: Maximum number of raw events to fill.
378 * @timings: Pulse distance modulation timings.
379 * @n: Number of bits of data.
380 * @data: Data bits to encode.
382 * Encodes the @n least significant bits of @data using space-distance
383 * modulation with the timing characteristics described by @timings, writing up
384 * to @max raw IR events using the *@ev pointer.
386 * Returns: 0 on success.
387 * -ENOBUFS if there isn't enough space in the array to fit the
388 * full encoded data. In this case all @max events will have been
391 int ir_raw_gen_pl(struct ir_raw_event
**ev
, unsigned int max
,
392 const struct ir_raw_timings_pl
*timings
,
393 unsigned int n
, u64 data
)
402 init_ir_raw_event_duration((*ev
)++, 1, timings
->header_pulse
);
404 if (timings
->msb_first
) {
405 for (i
= n
- 1; i
>= 0; --i
) {
408 init_ir_raw_event_duration((*ev
)++, 0,
412 pulse
= timings
->bit_pulse
[(data
>> i
) & 1];
413 init_ir_raw_event_duration((*ev
)++, 1, pulse
);
416 for (i
= 0; i
< n
; ++i
, data
>>= 1) {
419 init_ir_raw_event_duration((*ev
)++, 0,
423 pulse
= timings
->bit_pulse
[data
& 1];
424 init_ir_raw_event_duration((*ev
)++, 1, pulse
);
431 init_ir_raw_event_duration((*ev
)++, 0, timings
->trailer_space
);
435 EXPORT_SYMBOL(ir_raw_gen_pl
);
438 * ir_raw_encode_scancode() - Encode a scancode as raw events
440 * @protocol: protocol
441 * @scancode: scancode filter describing a single scancode
442 * @events: array of raw events to write into
443 * @max: max number of raw events
445 * Attempts to encode the scancode as raw events.
447 * Returns: The number of events written.
448 * -ENOBUFS if there isn't enough space in the array to fit the
449 * encoding. In this case all @max events will have been written.
450 * -EINVAL if the scancode is ambiguous or invalid, or if no
451 * compatible encoder was found.
453 int ir_raw_encode_scancode(enum rc_proto protocol
, u32 scancode
,
454 struct ir_raw_event
*events
, unsigned int max
)
456 struct ir_raw_handler
*handler
;
458 u64 mask
= 1ULL << protocol
;
460 mutex_lock(&ir_raw_handler_lock
);
461 list_for_each_entry(handler
, &ir_raw_handler_list
, list
) {
462 if (handler
->protocols
& mask
&& handler
->encode
) {
463 ret
= handler
->encode(protocol
, scancode
, events
, max
);
464 if (ret
>= 0 || ret
== -ENOBUFS
)
468 mutex_unlock(&ir_raw_handler_lock
);
472 EXPORT_SYMBOL(ir_raw_encode_scancode
);
474 static void edge_handle(struct timer_list
*t
)
476 struct ir_raw_event_ctrl
*raw
= from_timer(raw
, t
, edge_handle
);
477 struct rc_dev
*dev
= raw
->dev
;
478 ktime_t interval
= ktime_sub(ktime_get(), dev
->raw
->last_event
);
480 if (ktime_to_ns(interval
) >= dev
->timeout
) {
481 DEFINE_IR_RAW_EVENT(ev
);
484 ev
.duration
= ktime_to_ns(interval
);
486 ir_raw_event_store(dev
, &ev
);
488 mod_timer(&dev
->raw
->edge_handle
,
489 jiffies
+ nsecs_to_jiffies(dev
->timeout
-
490 ktime_to_ns(interval
)));
493 ir_raw_event_handle(dev
);
497 * Used to (un)register raw event clients
499 int ir_raw_event_prepare(struct rc_dev
*dev
)
501 static bool raw_init
; /* 'false' default value, raw decoders loaded? */
507 request_module("ir-lirc-codec");
511 dev
->raw
= kzalloc(sizeof(*dev
->raw
), GFP_KERNEL
);
516 dev
->change_protocol
= change_protocol
;
517 timer_setup(&dev
->raw
->edge_handle
, edge_handle
, 0);
518 INIT_KFIFO(dev
->raw
->kfifo
);
523 int ir_raw_event_register(struct rc_dev
*dev
)
525 struct ir_raw_handler
*handler
;
526 struct task_struct
*thread
;
529 * raw transmitters do not need any event registration
530 * because the event is coming from userspace
532 if (dev
->driver_type
!= RC_DRIVER_IR_RAW_TX
) {
533 thread
= kthread_run(ir_raw_event_thread
, dev
->raw
, "rc%u",
537 return PTR_ERR(thread
);
539 dev
->raw
->thread
= thread
;
542 mutex_lock(&ir_raw_handler_lock
);
543 list_add_tail(&dev
->raw
->list
, &ir_raw_client_list
);
544 list_for_each_entry(handler
, &ir_raw_handler_list
, list
)
545 if (handler
->raw_register
)
546 handler
->raw_register(dev
);
547 mutex_unlock(&ir_raw_handler_lock
);
552 void ir_raw_event_free(struct rc_dev
*dev
)
561 void ir_raw_event_unregister(struct rc_dev
*dev
)
563 struct ir_raw_handler
*handler
;
565 if (!dev
|| !dev
->raw
)
568 kthread_stop(dev
->raw
->thread
);
569 del_timer_sync(&dev
->raw
->edge_handle
);
571 mutex_lock(&ir_raw_handler_lock
);
572 list_del(&dev
->raw
->list
);
573 list_for_each_entry(handler
, &ir_raw_handler_list
, list
)
574 if (handler
->raw_unregister
)
575 handler
->raw_unregister(dev
);
576 mutex_unlock(&ir_raw_handler_lock
);
578 ir_raw_event_free(dev
);
582 * Extension interface - used to register the IR decoders
585 int ir_raw_handler_register(struct ir_raw_handler
*ir_raw_handler
)
587 struct ir_raw_event_ctrl
*raw
;
589 mutex_lock(&ir_raw_handler_lock
);
590 list_add_tail(&ir_raw_handler
->list
, &ir_raw_handler_list
);
591 if (ir_raw_handler
->raw_register
)
592 list_for_each_entry(raw
, &ir_raw_client_list
, list
)
593 ir_raw_handler
->raw_register(raw
->dev
);
594 atomic64_or(ir_raw_handler
->protocols
, &available_protocols
);
595 mutex_unlock(&ir_raw_handler_lock
);
599 EXPORT_SYMBOL(ir_raw_handler_register
);
601 void ir_raw_handler_unregister(struct ir_raw_handler
*ir_raw_handler
)
603 struct ir_raw_event_ctrl
*raw
;
604 u64 protocols
= ir_raw_handler
->protocols
;
606 mutex_lock(&ir_raw_handler_lock
);
607 list_del(&ir_raw_handler
->list
);
608 list_for_each_entry(raw
, &ir_raw_client_list
, list
) {
609 ir_raw_disable_protocols(raw
->dev
, protocols
);
610 if (ir_raw_handler
->raw_unregister
)
611 ir_raw_handler
->raw_unregister(raw
->dev
);
613 atomic64_andnot(protocols
, &available_protocols
);
614 mutex_unlock(&ir_raw_handler_lock
);
616 EXPORT_SYMBOL(ir_raw_handler_unregister
);