]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * driver for ENE KB3926 B/C/D/E/F CIR (pnp id: ENE0XXX) | |
3 | * | |
4 | * Copyright (C) 2010 Maxim Levitsky <maximlevitsky@gmail.com> | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU General Public License as | |
8 | * published by the Free Software Foundation; either version 2 of the | |
9 | * License, or (at your option) any later version. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, but | |
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | * General Public License for more details. | |
15 | * | |
16 | * Special thanks to: | |
17 | * Sami R. <maesesami@gmail.com> for lot of help in debugging and therefore | |
18 | * bringing to life support for transmission & learning mode. | |
19 | * | |
20 | * Charlie Andrews <charliethepilot@googlemail.com> for lots of help in | |
21 | * bringing up the support of new firmware buffer that is popular | |
22 | * on latest notebooks | |
23 | * | |
24 | * ENE for partial device documentation | |
25 | * | |
26 | */ | |
27 | ||
28 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
29 | ||
30 | #include <linux/kernel.h> | |
31 | #include <linux/module.h> | |
32 | #include <linux/pnp.h> | |
33 | #include <linux/io.h> | |
34 | #include <linux/interrupt.h> | |
35 | #include <linux/sched.h> | |
36 | #include <linux/slab.h> | |
37 | #include <media/rc-core.h> | |
38 | #include "ene_ir.h" | |
39 | ||
/* Module tunables; presumably exposed via module_param() further down the
 * file (outside this chunk) - TODO confirm */
static int sample_period;	/* RX/TX sample period, in microseconds (fed to US_TO_NS below) */
static bool learning_mode_force;	/* start with learning mode enabled (see ene_setup_default_settings) */
static int debug;	/* debug verbosity; also forces carrier detect in ene_rx_setup */
static bool txsim;	/* simulate TX-done interrupts with a timer (see ene_tx_sample) */
44 | ||
45 | static void ene_set_reg_addr(struct ene_device *dev, u16 reg) | |
46 | { | |
47 | outb(reg >> 8, dev->hw_io + ENE_ADDR_HI); | |
48 | outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO); | |
49 | } | |
50 | ||
51 | /* read a hardware register */ | |
52 | static u8 ene_read_reg(struct ene_device *dev, u16 reg) | |
53 | { | |
54 | u8 retval; | |
55 | ene_set_reg_addr(dev, reg); | |
56 | retval = inb(dev->hw_io + ENE_IO); | |
57 | dbg_regs("reg %04x == %02x", reg, retval); | |
58 | return retval; | |
59 | } | |
60 | ||
/* write a hardware register */
static void ene_write_reg(struct ene_device *dev, u16 reg, u8 value)
{
	dbg_regs("reg %04x <- %02x", reg, value);
	/* latch the target register, then write the value via the data port */
	ene_set_reg_addr(dev, reg);
	outb(value, dev->hw_io + ENE_IO);
}
68 | ||
69 | /* Set bits in hardware register */ | |
70 | static void ene_set_reg_mask(struct ene_device *dev, u16 reg, u8 mask) | |
71 | { | |
72 | dbg_regs("reg %04x |= %02x", reg, mask); | |
73 | ene_set_reg_addr(dev, reg); | |
74 | outb(inb(dev->hw_io + ENE_IO) | mask, dev->hw_io + ENE_IO); | |
75 | } | |
76 | ||
77 | /* Clear bits in hardware register */ | |
78 | static void ene_clear_reg_mask(struct ene_device *dev, u16 reg, u8 mask) | |
79 | { | |
80 | dbg_regs("reg %04x &= ~%02x ", reg, mask); | |
81 | ene_set_reg_addr(dev, reg); | |
82 | outb(inb(dev->hw_io + ENE_IO) & ~mask, dev->hw_io + ENE_IO); | |
83 | } | |
84 | ||
85 | /* A helper to set/clear a bit in register according to boolean variable */ | |
86 | static void ene_set_clear_reg_mask(struct ene_device *dev, u16 reg, u8 mask, | |
87 | bool set) | |
88 | { | |
89 | if (set) | |
90 | ene_set_reg_mask(dev, reg, mask); | |
91 | else | |
92 | ene_clear_reg_mask(dev, reg, mask); | |
93 | } | |
94 | ||
/* detect hardware features
 * Reads chip/firmware version registers, derives the driver-internal
 * hardware revision (ENE_HW_B/C/D) and the firmware feature flags.
 * Returns 0 on success, -ENODEV for disabled/unsupported hardware. */
static int ene_hw_detect(struct ene_device *dev)
{
	u8 chip_major, chip_minor;
	u8 hw_revision, old_ver;
	u8 fw_reg2, fw_reg1;

	/* Clearing the reserved ECSTS bit appears to expose the EC version
	 * registers; the bit is restored right afterwards -
	 * NOTE(review): assumed from the access pattern, confirm vs datasheet */
	ene_clear_reg_mask(dev, ENE_ECSTS, ENE_ECSTS_RSRVD);
	chip_major = ene_read_reg(dev, ENE_ECVER_MAJOR);
	chip_minor = ene_read_reg(dev, ENE_ECVER_MINOR);
	ene_set_reg_mask(dev, ENE_ECSTS, ENE_ECSTS_RSRVD);

	hw_revision = ene_read_reg(dev, ENE_ECHV);
	old_ver = ene_read_reg(dev, ENE_HW_VER_OLD);

	/* PLL frequency is split across two registers:
	 * full high byte plus the upper nibble of the low byte */
	dev->pll_freq = (ene_read_reg(dev, ENE_PLLFRH) << 4) +
		(ene_read_reg(dev, ENE_PLLFRL) >> 4);

	/* non-default sample period needs RX timing compensation
	 * (applied later in ene_isr) */
	if (sample_period != ENE_DEFAULT_SAMPLE_PERIOD)
		dev->rx_period_adjust =
			dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 2 : 4;

	/* all-ones revision: CIR block disabled by firmware/BIOS */
	if (hw_revision == 0xFF) {
		pr_warn("device seems to be disabled\n");
		pr_warn("send a mail to lirc-list@lists.sourceforge.net\n");
		pr_warn("please attach output of acpidump and dmidecode\n");
		return -ENODEV;
	}

	pr_notice("chip is 0x%02x%02x - kbver = 0x%02x, rev = 0x%02x\n",
		chip_major, chip_minor, old_ver, hw_revision);

	pr_notice("PLL freq = %d\n", dev->pll_freq);

	if (chip_major == 0x33) {
		pr_warn("chips 0x33xx aren't supported\n");
		return -ENODEV;
	}

	/* map chip ids to the driver-internal revision code */
	if (chip_major == 0x39 && chip_minor == 0x26 && hw_revision == 0xC0) {
		dev->hw_revision = ENE_HW_C;
		pr_notice("KB3926C detected\n");
	} else if (old_ver == 0x24 && hw_revision == 0xC0) {
		dev->hw_revision = ENE_HW_B;
		pr_notice("KB3926B detected\n");
	} else {
		dev->hw_revision = ENE_HW_D;
		pr_notice("KB3926D or higher detected\n");
	}

	/* detect features hardware supports */
	if (dev->hw_revision < ENE_HW_C)
		return 0;

	fw_reg1 = ene_read_reg(dev, ENE_FW1);
	fw_reg2 = ene_read_reg(dev, ENE_FW2);

	pr_notice("Firmware regs: %02x %02x\n", fw_reg1, fw_reg2);

	/* feature flags reported by the firmware */
	dev->hw_use_gpio_0a = !!(fw_reg2 & ENE_FW2_GP0A);
	dev->hw_learning_and_tx_capable = !!(fw_reg2 & ENE_FW2_LEARNING);
	dev->hw_extra_buffer = !!(fw_reg1 & ENE_FW1_HAS_EXTRA_BUF);

	/* fan input is only meaningful on learning/TX capable hardware */
	if (dev->hw_learning_and_tx_capable)
		dev->hw_fan_input = !!(fw_reg2 & ENE_FW2_FAN_INPUT);

	pr_notice("Hardware features:\n");

	if (dev->hw_learning_and_tx_capable) {
		pr_notice("* Supports transmitting & learning mode\n");
		pr_notice("   This feature is rare and therefore,\n");
		pr_notice("   you are welcome to test it,\n");
		pr_notice("   and/or contact the author via:\n");
		pr_notice("   lirc-list@lists.sourceforge.net\n");
		pr_notice("   or maximlevitsky@gmail.com\n");

		pr_notice("* Uses GPIO %s for IR raw input\n",
			dev->hw_use_gpio_0a ? "40" : "0A");

		if (dev->hw_fan_input)
			pr_notice("* Uses unused fan feedback input as source of demodulated IR data\n");
	}

	if (!dev->hw_fan_input)
		pr_notice("* Uses GPIO %s for IR demodulated input\n",
			dev->hw_use_gpio_0a ? "0A" : "40");

	if (dev->hw_extra_buffer)
		pr_notice("* Uses new style input buffer\n");
	return 0;
}
186 | ||
/* Read properties of hw sample buffer.
 * On new-style firmware this parses two extended buffer descriptors
 * (16-bit little-endian address + 8-bit length each) and validates them;
 * on failure it falls back to old-style buffering. */
static void ene_rx_setup_hw_buffer(struct ene_device *dev)
{
	u16 tmp;

	/* start reading from wherever the hardware currently writes */
	ene_rx_read_hw_pointer(dev);
	dev->r_pointer = dev->w_pointer;

	if (!dev->hw_extra_buffer) {
		/* old firmware: just two fixed packets */
		dev->buffer_len = ENE_FW_PACKET_SIZE * 2;
		return;
	}

	tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER);
	tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER+1) << 8;
	dev->extra_buf1_address = tmp;

	dev->extra_buf1_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 2);

	tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 3);
	tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 4) << 8;
	dev->extra_buf2_address = tmp;

	dev->extra_buf2_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 5);

	/* + 8 accounts for the main sample buffer that precedes the
	 * extended ones (see ene_rx_get_sample_reg) */
	dev->buffer_len = dev->extra_buf1_len + dev->extra_buf2_len + 8;

	pr_notice("Hardware uses 2 extended buffers:\n");
	pr_notice(" 0x%04x - len : %d\n",
		dev->extra_buf1_address, dev->extra_buf1_len);
	pr_notice(" 0x%04x - len : %d\n",
		dev->extra_buf2_address, dev->extra_buf2_len);

	pr_notice("Total buffer len = %d\n", dev->buffer_len);

	/* sanity-check what the firmware told us; the 0xEC00-0xFBFC window
	 * presumably corresponds to valid EC RAM - TODO confirm */
	if (dev->buffer_len > 64 || dev->buffer_len < 16)
		goto error;

	if (dev->extra_buf1_address > 0xFBFC ||
		dev->extra_buf1_address < 0xEC00)
		goto error;

	if (dev->extra_buf2_address > 0xFBFC ||
		dev->extra_buf2_address < 0xEC00)
		goto error;

	if (dev->r_pointer > dev->buffer_len)
		goto error;

	/* tell the firmware the driver will handle the extended buffer */
	ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
	return;
error:
	pr_warn("Error validating extra buffers, device probably won't work\n");
	dev->hw_extra_buffer = false;
	ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
}
243 | ||
244 | ||
245 | /* Restore the pointers to extra buffers - to make module reload work*/ | |
246 | static void ene_rx_restore_hw_buffer(struct ene_device *dev) | |
247 | { | |
248 | if (!dev->hw_extra_buffer) | |
249 | return; | |
250 | ||
251 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 0, | |
252 | dev->extra_buf1_address & 0xFF); | |
253 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 1, | |
254 | dev->extra_buf1_address >> 8); | |
255 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 2, dev->extra_buf1_len); | |
256 | ||
257 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 3, | |
258 | dev->extra_buf2_address & 0xFF); | |
259 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 4, | |
260 | dev->extra_buf2_address >> 8); | |
261 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 5, | |
262 | dev->extra_buf2_len); | |
263 | ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND); | |
264 | } | |
265 | ||
/* Read hardware write pointer */
static void ene_rx_read_hw_pointer(struct ene_device *dev)
{
	if (dev->hw_extra_buffer)
		dev->w_pointer = ene_read_reg(dev, ENE_FW_RX_POINTER);
	else
		/* old firmware has no pointer register: the WPTR flag in FW2
		 * selects which of the two fixed packets is in use.
		 * Note: '&' binds tighter than '?:', so the flag is masked
		 * first, then mapped to offset 0 or ENE_FW_PACKET_SIZE. */
		dev->w_pointer = ene_read_reg(dev, ENE_FW2)
			& ENE_FW2_BUF_WPTR ? 0 : ENE_FW_PACKET_SIZE;

	dbg_verbose("RB: HW write pointer: %02x, driver read pointer: %02x",
		dev->w_pointer, dev->r_pointer);
}
278 | ||
/* Gets address of next sample from HW ring buffer.
 * Returns the register address to read, or 0 when no new sample is
 * available (0 doubles as the "empty" sentinel).
 * The logical ring is: 8 bytes of main buffer, then extra buffer 1,
 * then extra buffer 2, back to back. */
static int ene_rx_get_sample_reg(struct ene_device *dev)
{
	int r_pointer;

	/* caught up with the hardware? refresh the write pointer once */
	if (dev->r_pointer == dev->w_pointer) {
		dbg_verbose("RB: hit end, try update w_pointer");
		ene_rx_read_hw_pointer(dev);
	}

	if (dev->r_pointer == dev->w_pointer) {
		dbg_verbose("RB: end of data at %d", dev->r_pointer);
		return 0;
	}

	dbg_verbose("RB: reading at offset %d", dev->r_pointer);
	r_pointer = dev->r_pointer;

	/* advance and wrap the read pointer before translating the offset */
	dev->r_pointer++;
	if (dev->r_pointer == dev->buffer_len)
		dev->r_pointer = 0;

	dbg_verbose("RB: next read will be from offset %d", dev->r_pointer);

	/* first 8 bytes live in the main sample buffer... */
	if (r_pointer < 8) {
		dbg_verbose("RB: read at main buffer at %d", r_pointer);
		return ENE_FW_SAMPLE_BUFFER + r_pointer;
	}

	r_pointer -= 8;

	/* ...then the two extended buffers, in order */
	if (r_pointer < dev->extra_buf1_len) {
		dbg_verbose("RB: read at 1st extra buffer at %d", r_pointer);
		return dev->extra_buf1_address + r_pointer;
	}

	r_pointer -= dev->extra_buf1_len;

	if (r_pointer < dev->extra_buf2_len) {
		dbg_verbose("RB: read at 2nd extra buffer at %d", r_pointer);
		return dev->extra_buf2_address + r_pointer;
	}

	/* only reachable if buffer_len disagrees with the region lengths */
	dbg("attempt to read beyond ring buffer end");
	return 0;
}
325 | ||
/* Sense current received carrier and, if carrier reports were requested,
 * push a carrier report event to rc-core. */
static void ene_rx_sense_carrier(struct ene_device *dev)
{
	DEFINE_IR_RAW_EVENT(ev);

	int carrier, duty_cycle;
	int period = ene_read_reg(dev, ENE_CIRCAR_PRD);
	int hperiod = ene_read_reg(dev, ENE_CIRCAR_HPRD);

	/* hardware sets the valid bit when the measurement is usable */
	if (!(period & ENE_CIRCAR_PRD_VALID))
		return;

	period &= ~ENE_CIRCAR_PRD_VALID;

	/* guard the divisions below */
	if (!period)
		return;

	dbg("RX: hardware carrier period = %02x", period);
	dbg("RX: hardware carrier pulse period = %02x", hperiod);

	/* 2000000 / period => the hardware period is in 500 ns units */
	carrier = 2000000 / period;
	duty_cycle = (hperiod * 100) / period;
	dbg("RX: sensed carrier = %d Hz, duty cycle %d%%",
		carrier, duty_cycle);
	if (dev->carrier_detect_enabled) {
		ev.carrier_report = true;
		ev.carrier = carrier;
		ev.duty_cycle = duty_cycle;
		ir_raw_event_store(dev->rdev, &ev);
	}
}
357 | ||
358 | /* this enables/disables the CIR RX engine */ | |
359 | static void ene_rx_enable_cir_engine(struct ene_device *dev, bool enable) | |
360 | { | |
361 | ene_set_clear_reg_mask(dev, ENE_CIRCFG, | |
362 | ENE_CIRCFG_RX_EN | ENE_CIRCFG_RX_IRQ, enable); | |
363 | } | |
364 | ||
/* this selects input for CIR engine. Either GPIO 0A or GPIO 40 */
static void ene_rx_select_input(struct ene_device *dev, bool gpio_0a)
{
	ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_GPIO0A, gpio_0a);
}
370 | ||
371 | /* | |
372 | * this enables alternative input via fan tachometer sensor and bypasses | |
373 | * the hw CIR engine | |
374 | */ | |
375 | static void ene_rx_enable_fan_input(struct ene_device *dev, bool enable) | |
376 | { | |
377 | if (!dev->hw_fan_input) | |
378 | return; | |
379 | ||
380 | if (!enable) | |
381 | ene_write_reg(dev, ENE_FAN_AS_IN1, 0); | |
382 | else { | |
383 | ene_write_reg(dev, ENE_FAN_AS_IN1, ENE_FAN_AS_IN1_EN); | |
384 | ene_write_reg(dev, ENE_FAN_AS_IN2, ENE_FAN_AS_IN2_EN); | |
385 | } | |
386 | } | |
387 | ||
/* setup the receiver for RX
 * Programs input selection, sample period and carrier demodulation/
 * detection, then derives rc-core resolution and timeout limits. */
static void ene_rx_setup(struct ene_device *dev)
{
	/* both learning and carrier detection need the raw (non
	 * pre-demodulated) input path */
	bool learning_mode = dev->learning_mode_enabled ||
					dev->carrier_detect_enabled;
	int sample_period_adjust = 0;

	dbg("RX: setup receiver, learning mode = %d", learning_mode);


	/* This selects RLC input and clears CFG2 settings */
	ene_write_reg(dev, ENE_CIRCFG2, 0x00);

	/* set sample period; default period gets a small PLL-dependent
	 * correction */
	if (sample_period == ENE_DEFAULT_SAMPLE_PERIOD)
		sample_period_adjust =
			dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 1 : 2;

	ene_write_reg(dev, ENE_CIRRLC_CFG,
			(sample_period + sample_period_adjust) |
						ENE_CIRRLC_CFG_OVERFLOW);
	/* revB doesn't support inputs */
	if (dev->hw_revision < ENE_HW_C)
		goto select_timeout;

	if (learning_mode) {

		WARN_ON(!dev->hw_learning_and_tx_capable);

		/* Enable the opposite of the normal input
		That means that if GPIO40 is normally used, use GPIO0A
		and vice versa.
		This input will carry non demodulated
		signal, and we will tell the hw to demodulate it itself */
		ene_rx_select_input(dev, !dev->hw_use_gpio_0a);
		dev->rx_fan_input_inuse = false;

		/* Enable carrier demodulation */
		ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD);

		/* Enable carrier detection */
		ene_write_reg(dev, ENE_CIRCAR_PULS, 0x63);
		ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_CARR_DETECT,
			dev->carrier_detect_enabled || debug);
	} else {
		/* normal RX: prefer the pre-demodulated fan input if present */
		if (dev->hw_fan_input)
			dev->rx_fan_input_inuse = true;
		else
			ene_rx_select_input(dev, dev->hw_use_gpio_0a);

		/* Disable carrier detection & demodulation */
		ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD);
		ene_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_CARR_DETECT);
	}

select_timeout:
	if (dev->rx_fan_input_inuse) {
		dev->rdev->rx_resolution = US_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN);

		/* Fan input doesn't support timeouts, it just ends the
			input with a maximum sample */
		dev->rdev->min_timeout = dev->rdev->max_timeout =
			US_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK *
				ENE_FW_SAMPLE_PERIOD_FAN);
	} else {
		dev->rdev->rx_resolution = US_TO_NS(sample_period);

		/* Theoreticly timeout is unlimited, but we cap it
		 * because it was seen that on one device, it
		 * would stop sending spaces after around 250 msec.
		 * Besides, this is close to 2^32 anyway and timeout is u32.
		 */
		dev->rdev->min_timeout = US_TO_NS(127 * sample_period);
		dev->rdev->max_timeout = US_TO_NS(200000);
	}

	if (dev->hw_learning_and_tx_capable)
		dev->rdev->tx_resolution = US_TO_NS(sample_period);

	/* clamp the current timeout into the freshly computed window */
	if (dev->rdev->timeout > dev->rdev->max_timeout)
		dev->rdev->timeout = dev->rdev->max_timeout;
	if (dev->rdev->timeout < dev->rdev->min_timeout)
		dev->rdev->timeout = dev->rdev->min_timeout;
}
472 | ||
/* Enable the device for receive */
static void ene_rx_enable_hw(struct ene_device *dev)
{
	u8 reg_value;

	/* Enable system interrupt: revB uses a dedicated register pair,
	 * revC+ packs the IRQ number into the low bits of ENE_IRQ */
	if (dev->hw_revision < ENE_HW_C) {
		ene_write_reg(dev, ENEB_IRQ, dev->irq << 1);
		ene_write_reg(dev, ENEB_IRQ_UNK1, 0x01);
	} else {
		reg_value = ene_read_reg(dev, ENE_IRQ) & 0xF0;
		reg_value |= ENE_IRQ_UNK_EN;
		reg_value &= ~ENE_IRQ_STATUS;	/* don't re-assert a stale status */
		reg_value |= (dev->irq & ENE_IRQ_MASK);
		ene_write_reg(dev, ENE_IRQ, reg_value);
	}

	/* Enable inputs (exactly one of fan input / CIR engine is active) */
	ene_rx_enable_fan_input(dev, dev->rx_fan_input_inuse);
	ene_rx_enable_cir_engine(dev, !dev->rx_fan_input_inuse);

	/* ack any pending irqs - just in case */
	ene_irq_status(dev);

	/* enable firmware bits */
	ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ);

	/* enter idle mode */
	ir_raw_event_set_idle(dev->rdev, true);
}
503 | ||
/* Enable the device for receive - wrapper to track the state*/
static void ene_rx_enable(struct ene_device *dev)
{
	ene_rx_enable_hw(dev);
	/* software mirror of the RX state (presumably used for
	 * suspend/resume elsewhere in the file - TODO confirm) */
	dev->rx_enabled = true;
}
510 | ||
/* Disable the device receiver */
static void ene_rx_disable_hw(struct ene_device *dev)
{
	/* disable inputs */
	ene_rx_enable_cir_engine(dev, false);
	ene_rx_enable_fan_input(dev, false);

	/* disable hardware IRQ and firmware flag */
	ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ);

	/* put rc-core back into idle state */
	ir_raw_event_set_idle(dev->rdev, true);
}
522 | ||
/* Disable the device receiver - wrapper to track the state */
static void ene_rx_disable(struct ene_device *dev)
{
	ene_rx_disable_hw(dev);
	/* software mirror of the RX state, kept in sync with the hardware */
	dev->rx_enabled = false;
}
529 | ||
/* This resets the receiver. Useful to stop stream of spaces at end of
 * transmission
 */
static void ene_rx_reset(struct ene_device *dev)
{
	/* toggling RX_EN off then back on restarts the RX engine */
	ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN);
	ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN);
}
538 | ||
/* Set up the TX carrier frequency and duty cycle.
 * Reads dev->tx_period (in 500 ns units, 0 = unmodulated) and
 * dev->tx_duty_cycle (percent, must be 1..99 - see BUG_ON below). */
static void ene_tx_set_carrier(struct ene_device *dev)
{
	u8 tx_puls_width;
	unsigned long flags;

	spin_lock_irqsave(&dev->hw_lock, flags);

	/* carrier modulation is enabled only when a period is set */
	ene_set_clear_reg_mask(dev, ENE_CIRCFG,
		ENE_CIRCFG_TX_CARR, dev->tx_period > 0);

	if (!dev->tx_period)
		goto unlock;

	/* callers must have validated the duty cycle */
	BUG_ON(dev->tx_duty_cycle >= 100 || dev->tx_duty_cycle <= 0);

	/* pulse width ~= period * duty_cycle / 100 (integer math) */
	tx_puls_width = dev->tx_period / (100 / dev->tx_duty_cycle);

	if (!tx_puls_width)
		tx_puls_width = 1;

	dbg("TX: pulse distance = %d * 500 ns", dev->tx_period);
	dbg("TX: pulse width = %d * 500 ns", tx_puls_width);

	ene_write_reg(dev, ENE_CIRMOD_PRD, dev->tx_period | ENE_CIRMOD_PRD_POL);
	ene_write_reg(dev, ENE_CIRMOD_HPRD, tx_puls_width);
unlock:
	spin_unlock_irqrestore(&dev->hw_lock, flags);
}
568 | ||
569 | /* Enable/disable transmitters */ | |
570 | static void ene_tx_set_transmitters(struct ene_device *dev) | |
571 | { | |
572 | unsigned long flags; | |
573 | ||
574 | spin_lock_irqsave(&dev->hw_lock, flags); | |
575 | ene_set_clear_reg_mask(dev, ENE_GPIOFS8, ENE_GPIOFS8_GPIO41, | |
576 | !!(dev->transmitter_mask & 0x01)); | |
577 | ene_set_clear_reg_mask(dev, ENE_GPIOFS1, ENE_GPIOFS1_GPIO0D, | |
578 | !!(dev->transmitter_mask & 0x02)); | |
579 | spin_unlock_irqrestore(&dev->hw_lock, flags); | |
580 | } | |
581 | ||
/* prepare transmission */
static void ene_tx_enable(struct ene_device *dev)
{
	u8 conf1 = ene_read_reg(dev, ENE_CIRCFG);
	u8 fwreg2 = ene_read_reg(dev, ENE_FW2);

	/* saved so ene_tx_disable() can restore the pre-TX configuration */
	dev->saved_conf1 = conf1;

	/* Show information about currently connected transmitter jacks */
	if (fwreg2 & ENE_FW2_EMMITER1_CONN)
		dbg("TX: Transmitter #1 is connected");

	if (fwreg2 & ENE_FW2_EMMITER2_CONN)
		dbg("TX: Transmitter #2 is connected");

	if (!(fwreg2 & (ENE_FW2_EMMITER1_CONN | ENE_FW2_EMMITER2_CONN)))
		pr_warn("TX: transmitter cable isn't connected!\n");

	/* disable receive on revc */
	if (dev->hw_revision == ENE_HW_C)
		conf1 &= ~ENE_CIRCFG_RX_EN;

	/* Enable TX engine */
	conf1 |= ENE_CIRCFG_TX_EN | ENE_CIRCFG_TX_IRQ;
	ene_write_reg(dev, ENE_CIRCFG, conf1);
}
608 | ||
/* end transmission */
static void ene_tx_disable(struct ene_device *dev)
{
	/* restore CIRCFG exactly as it was before ene_tx_enable() */
	ene_write_reg(dev, ENE_CIRCFG, dev->saved_conf1);
	/* NULL marks "no transmission in progress" (checked in ene_tx_sample) */
	dev->tx_buffer = NULL;
}
615 | ||
616 | ||
/* TX one sample - must be called with dev->hw_lock.
 * Feeds the next chunk of the current pulse/space duration to the
 * hardware output register; called from the ISR (or the txsim timer)
 * once per TX interrupt. */
static void ene_tx_sample(struct ene_device *dev)
{
	u8 raw_tx;
	u32 sample;
	bool pulse = dev->tx_sample_pulse;

	if (!dev->tx_buffer) {
		pr_warn("TX: BUG: attempt to transmit NULL buffer\n");
		return;
	}

	/* Grab next TX sample */
	if (!dev->tx_sample) {

		if (dev->tx_pos == dev->tx_len) {
			if (!dev->tx_done) {
				dbg("TX: no more data to send");
				/* one more IRQ is needed for the last
				 * sample to actually leave the hardware */
				dev->tx_done = true;
				goto exit;
			} else {
				dbg("TX: last sample sent by hardware");
				ene_tx_disable(dev);
				complete(&dev->tx_complete);
				return;
			}
		}

		/* buffer entries alternate pulse/space by position */
		sample = dev->tx_buffer[dev->tx_pos++];
		dev->tx_sample_pulse = !dev->tx_sample_pulse;

		/* convert duration to hardware sample-period units,
		 * never letting a nonzero duration round down to 0 */
		dev->tx_sample = DIV_ROUND_CLOSEST(sample, sample_period);

		if (!dev->tx_sample)
			dev->tx_sample = 1;
	}

	/* a long duration is emitted in chunks of at most
	 * ENE_CIRRLC_OUT_MASK periods each */
	raw_tx = min(dev->tx_sample , (unsigned int)ENE_CIRRLC_OUT_MASK);
	dev->tx_sample -= raw_tx;

	dbg("TX: sample %8d (%s)", raw_tx * sample_period,
						pulse ? "pulse" : "space");
	if (pulse)
		raw_tx |= ENE_CIRRLC_OUT_PULSE;

	/* the two output registers are written alternately (presumably
	 * double-buffered by the hardware - TODO confirm) */
	ene_write_reg(dev,
		dev->tx_reg ? ENE_CIRRLC_OUT1 : ENE_CIRRLC_OUT0, raw_tx);

	dev->tx_reg = !dev->tx_reg;
exit:
	/* simulate TX done interrupt */
	if (txsim)
		mod_timer(&dev->tx_sim_timer, jiffies + HZ / 500);
}
671 | ||
/* timer to simulate tx done interrupt (only armed when txsim is set) */
static void ene_tx_irqsim(struct timer_list *t)
{
	struct ene_device *dev = from_timer(dev, t, tx_sim_timer);
	unsigned long flags;

	/* ene_tx_sample() requires hw_lock to be held */
	spin_lock_irqsave(&dev->hw_lock, flags);
	ene_tx_sample(dev);
	spin_unlock_irqrestore(&dev->hw_lock, flags);
}
682 | ||
683 | ||
/* read irq status and ack it.
 * Returns a bitmask of ENE_IRQ_RX / ENE_IRQ_TX (0 = not our interrupt). */
static int ene_irq_status(struct ene_device *dev)
{
	u8 irq_status;
	u8 fw_flags1, fw_flags2;
	int retval = 0;

	fw_flags2 = ene_read_reg(dev, ENE_FW2);

	/* revB: a single status bit, acked by clearing it; RX only */
	if (dev->hw_revision < ENE_HW_C) {
		irq_status = ene_read_reg(dev, ENEB_IRQ_STATUS);

		if (!(irq_status & ENEB_IRQ_STATUS_IR))
			return 0;

		ene_clear_reg_mask(dev, ENEB_IRQ_STATUS, ENEB_IRQ_STATUS_IR);
		return ENE_IRQ_RX;
	}

	irq_status = ene_read_reg(dev, ENE_IRQ);
	if (!(irq_status & ENE_IRQ_STATUS))
		return 0;

	/* original driver does that twice - a workaround ? */
	ene_write_reg(dev, ENE_IRQ, irq_status & ~ENE_IRQ_STATUS);
	ene_write_reg(dev, ENE_IRQ, irq_status & ~ENE_IRQ_STATUS);

	/* check RX interrupt (flag lives in FW2, cleared by writing back) */
	if (fw_flags2 & ENE_FW2_RXIRQ) {
		retval |= ENE_IRQ_RX;
		ene_write_reg(dev, ENE_FW2, fw_flags2 & ~ENE_FW2_RXIRQ);
	}

	/* check TX interrupt (flag lives in FW1, cleared by writing back) */
	fw_flags1 = ene_read_reg(dev, ENE_FW1);
	if (fw_flags1 & ENE_FW1_TXIRQ) {
		ene_write_reg(dev, ENE_FW1, fw_flags1 & ~ENE_FW1_TXIRQ);
		retval |= ENE_IRQ_TX;
	}

	return retval;
}
726 | ||
/* interrupt handler
 * Handles both TX (feed next sample) and RX (drain the hardware ring
 * buffer into rc-core) under dev->hw_lock. */
static irqreturn_t ene_isr(int irq, void *data)
{
	u16 hw_value, reg;
	int hw_sample, irq_status;
	bool pulse;
	unsigned long flags;
	irqreturn_t retval = IRQ_NONE;
	struct ene_device *dev = (struct ene_device *)data;
	DEFINE_IR_RAW_EVENT(ev);

	spin_lock_irqsave(&dev->hw_lock, flags);

	dbg_verbose("ISR called");
	ene_rx_read_hw_pointer(dev);
	irq_status = ene_irq_status(dev);

	/* interrupt was not from this device */
	if (!irq_status)
		goto unlock;

	retval = IRQ_HANDLED;

	if (irq_status & ENE_IRQ_TX) {
		dbg_verbose("TX interrupt");
		if (!dev->hw_learning_and_tx_capable) {
			dbg("TX interrupt on unsupported device!");
			goto unlock;
		}
		/* push the next sample to the hardware output register */
		ene_tx_sample(dev);
	}

	if (!(irq_status & ENE_IRQ_RX))
		goto unlock;

	dbg_verbose("RX interrupt");

	if (dev->hw_learning_and_tx_capable)
		ene_rx_sense_carrier(dev);

	/* On hardware that don't support extra buffer we need to trust
		the interrupt and not track the read pointer */
	if (!dev->hw_extra_buffer)
		dev->r_pointer = dev->w_pointer == 0 ? ENE_FW_PACKET_SIZE : 0;

	/* drain every sample currently in the ring buffer */
	while (1) {

		reg = ene_rx_get_sample_reg(dev);

		dbg_verbose("next sample to read at: %04x", reg);
		if (!reg)
			break;

		hw_value = ene_read_reg(dev, reg);

		if (dev->rx_fan_input_inuse) {

			/* fan samples are 16 bit; the high byte lives in a
			 * parallel buffer at a fixed offset */
			int offset = ENE_FW_SMPL_BUF_FAN - ENE_FW_SAMPLE_BUFFER;

			/* read high part of the sample */
			hw_value |= ene_read_reg(dev, reg + offset) << 8;
			pulse = hw_value & ENE_FW_SMPL_BUF_FAN_PLS;

			/* clear space bit, and other unused bits */
			hw_value &= ENE_FW_SMPL_BUF_FAN_MSK;
			hw_sample = hw_value * ENE_FW_SAMPLE_PERIOD_FAN;

		} else {
			pulse = !(hw_value & ENE_FW_SAMPLE_SPACE);
			hw_value &= ~ENE_FW_SAMPLE_SPACE;
			hw_sample = hw_value * sample_period;

			/* compensation for non-default sample period,
			 * computed in ene_hw_detect() */
			if (dev->rx_period_adjust) {
				hw_sample *= 100;
				hw_sample /= (100 + dev->rx_period_adjust);
			}
		}

		/* old firmware: a zero-length sample means no data */
		if (!dev->hw_extra_buffer && !hw_sample) {
			dev->r_pointer = dev->w_pointer;
			continue;
		}

		dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space");

		/* rc-core expects durations in nanoseconds */
		ev.duration = US_TO_NS(hw_sample);
		ev.pulse = pulse;
		ir_raw_event_store_with_filter(dev->rdev, &ev);
	}

	/* kick rc-core to process the stored events */
	ir_raw_event_handle(dev->rdev);
unlock:
	spin_unlock_irqrestore(&dev->hw_lock, flags);
	return retval;
}
821 | ||
822 | /* Initialize default settings */ | |
823 | static void ene_setup_default_settings(struct ene_device *dev) | |
824 | { | |
825 | dev->tx_period = 32; | |
826 | dev->tx_duty_cycle = 50; /*%*/ | |
827 | dev->transmitter_mask = 0x03; | |
828 | dev->learning_mode_enabled = learning_mode_force; | |
829 | ||
830 | /* Set reasonable default timeout */ | |
831 | dev->rdev->timeout = US_TO_NS(150000); | |
832 | } | |
833 | ||
/* Upload all hardware settings at once. Used at load and resume time */
static void ene_setup_hw_settings(struct ene_device *dev)
{
	/* TX carrier/transmitter setup only exists on TX-capable hardware */
	if (dev->hw_learning_and_tx_capable) {
		ene_tx_set_carrier(dev);
		ene_tx_set_transmitters(dev);
	}

	ene_rx_setup(dev);
}
844 | ||
/* outside interface: called on first open*/
static int ene_open(struct rc_dev *rdev)
{
	struct ene_device *dev = rdev->priv;
	unsigned long flags;

	/* hw_lock serializes register access against the ISR */
	spin_lock_irqsave(&dev->hw_lock, flags);
	ene_rx_enable(dev);
	spin_unlock_irqrestore(&dev->hw_lock, flags);
	return 0;
}
856 | ||
/* outside interface: called on device close*/
static void ene_close(struct rc_dev *rdev)
{
	struct ene_device *dev = rdev->priv;
	unsigned long flags;
	/* hw_lock serializes register access against the ISR */
	spin_lock_irqsave(&dev->hw_lock, flags);

	ene_rx_disable(dev);
	spin_unlock_irqrestore(&dev->hw_lock, flags);
}
867 | ||
868 | /* outside interface: set transmitter mask */ | |
869 | static int ene_set_tx_mask(struct rc_dev *rdev, u32 tx_mask) | |
870 | { | |
871 | struct ene_device *dev = rdev->priv; | |
872 | dbg("TX: attempt to set transmitter mask %02x", tx_mask); | |
873 | ||
874 | /* invalid txmask */ | |
875 | if (!tx_mask || tx_mask & ~0x03) { | |
876 | dbg("TX: invalid mask"); | |
877 | /* return count of transmitters */ | |
878 | return 2; | |
879 | } | |
880 | ||
881 | dev->transmitter_mask = tx_mask; | |
882 | ene_tx_set_transmitters(dev); | |
883 | return 0; | |
884 | } | |
885 | ||
886 | /* outside interface : set tx carrier */ | |
887 | static int ene_set_tx_carrier(struct rc_dev *rdev, u32 carrier) | |
888 | { | |
889 | struct ene_device *dev = rdev->priv; | |
890 | u32 period; | |
891 | ||
892 | dbg("TX: attempt to set tx carrier to %d kHz", carrier); | |
893 | if (carrier == 0) | |
894 | return -EINVAL; | |
895 | ||
896 | period = 2000000 / carrier; | |
897 | if (period && (period > ENE_CIRMOD_PRD_MAX || | |
898 | period < ENE_CIRMOD_PRD_MIN)) { | |
899 | ||
900 | dbg("TX: out of range %d-%d kHz carrier", | |
901 | 2000 / ENE_CIRMOD_PRD_MIN, 2000 / ENE_CIRMOD_PRD_MAX); | |
902 | return -EINVAL; | |
903 | } | |
904 | ||
905 | dev->tx_period = period; | |
906 | ene_tx_set_carrier(dev); | |
907 | return 0; | |
908 | } | |
909 | ||
910 | /*outside interface : set tx duty cycle */ | |
911 | static int ene_set_tx_duty_cycle(struct rc_dev *rdev, u32 duty_cycle) | |
912 | { | |
913 | struct ene_device *dev = rdev->priv; | |
914 | dbg("TX: setting duty cycle to %d%%", duty_cycle); | |
915 | dev->tx_duty_cycle = duty_cycle; | |
916 | ene_tx_set_carrier(dev); | |
917 | return 0; | |
918 | } | |
919 | ||
920 | /* outside interface: enable learning mode */ | |
921 | static int ene_set_learning_mode(struct rc_dev *rdev, int enable) | |
922 | { | |
923 | struct ene_device *dev = rdev->priv; | |
924 | unsigned long flags; | |
925 | if (enable == dev->learning_mode_enabled) | |
926 | return 0; | |
927 | ||
928 | spin_lock_irqsave(&dev->hw_lock, flags); | |
929 | dev->learning_mode_enabled = enable; | |
930 | ene_rx_disable(dev); | |
931 | ene_rx_setup(dev); | |
932 | ene_rx_enable(dev); | |
933 | spin_unlock_irqrestore(&dev->hw_lock, flags); | |
934 | return 0; | |
935 | } | |
936 | ||
937 | static int ene_set_carrier_report(struct rc_dev *rdev, int enable) | |
938 | { | |
939 | struct ene_device *dev = rdev->priv; | |
940 | unsigned long flags; | |
941 | ||
942 | if (enable == dev->carrier_detect_enabled) | |
943 | return 0; | |
944 | ||
945 | spin_lock_irqsave(&dev->hw_lock, flags); | |
946 | dev->carrier_detect_enabled = enable; | |
947 | ene_rx_disable(dev); | |
948 | ene_rx_setup(dev); | |
949 | ene_rx_enable(dev); | |
950 | spin_unlock_irqrestore(&dev->hw_lock, flags); | |
951 | return 0; | |
952 | } | |
953 | ||
954 | /* outside interface: enable or disable idle mode */ | |
955 | static void ene_set_idle(struct rc_dev *rdev, bool idle) | |
956 | { | |
957 | struct ene_device *dev = rdev->priv; | |
958 | ||
959 | if (idle) { | |
960 | ene_rx_reset(dev); | |
961 | dbg("RX: end of data"); | |
962 | } | |
963 | } | |
964 | ||
/*
 * outside interface: transmit
 *
 * Queues @n samples from @buf, primes the hardware with the first two
 * samples, then sleeps up to 2 seconds for the ISR to signal tx_complete.
 * Returns @n even on timeout (the rc-core tx_ir contract returns the
 * number of samples consumed).
 *
 * NOTE(review): the tx_* state below is written before taking hw_lock -
 * presumably rc-core serializes concurrent tx_ir calls; confirm before
 * relying on this from another path.
 */
static int ene_transmit(struct rc_dev *rdev, unsigned *buf, unsigned n)
{
	struct ene_device *dev = rdev->priv;
	unsigned long flags;

	/* Seed the software TX state consumed by ene_tx_sample()/the ISR */
	dev->tx_buffer = buf;
	dev->tx_len = n;
	dev->tx_pos = 0;
	dev->tx_reg = 0;
	dev->tx_done = 0;
	dev->tx_sample = 0;
	dev->tx_sample_pulse = false;

	dbg("TX: %d samples", dev->tx_len);

	spin_lock_irqsave(&dev->hw_lock, flags);

	ene_tx_enable(dev);

	/* Transmit first two samples */
	ene_tx_sample(dev);
	ene_tx_sample(dev);

	spin_unlock_irqrestore(&dev->hw_lock, flags);

	/* Wait for the ISR to drain the buffer; force TX off on timeout */
	if (wait_for_completion_timeout(&dev->tx_complete, 2 * HZ) == 0) {
		dbg("TX: timeout");
		spin_lock_irqsave(&dev->hw_lock, flags);
		ene_tx_disable(dev);
		spin_unlock_irqrestore(&dev->hw_lock, flags);
	} else
		dbg("TX: done");
	return n;
}
1000 | ||
/*
 * probe entry
 *
 * Validates the PNP-provided I/O port and IRQ, detects the hardware
 * revision, fills in the rc_dev callbacks (TX/learning ones only when the
 * hardware - or the txsim override - supports them), registers the rc
 * device and finally claims the I/O region and IRQ.  Cleanup on failure
 * unwinds in reverse order via the goto ladder at the bottom.
 */
static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
{
	int error = -ENOMEM;
	struct rc_dev *rdev;
	struct ene_device *dev;

	/* allocate memory */
	dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL);
	rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
	if (!dev || !rdev)
		goto exit_free_dev_rdev;

	/* validate resources */
	error = -ENODEV;

	/* init these to -1, as 0 is valid for both */
	dev->hw_io = -1;
	dev->irq = -1;

	/* need at least ENE_IO_SIZE ports at PNP resource 0 */
	if (!pnp_port_valid(pnp_dev, 0) ||
	    pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE)
		goto exit_free_dev_rdev;

	if (!pnp_irq_valid(pnp_dev, 0))
		goto exit_free_dev_rdev;

	spin_lock_init(&dev->hw_lock);

	dev->hw_io = pnp_port_start(pnp_dev, 0);
	dev->irq = pnp_irq(pnp_dev, 0);


	pnp_set_drvdata(pnp_dev, dev);
	dev->pnp_dev = pnp_dev;

	/* don't allow too short/long sample periods */
	if (sample_period < 5 || sample_period > 0x7F)
		sample_period = ENE_DEFAULT_SAMPLE_PERIOD;

	/* detect hardware version and features */
	error = ene_hw_detect(dev);
	if (error)
		goto exit_free_dev_rdev;

	/* optional TX simulation on hardware without TX support */
	if (!dev->hw_learning_and_tx_capable && txsim) {
		dev->hw_learning_and_tx_capable = true;
		timer_setup(&dev->tx_sim_timer, ene_tx_irqsim, 0);
		pr_warn("Simulation of TX activated\n");
	}

	/* learning mode cannot be forced without learning hardware */
	if (!dev->hw_learning_and_tx_capable)
		learning_mode_force = false;

	rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
	rdev->priv = dev;
	rdev->open = ene_open;
	rdev->close = ene_close;
	rdev->s_idle = ene_set_idle;
	rdev->driver_name = ENE_DRIVER_NAME;
	rdev->map_name = RC_MAP_RC6_MCE;
	rdev->device_name = "ENE eHome Infrared Remote Receiver";

	/* TX/learning callbacks only when the hardware supports them */
	if (dev->hw_learning_and_tx_capable) {
		rdev->s_learning_mode = ene_set_learning_mode;
		init_completion(&dev->tx_complete);
		rdev->tx_ir = ene_transmit;
		rdev->s_tx_mask = ene_set_tx_mask;
		rdev->s_tx_carrier = ene_set_tx_carrier;
		rdev->s_tx_duty_cycle = ene_set_tx_duty_cycle;
		rdev->s_carrier_report = ene_set_carrier_report;
		rdev->device_name = "ENE eHome Infrared Remote Transceiver";
	}

	dev->rdev = rdev;

	ene_rx_setup_hw_buffer(dev);
	ene_setup_default_settings(dev);
	ene_setup_hw_settings(dev);

	/* wake-on-IR is supported and enabled by default */
	device_set_wakeup_capable(&pnp_dev->dev, true);
	device_set_wakeup_enable(&pnp_dev->dev, true);

	error = rc_register_device(rdev);
	if (error < 0)
		goto exit_free_dev_rdev;

	/* claim the resources */
	error = -EBUSY;
	if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
		goto exit_unregister_device;
	}

	if (request_irq(dev->irq, ene_isr,
			IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
		goto exit_release_hw_io;
	}

	pr_notice("driver has been successfully loaded\n");
	return 0;

exit_release_hw_io:
	release_region(dev->hw_io, ENE_IO_SIZE);
exit_unregister_device:
	rc_unregister_device(rdev);
	/* rc_unregister_device() already freed rdev; NULL it so the
	 * rc_free_device() below becomes a no-op */
	rdev = NULL;
exit_free_dev_rdev:
	rc_free_device(rdev);
	kfree(dev);
	return error;
}
1112 | ||
/*
 * main unload function
 *
 * Quiesces the receiver and restores the firmware buffer under hw_lock,
 * then releases the IRQ and I/O region before unregistering the rc device
 * and freeing the driver state.
 */
static void ene_remove(struct pnp_dev *pnp_dev)
{
	struct ene_device *dev = pnp_get_drvdata(pnp_dev);
	unsigned long flags;

	/* stop the hardware before tearing anything down */
	spin_lock_irqsave(&dev->hw_lock, flags);
	ene_rx_disable(dev);
	ene_rx_restore_hw_buffer(dev);
	spin_unlock_irqrestore(&dev->hw_lock, flags);

	/* no interrupts can arrive past this point */
	free_irq(dev->irq, dev);
	release_region(dev->hw_io, ENE_IO_SIZE);
	rc_unregister_device(dev->rdev);
	kfree(dev);
}
1129 | ||
1130 | /* enable wake on IR (wakes on specific button on original remote) */ | |
1131 | static void ene_enable_wake(struct ene_device *dev, bool enable) | |
1132 | { | |
1133 | dbg("wake on IR %s", enable ? "enabled" : "disabled"); | |
1134 | ene_set_clear_reg_mask(dev, ENE_FW1, ENE_FW1_WAKE, enable); | |
1135 | } | |
1136 | ||
1137 | #ifdef CONFIG_PM | |
1138 | static int ene_suspend(struct pnp_dev *pnp_dev, pm_message_t state) | |
1139 | { | |
1140 | struct ene_device *dev = pnp_get_drvdata(pnp_dev); | |
1141 | bool wake = device_may_wakeup(&dev->pnp_dev->dev); | |
1142 | ||
1143 | if (!wake && dev->rx_enabled) | |
1144 | ene_rx_disable_hw(dev); | |
1145 | ||
1146 | ene_enable_wake(dev, wake); | |
1147 | return 0; | |
1148 | } | |
1149 | ||
1150 | static int ene_resume(struct pnp_dev *pnp_dev) | |
1151 | { | |
1152 | struct ene_device *dev = pnp_get_drvdata(pnp_dev); | |
1153 | ene_setup_hw_settings(dev); | |
1154 | ||
1155 | if (dev->rx_enabled) | |
1156 | ene_rx_enable(dev); | |
1157 | ||
1158 | ene_enable_wake(dev, false); | |
1159 | return 0; | |
1160 | } | |
1161 | #endif | |
1162 | ||
1163 | static void ene_shutdown(struct pnp_dev *pnp_dev) | |
1164 | { | |
1165 | struct ene_device *dev = pnp_get_drvdata(pnp_dev); | |
1166 | ene_enable_wake(dev, true); | |
1167 | } | |
1168 | ||
/* PNP IDs of the supported KB3926 CIR variants */
static const struct pnp_device_id ene_ids[] = {
	{.id = "ENE0100",},
	{.id = "ENE0200",},
	{.id = "ENE0201",},
	{.id = "ENE0202",},
	{},	/* terminator */
};
1176 | ||
static struct pnp_driver ene_driver = {
	.name = ENE_DRIVER_NAME,
	.id_table = ene_ids,
	/* resources are fixed by firmware; never reconfigure them */
	.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,

	.probe = ene_probe,
	.remove = ene_remove,
#ifdef CONFIG_PM
	.suspend = ene_suspend,
	.resume = ene_resume,
#endif
	.shutdown = ene_shutdown,
};
1190 | ||
/* read-only at runtime; validated/clamped in ene_probe() */
module_param(sample_period, int, S_IRUGO);
MODULE_PARM_DESC(sample_period, "Hardware sample period (50 us default)");

module_param(learning_mode_force, bool, S_IRUGO);
MODULE_PARM_DESC(learning_mode_force, "Enable learning mode by default");

/* writable so debugging can be toggled on a running system */
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level");

module_param(txsim, bool, S_IRUGO);
MODULE_PARM_DESC(txsim,
	"Simulate TX features on unsupported hardware (dangerous)");

MODULE_DEVICE_TABLE(pnp, ene_ids);
MODULE_DESCRIPTION
	("Infrared input driver for KB3926B/C/D/E/F (aka ENE0100/ENE0200/ENE0201/ENE0202) CIR port");

MODULE_AUTHOR("Maxim Levitsky");
MODULE_LICENSE("GPL");

module_pnp_driver(ene_driver);