/*
 * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
 *
 * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
 * Copyright (C) 2009 Nuvoton PS Team
 *
 * Special thanks to Nuvoton for providing hardware, spec sheets and
 * sample code upon which portions of this driver are based. Indirect
 * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
 * modeled after.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <media/ir-core.h>
#include <linux/pci_ids.h>

#include "nuvoton-cir.h"

static char *chip_id = "w836x7hg";

/* write val to config reg */
static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
{
        outb(reg, nvt->cr_efir);
        outb(val, nvt->cr_efdr);
}

/* read val from config reg */
static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
{
        outb(reg, nvt->cr_efir);
        return inb(nvt->cr_efdr);
}

/* update config register bit without changing other bits */
static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
{
        u8 tmp = nvt_cr_read(nvt, reg) | val;
        nvt_cr_write(nvt, tmp, reg);
}

/* clear config register bit without changing other bits */
static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
{
        u8 tmp = nvt_cr_read(nvt, reg) & ~val;
        nvt_cr_write(nvt, tmp, reg);
}

/* enter extended function mode */
static inline void nvt_efm_enable(struct nvt_dev *nvt)
{
        /* Enabling Extended Function Mode explicitly requires writing 2x */
        outb(EFER_EFM_ENABLE, nvt->cr_efir);
        outb(EFER_EFM_ENABLE, nvt->cr_efir);
}

/* exit extended function mode */
static inline void nvt_efm_disable(struct nvt_dev *nvt)
{
        outb(EFER_EFM_DISABLE, nvt->cr_efir);
}

/*
 * When you want to address a specific logical device, write its logical
 * device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing
 * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN.
 */
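/*
 * Illustrative usage (this mirrors what the init paths below actually do):
 * to activate the CIR logical device, select it and then write the enable
 * bit:
 *
 *      nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
 *      nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 */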
static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
        outb(CR_LOGICAL_DEV_SEL, nvt->cr_efir);
        outb(ldev, nvt->cr_efdr);
}

/* write val to cir config register */
static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
{
        outb(val, nvt->cir_addr + offset);
}

/* read val from cir config register */
static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
{
        u8 val;

        val = inb(nvt->cir_addr + offset);

        return val;
}

/* write val to cir wake register */
static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
                                          u8 val, u8 offset)
{
        outb(val, nvt->cir_wake_addr + offset);
}

/* read val from cir wake config register */
static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
{
        u8 val;

        val = inb(nvt->cir_wake_addr + offset);

        return val;
}

/* dump current cir register contents */
static void cir_dump_regs(struct nvt_dev *nvt)
{
        nvt_efm_enable(nvt);
        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

        printk("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
        printk(" * CR CIR ACTIVE : 0x%x\n",
               nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
        printk(" * CR CIR BASE ADDR: 0x%x\n",
               (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
                nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
        printk(" * CR CIR IRQ NUM: 0x%x\n",
               nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

        nvt_efm_disable(nvt);

        printk("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
        printk(" * IRCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
        printk(" * IRSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
        printk(" * IREN: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
        printk(" * RXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
        printk(" * CP: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
        printk(" * CC: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
        printk(" * SLCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
        printk(" * SLCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
        printk(" * FIFOCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
        printk(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
        printk(" * SRXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
        printk(" * TXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
        printk(" * STXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
        printk(" * FCCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
        printk(" * FCCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
        printk(" * IRFSM: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
}

/* dump current cir wake register contents */
static void cir_wake_dump_regs(struct nvt_dev *nvt)
{
        u8 i, fifo_len;

        nvt_efm_enable(nvt);
        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

        printk("%s: Dump CIR WAKE logical device registers:\n",
               NVT_DRIVER_NAME);
        printk(" * CR CIR WAKE ACTIVE : 0x%x\n",
               nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
        printk(" * CR CIR WAKE BASE ADDR: 0x%x\n",
               (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
                nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
        printk(" * CR CIR WAKE IRQ NUM: 0x%x\n",
               nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

        nvt_efm_disable(nvt);

        printk("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
        printk(" * IRCON: 0x%x\n",
               nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
        printk(" * IRSTS: 0x%x\n",
               nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
        printk(" * IREN: 0x%x\n",
               nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
        printk(" * FIFO CMP DEEP: 0x%x\n",
               nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
        printk(" * FIFO CMP TOL: 0x%x\n",
               nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
        printk(" * FIFO COUNT: 0x%x\n",
               nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
        printk(" * SLCH: 0x%x\n",
               nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
        printk(" * SLCL: 0x%x\n",
               nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
        printk(" * FIFOCON: 0x%x\n",
               nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
        printk(" * SRXFSTS: 0x%x\n",
               nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
        printk(" * SAMPLE RX FIFO: 0x%x\n",
               nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
        printk(" * WR FIFO DATA: 0x%x\n",
               nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
        printk(" * RD FIFO ONLY: 0x%x\n",
               nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
        printk(" * RD FIFO ONLY IDX: 0x%x\n",
               nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
        printk(" * FIFO IGNORE: 0x%x\n",
               nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
        printk(" * IRFSM: 0x%x\n",
               nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));

        fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
        printk("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
        printk("* Contents = ");
        for (i = 0; i < fifo_len; i++)
                printk("%02x ",
                       nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
        printk("\n");
}

/* detect hardware features */
static int nvt_hw_detect(struct nvt_dev *nvt)
{
        unsigned long flags;
        u8 chip_major, chip_minor;
        int ret = 0;

        nvt_efm_enable(nvt);

        /* Check if we're wired for the alternate EFER setup */
        chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
        if (chip_major == 0xff) {
                nvt->cr_efir = CR_EFIR2;
                nvt->cr_efdr = CR_EFDR2;
                nvt_efm_enable(nvt);
                chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
        }

        chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);
        nvt_dbg("%s: chip id: 0x%02x 0x%02x", chip_id, chip_major, chip_minor);

        if (chip_major != CHIP_ID_HIGH ||
            (chip_minor != CHIP_ID_LOW && chip_minor != CHIP_ID_LOW2))
                ret = -ENODEV;

        nvt_efm_disable(nvt);

        spin_lock_irqsave(&nvt->nvt_lock, flags);
        nvt->chip_major = chip_major;
        nvt->chip_minor = chip_minor;
        spin_unlock_irqrestore(&nvt->nvt_lock, flags);

        return ret;
}

static void nvt_cir_ldev_init(struct nvt_dev *nvt)
{
        u8 val;

        /* output pin selection (Pin95=CIRRX, Pin96=CIRTX1), WB enabled */
        val = nvt_cr_read(nvt, CR_OUTPUT_PIN_SEL);
        val &= OUTPUT_PIN_SEL_MASK;
        val |= (OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB);
        nvt_cr_write(nvt, val, CR_OUTPUT_PIN_SEL);

        /* Select CIR logical device and enable */
        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

        nvt_cr_write(nvt, nvt->cir_addr >> 8, CR_CIR_BASE_ADDR_HI);
        nvt_cr_write(nvt, nvt->cir_addr & 0xff, CR_CIR_BASE_ADDR_LO);

        nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);

        nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
                nvt->cir_addr, nvt->cir_irq);
}

static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
{
        /* Select ACPI logical device, enable it and CIR Wake */
        nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

        /* Enable CIR Wake via PSOUT# (Pin60) */
        nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);

        /* enable cir interrupt of mouse/keyboard IRQ event */
        nvt_set_reg_bit(nvt, CIR_INTR_MOUSE_IRQ_BIT, CR_ACPI_IRQ_EVENTS);

        /* enable pme interrupt of cir wakeup event */
        nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

        /* Select CIR Wake logical device and enable */
        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

        nvt_cr_write(nvt, nvt->cir_wake_addr >> 8, CR_CIR_BASE_ADDR_HI);
        nvt_cr_write(nvt, nvt->cir_wake_addr & 0xff, CR_CIR_BASE_ADDR_LO);

        nvt_cr_write(nvt, nvt->cir_wake_irq, CR_CIR_IRQ_RSRC);

        nvt_dbg("CIR Wake initialized, base io port address: 0x%lx, irq: %d",
                nvt->cir_wake_addr, nvt->cir_wake_irq);
}

/* clear out the hardware's cir rx fifo */
static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
{
        u8 val;

        val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
        nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
}

/* clear out the hardware's cir wake rx fifo */
static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
{
        u8 val;

        val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
        nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
                               CIR_WAKE_FIFOCON);
}

/* clear out the hardware's cir tx fifo */
static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
{
        u8 val;

        val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
        nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
}

/* enable RX Trigger Level Reach and Packet End interrupts */
static void nvt_set_cir_iren(struct nvt_dev *nvt)
{
        u8 iren;

        iren = CIR_IREN_RTR | CIR_IREN_PE;
        nvt_cir_reg_write(nvt, iren, CIR_IREN);
}

static void nvt_cir_regs_init(struct nvt_dev *nvt)
{
        /* set sample limit count (PE interrupt raised when reached) */
        nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
        nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);

        /* set fifo irq trigger levels */
        nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
                          CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);

        /*
         * Enable TX and RX, specify carrier on = low, off = high, and set
         * sample period (currently 50us)
         */
        nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN | CIR_IRCON_RXINV |
                          CIR_IRCON_SAMPLE_PERIOD_SEL, CIR_IRCON);

        /* clear hardware rx and tx fifos */
        nvt_clear_cir_fifo(nvt);
        nvt_clear_tx_fifo(nvt);

        /* clear any and all stray interrupts */
        nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

        /* and finally, enable interrupts */
        nvt_set_cir_iren(nvt);
}

static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
{
        /* set number of bytes needed for wake key comparison (default 67) */
        nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_LEN, CIR_WAKE_FIFO_CMP_DEEP);

        /* set tolerance/variance allowed per byte during wake compare */
        nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
                               CIR_WAKE_FIFO_CMP_TOL);

        /* set sample limit count (PE interrupt raised when reached) */
        nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_WAKE_SLCH);
        nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_WAKE_SLCL);

        /* set cir wake fifo rx trigger level (currently 67) */
        nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFOCON_RX_TRIGGER_LEV,
                               CIR_WAKE_FIFOCON);

        /*
         * Enable TX and RX, specify carrier on = low, off = high, and set
         * sample period (currently 50us)
         */
        nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
                               CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
                               CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
                               CIR_WAKE_IRCON);

        /* clear cir wake rx fifo */
        nvt_clear_cir_wake_fifo(nvt);

        /* clear any and all stray interrupts */
        nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
}

static void nvt_enable_wake(struct nvt_dev *nvt)
{
        nvt_efm_enable(nvt);

        nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
        nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
        nvt_set_reg_bit(nvt, CIR_INTR_MOUSE_IRQ_BIT, CR_ACPI_IRQ_EVENTS);
        nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

        nvt_efm_disable(nvt);

        nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
                               CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
                               CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL, CIR_WAKE_IRCON);
        nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
        nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
}

/* rx carrier detect only works in learning mode, must be called w/nvt_lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
        u32 count, carrier, duration = 0;
        int i;

        count = nvt_cir_reg_read(nvt, CIR_FCCL) |
                nvt_cir_reg_read(nvt, CIR_FCCH) << 8;

        for (i = 0; i < nvt->pkts; i++) {
                if (nvt->buf[i] & BUF_PULSE_BIT)
                        duration += nvt->buf[i] & BUF_LEN_MASK;
        }

        duration *= SAMPLE_PERIOD;

        if (!count || !duration) {
                nvt_pr(KERN_NOTICE, "Unable to determine carrier! (c:%u, d:%u)",
                       count, duration);
                return 0;
        }

        carrier = (count * 1000000) / duration;

        if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
                nvt_dbg("WTF? Carrier frequency out of range!");

        nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
                carrier, count, duration);

        return carrier;
}
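
/*
 * Worked example with made-up numbers (not taken from the datasheet): if
 * the hardware counted 76 carrier cycles in FCCH/FCCL across 2000us worth
 * of pulse samples, the computation above yields
 * (76 * 1000000) / 2000 = 38000 Hz, i.e. a typical 38kHz consumer IR
 * carrier.
 */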

/*
 * set carrier frequency
 *
 * set carrier on 2 registers: CP & CC
 * always set CP as 0x81
 * set CC by SPEC, CC = 3MHz/carrier - 1
 */
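/*
 * Worked example (illustrative, not taken from the spec): for a 38kHz
 * carrier, CC = 3000000 / 38000 - 1 = 77 (0x4d) with integer division.
 */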
static int nvt_set_tx_carrier(void *data, u32 carrier)
{
        struct nvt_dev *nvt = data;
        u16 val;

        nvt_cir_reg_write(nvt, 1, CIR_CP);
        val = 3000000 / (carrier) - 1;
        nvt_cir_reg_write(nvt, val & 0xff, CIR_CC);

        nvt_dbg("cp: 0x%x cc: 0x%x\n",
                nvt_cir_reg_read(nvt, CIR_CP), nvt_cir_reg_read(nvt, CIR_CC));

        return 0;
}

/*
 * nvt_tx_ir
 *
 * 1) clean TX fifo first (handled by AP)
 * 2) copy data from user space
 * 3) disable RX interrupts, enable TX interrupts: TTR & TFU
 * 4) send 9 packets to TX FIFO to open TTR
 * in interrupt_handler:
 * 5) send all data out
 * go back to write():
 * 6) disable TX interrupts, re-enable RX interrupts
 *
 * The key problem for this function is that user space data may be larger
 * than the driver's data buffer length. So nvt_tx_ir() will only copy
 * TX_BUF_LEN bytes of data to the buffer, and keep the current position
 * within the copied data in cur_buf_num. But the driver's buffer count may
 * be larger than TXFCONT (0xff), so in the interrupt handler, it has to set
 * TXFCONT to 0xff until buf_count is less than 0xff.
 */
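/*
 * In short (as implemented below and in nvt_cir_isr()): nvt_tx_ir() sets
 * tx_state to ST_TX_REPLY and sleeps in wait_event(); once the TX FIFO
 * underruns, the CIR_IRSTS_TFU branch of the ISR sets ST_TX_REQUEST and
 * calls wake_up() so nvt_tx_ir() can finish and restore the interrupts.
 */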
static int nvt_tx_ir(void *priv, int *txbuf, u32 n)
{
        struct nvt_dev *nvt = priv;
        unsigned long flags;
        size_t cur_count;
        unsigned int i;
        u8 iren;
        int ret;

        spin_lock_irqsave(&nvt->tx.lock, flags);

        if (n >= TX_BUF_LEN) {
                nvt->tx.buf_count = cur_count = TX_BUF_LEN;
                ret = TX_BUF_LEN;
        } else {
                nvt->tx.buf_count = cur_count = n;
                ret = n;
        }

        memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count);

        nvt->tx.cur_buf_num = 0;

        /* save currently enabled interrupts */
        iren = nvt_cir_reg_read(nvt, CIR_IREN);

        /* now disable all interrupts except TFU & TTR */
        nvt_cir_reg_write(nvt, CIR_IREN_TFU | CIR_IREN_TTR, CIR_IREN);

        nvt->tx.tx_state = ST_TX_REPLY;

        nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV_8 |
                          CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);

        /* trigger TTR interrupt by writing out ones, (yes, it's ugly) */
        for (i = 0; i < 9; i++)
                nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO);

        spin_unlock_irqrestore(&nvt->tx.lock, flags);

        wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST);

        spin_lock_irqsave(&nvt->tx.lock, flags);
        nvt->tx.tx_state = ST_TX_NONE;
        spin_unlock_irqrestore(&nvt->tx.lock, flags);

        /* restore enabled interrupts to prior state */
        nvt_cir_reg_write(nvt, iren, CIR_IREN);

        return ret;
}

/* dump contents of the last rx buffer we got from the hw rx fifo */
static void nvt_dump_rx_buf(struct nvt_dev *nvt)
{
        int i;

        printk("%s (len %d): ", __func__, nvt->pkts);
        for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
                printk("0x%02x ", nvt->buf[i]);
        printk("\n");
}

/*
 * Process raw data in rx driver buffer, store it in raw IR event kfifo,
 * trigger decode when appropriate.
 *
 * We get IR data samples one byte at a time. If the msb is set, it's a pulse,
 * otherwise it's a space. The lower 7 bits are the count of SAMPLE_PERIOD
 * (default 50us) intervals for that pulse/space. A discrete signal is
 * followed by a series of 0x7f packets, then either 0x7<something> or 0x80
 * to signal more IR coming (repeats) or end of IR, respectively. We store
 * sample data in the raw event kfifo until we see 0x7<something> (except f)
 * or 0x80, at which time, we trigger a decode operation.
 */
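/*
 * Illustrative example (made-up sample value, assuming BUF_PULSE_BIT is the
 * msb 0x80 and BUF_LEN_MASK is 0x7f as described above): a byte of 0xa8 has
 * the pulse bit set and a length field of 0x28 (40 decimal), so it encodes
 * a pulse lasting 40 * SAMPLE_PERIOD = 2000us at the default 50us sample
 * period.
 */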
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
        struct ir_raw_event rawir = { .pulse = false, .duration = 0 };
        unsigned int count;
        u32 carrier;
        u8 sample;
        int i;

        nvt_dbg_verbose("%s firing", __func__);

        if (debug)
                nvt_dump_rx_buf(nvt);

        if (nvt->carrier_detect_enabled)
                carrier = nvt_rx_carrier_detect(nvt);

        count = nvt->pkts;
        nvt_dbg_verbose("Processing buffer of len %d", count);

        for (i = 0; i < count; i++) {
                nvt->pkts--;
                sample = nvt->buf[i];

                rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
                rawir.duration = (sample & BUF_LEN_MASK)
                                 * SAMPLE_PERIOD * 1000;

                if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
                        if (nvt->rawir.pulse == rawir.pulse)
                                nvt->rawir.duration += rawir.duration;
                        else {
                                nvt->rawir.duration = rawir.duration;
                                nvt->rawir.pulse = rawir.pulse;
                        }
                        continue;
                }

                rawir.duration += nvt->rawir.duration;
                nvt->rawir.duration = 0;
                nvt->rawir.pulse = rawir.pulse;

                if (sample == BUF_PULSE_BIT)
                        rawir.pulse = false;

                if (rawir.duration) {
                        nvt_dbg("Storing %s with duration %d",
                                rawir.pulse ? "pulse" : "space",
                                rawir.duration);

                        ir_raw_event_store(nvt->rdev, &rawir);
                }

                /*
                 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
                 * indicates end of IR signal, but new data incoming. In both
                 * cases, it means we're ready to call ir_raw_event_handle
                 */
                if (sample == BUF_PULSE_BIT || ((sample != BUF_LEN_MASK) &&
                    (sample & BUF_REPEAT_MASK) == BUF_REPEAT_BYTE))
                        ir_raw_event_handle(nvt->rdev);
        }

        if (nvt->pkts) {
                nvt_dbg("Odd, pkts should be 0 now... (it's %u)", nvt->pkts);
                nvt->pkts = 0;
        }

        nvt_dbg_verbose("%s done", __func__);
}

static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
{
        nvt_pr(KERN_WARNING, "RX FIFO overrun detected, flushing data!");

        nvt->pkts = 0;
        nvt_clear_cir_fifo(nvt);
        ir_raw_event_reset(nvt->rdev);
}

/* copy data from hardware rx fifo into driver buffer */
static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
{
        unsigned long flags;
        u8 fifocount, val;
        unsigned int b_idx;
        bool overrun = false;
        int i;

        /* Get count of how many bytes to read from RX FIFO */
        fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);
        /* if we get 0xff, probably means the logical dev is disabled */
        if (fifocount == 0xff)
                return;
        /* watch out for a fifo overrun condition */
        else if (fifocount > RX_BUF_LEN) {
                overrun = true;
                fifocount = RX_BUF_LEN;
        }

        nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);

        spin_lock_irqsave(&nvt->nvt_lock, flags);

        b_idx = nvt->pkts;

        /* This should never happen, but lets check anyway... */
        if (b_idx + fifocount > RX_BUF_LEN) {
                nvt_process_rx_ir_data(nvt);
                b_idx = 0;
        }

        /* Read fifocount bytes from CIR Sample RX FIFO register */
        for (i = 0; i < fifocount; i++) {
                val = nvt_cir_reg_read(nvt, CIR_SRXFIFO);
                nvt->buf[b_idx + i] = val;
        }

        nvt->pkts += fifocount;
        nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);

        nvt_process_rx_ir_data(nvt);

        if (overrun)
                nvt_handle_rx_fifo_overrun(nvt);

        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}

static void nvt_cir_log_irqs(u8 status, u8 iren)
{
        nvt_pr(KERN_INFO, "IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
               status, iren,
               status & CIR_IRSTS_RDR ? " RDR" : "",
               status & CIR_IRSTS_RTR ? " RTR" : "",
               status & CIR_IRSTS_PE ? " PE" : "",
               status & CIR_IRSTS_RFO ? " RFO" : "",
               status & CIR_IRSTS_TE ? " TE" : "",
               status & CIR_IRSTS_TTR ? " TTR" : "",
               status & CIR_IRSTS_TFU ? " TFU" : "",
               status & CIR_IRSTS_GH ? " GH" : "",
               status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
                          CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
                          CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
}

static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
{
        unsigned long flags;
        bool tx_inactive;
        u8 tx_state;

        spin_lock_irqsave(&nvt->tx.lock, flags);
        tx_state = nvt->tx.tx_state;
        spin_unlock_irqrestore(&nvt->tx.lock, flags);

        tx_inactive = (tx_state == ST_TX_NONE);

        return tx_inactive;
}

/* interrupt service routine for incoming and outgoing CIR data */
static irqreturn_t nvt_cir_isr(int irq, void *data)
{
        struct nvt_dev *nvt = data;
        u8 status, iren, cur_state;
        unsigned long flags;

        nvt_dbg_verbose("%s firing", __func__);

        nvt_efm_enable(nvt);
        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
        nvt_efm_disable(nvt);

        /*
         * Get IR Status register contents. Write 1 to ack/clear
         *
         * bit: reg name - description
         * 7: CIR_IRSTS_RDR - RX Data Ready
         * 6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach
         * 5: CIR_IRSTS_PE - Packet End
         * 4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set)
         * 3: CIR_IRSTS_TE - TX FIFO Empty
         * 2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach
         * 1: CIR_IRSTS_TFU - TX FIFO Underrun
         * 0: CIR_IRSTS_GH - Min Length Detected
         */
        status = nvt_cir_reg_read(nvt, CIR_IRSTS);
        if (!status) {
                nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
                nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
                return IRQ_RETVAL(IRQ_NONE);
        }

        /* ack/clear all irq flags we've got */
        nvt_cir_reg_write(nvt, status, CIR_IRSTS);
        nvt_cir_reg_write(nvt, 0, CIR_IRSTS);

        /* Interrupt may be shared with CIR Wake, bail if CIR not enabled */
        iren = nvt_cir_reg_read(nvt, CIR_IREN);
        if (!iren) {
                nvt_dbg_verbose("%s exiting, CIR not enabled", __func__);
                return IRQ_RETVAL(IRQ_NONE);
        }

        if (debug)
                nvt_cir_log_irqs(status, iren);

        if (status & CIR_IRSTS_RTR) {
                /* FIXME: add code for study/learn mode */
                /* We only do rx if not tx'ing */
                if (nvt_cir_tx_inactive(nvt))
                        nvt_get_rx_ir_data(nvt);
        }

        if (status & CIR_IRSTS_PE) {
                if (nvt_cir_tx_inactive(nvt))
                        nvt_get_rx_ir_data(nvt);

                spin_lock_irqsave(&nvt->nvt_lock, flags);

                cur_state = nvt->study_state;

                spin_unlock_irqrestore(&nvt->nvt_lock, flags);

                if (cur_state == ST_STUDY_NONE)
                        nvt_clear_cir_fifo(nvt);
        }

        if (status & CIR_IRSTS_TE)
                nvt_clear_tx_fifo(nvt);

        if (status & CIR_IRSTS_TTR) {
                unsigned int pos, count;
                u8 tmp;

                spin_lock_irqsave(&nvt->tx.lock, flags);

                pos = nvt->tx.cur_buf_num;
                count = nvt->tx.buf_count;

                /* Write data into the hardware tx fifo while pos < count */
                if (pos < count) {
                        nvt_cir_reg_write(nvt, nvt->tx.buf[pos], CIR_STXFIFO);
                        nvt->tx.cur_buf_num++;
                /* Disable TX FIFO Trigger Level Reach (TTR) interrupt */
                } else {
                        tmp = nvt_cir_reg_read(nvt, CIR_IREN);
                        nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN);
                }

                spin_unlock_irqrestore(&nvt->tx.lock, flags);

        }

        if (status & CIR_IRSTS_TFU) {
                spin_lock_irqsave(&nvt->tx.lock, flags);
                if (nvt->tx.tx_state == ST_TX_REPLY) {
                        nvt->tx.tx_state = ST_TX_REQUEST;
                        wake_up(&nvt->tx.queue);
                }
                spin_unlock_irqrestore(&nvt->tx.lock, flags);
        }

        nvt_dbg_verbose("%s done", __func__);
        return IRQ_RETVAL(IRQ_HANDLED);
}

/* Interrupt service routine for CIR Wake */
static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
{
        u8 status, iren, val;
        struct nvt_dev *nvt = data;
        unsigned long flags;

        nvt_dbg_wake("%s firing", __func__);

        status = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS);
        if (!status)
                return IRQ_RETVAL(IRQ_NONE);

        if (status & CIR_WAKE_IRSTS_IR_PENDING)
                nvt_clear_cir_wake_fifo(nvt);

        nvt_cir_wake_reg_write(nvt, status, CIR_WAKE_IRSTS);
        nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IRSTS);

        /* Interrupt may be shared with CIR, bail if Wake not enabled */
        iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN);
        if (!iren) {
                nvt_dbg_wake("%s exiting, wake not enabled", __func__);
                return IRQ_RETVAL(IRQ_HANDLED);
        }

        if ((status & CIR_WAKE_IRSTS_PE) &&
            (nvt->wake_state == ST_WAKE_START)) {
                while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)) {
                        val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
                        nvt_dbg("setting wake up key: 0x%x", val);
                }

                nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
                spin_lock_irqsave(&nvt->nvt_lock, flags);
                nvt->wake_state = ST_WAKE_FINISH;
                spin_unlock_irqrestore(&nvt->nvt_lock, flags);
        }

        nvt_dbg_wake("%s done", __func__);
        return IRQ_RETVAL(IRQ_HANDLED);
}

static void nvt_enable_cir(struct nvt_dev *nvt)
{
        /* set function enable flags */
        nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
                          CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
                          CIR_IRCON);

        nvt_efm_enable(nvt);

        /* enable the CIR logical device */
        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

        nvt_efm_disable(nvt);

        /* clear all pending interrupts */
        nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

        /* enable interrupts */
        nvt_set_cir_iren(nvt);
}

static void nvt_disable_cir(struct nvt_dev *nvt)
{
        /* disable CIR interrupts */
        nvt_cir_reg_write(nvt, 0, CIR_IREN);

        /* clear any and all pending interrupts */
        nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

        /* clear all function enable flags */
        nvt_cir_reg_write(nvt, 0, CIR_IRCON);

        /* clear hardware rx and tx fifos */
        nvt_clear_cir_fifo(nvt);
        nvt_clear_tx_fifo(nvt);

        nvt_efm_enable(nvt);

        /* disable the CIR logical device */
        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
        nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);

        nvt_efm_disable(nvt);
}

static int nvt_open(void *data)
{
        struct nvt_dev *nvt = (struct nvt_dev *)data;
        unsigned long flags;

        spin_lock_irqsave(&nvt->nvt_lock, flags);
        nvt->in_use = true;
        nvt_enable_cir(nvt);
        spin_unlock_irqrestore(&nvt->nvt_lock, flags);

        return 0;
}

static void nvt_close(void *data)
{
        struct nvt_dev *nvt = (struct nvt_dev *)data;
        unsigned long flags;

        spin_lock_irqsave(&nvt->nvt_lock, flags);
        nvt->in_use = false;
        nvt_disable_cir(nvt);
        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}

/* Allocate memory, probe hardware, and initialize everything */
static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
        struct nvt_dev *nvt = NULL;
        struct input_dev *rdev = NULL;
        struct ir_dev_props *props = NULL;
        int ret = -ENOMEM;

        nvt = kzalloc(sizeof(struct nvt_dev), GFP_KERNEL);
        if (!nvt)
                return ret;

        props = kzalloc(sizeof(struct ir_dev_props), GFP_KERNEL);
        if (!props)
                goto failure;

        /* input device for IR remote (and tx) */
        rdev = input_allocate_device();
        if (!rdev)
                goto failure;

        ret = -ENODEV;
        /* validate pnp resources */
        if (!pnp_port_valid(pdev, 0) ||
            pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
                dev_err(&pdev->dev, "IR PNP Port not valid!\n");
                goto failure;
        }

        if (!pnp_irq_valid(pdev, 0)) {
                dev_err(&pdev->dev, "PNP IRQ not valid!\n");
                goto failure;
        }

        if (!pnp_port_valid(pdev, 1) ||
            pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
                dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
                goto failure;
        }

        nvt->cir_addr = pnp_port_start(pdev, 0);
        nvt->cir_irq = pnp_irq(pdev, 0);

        nvt->cir_wake_addr = pnp_port_start(pdev, 1);
        /* irq is always shared between cir and cir wake */
        nvt->cir_wake_irq = nvt->cir_irq;

        nvt->cr_efir = CR_EFIR;
        nvt->cr_efdr = CR_EFDR;

        spin_lock_init(&nvt->nvt_lock);
        spin_lock_init(&nvt->tx.lock);

        ret = -EBUSY;
        /* now claim resources */
        if (!request_region(nvt->cir_addr,
                            CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
                goto failure;

        if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
                        NVT_DRIVER_NAME, (void *)nvt))
                goto failure;

        if (!request_region(nvt->cir_wake_addr,
                            CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
                goto failure;

        if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
                        NVT_DRIVER_NAME, (void *)nvt))
                goto failure;

        pnp_set_drvdata(pdev, nvt);
        nvt->pdev = pdev;

        init_waitqueue_head(&nvt->tx.queue);

        ret = nvt_hw_detect(nvt);
        if (ret)
                goto failure;

        /* Initialize CIR & CIR Wake Logical Devices */
        nvt_efm_enable(nvt);
        nvt_cir_ldev_init(nvt);
        nvt_cir_wake_ldev_init(nvt);
        nvt_efm_disable(nvt);

        /* Initialize CIR & CIR Wake Config Registers */
        nvt_cir_regs_init(nvt);
        nvt_cir_wake_regs_init(nvt);

        /* Set up ir-core props */
        props->priv = nvt;
        props->driver_type = RC_DRIVER_IR_RAW;
        props->allowed_protos = IR_TYPE_ALL;
        props->open = nvt_open;
        props->close = nvt_close;
#if 0
        props->min_timeout = XYZ;
        props->max_timeout = XYZ;
        props->timeout = XYZ;
        /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
        props->rx_resolution = XYZ;

        /* tx bits */
        props->tx_resolution = XYZ;
#endif
        props->tx_ir = nvt_tx_ir;
        props->s_tx_carrier = nvt_set_tx_carrier;

        rdev->name = "Nuvoton w836x7hg Infrared Remote Transceiver";
        rdev->id.bustype = BUS_HOST;
        rdev->id.vendor = PCI_VENDOR_ID_WINBOND2;
        rdev->id.product = nvt->chip_major;
        rdev->id.version = nvt->chip_minor;

        nvt->props = props;
        nvt->rdev = rdev;

        device_set_wakeup_capable(&pdev->dev, 1);
        device_set_wakeup_enable(&pdev->dev, 1);

        ret = ir_input_register(rdev, RC_MAP_RC6_MCE, props, NVT_DRIVER_NAME);
        if (ret)
                goto failure;

        nvt_pr(KERN_NOTICE, "driver has been successfully loaded\n");
        if (debug) {
                cir_dump_regs(nvt);
                cir_wake_dump_regs(nvt);
        }

        return 0;

failure:
        if (nvt->cir_irq)
                free_irq(nvt->cir_irq, nvt);
        if (nvt->cir_addr)
                release_region(nvt->cir_addr, CIR_IOREG_LENGTH);

        if (nvt->cir_wake_irq)
                free_irq(nvt->cir_wake_irq, nvt);
        if (nvt->cir_wake_addr)
                release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);

        input_free_device(rdev);
        kfree(props);
        kfree(nvt);

        return ret;
}

static void __devexit nvt_remove(struct pnp_dev *pdev)
{
        struct nvt_dev *nvt = pnp_get_drvdata(pdev);
        unsigned long flags;

        spin_lock_irqsave(&nvt->nvt_lock, flags);
        /* disable CIR */
        nvt_cir_reg_write(nvt, 0, CIR_IREN);
        nvt_disable_cir(nvt);
        /* enable CIR Wake (for IR power-on) */
        nvt_enable_wake(nvt);
        spin_unlock_irqrestore(&nvt->nvt_lock, flags);

        /* free resources */
        free_irq(nvt->cir_irq, nvt);
        free_irq(nvt->cir_wake_irq, nvt);
        release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
        release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);

        ir_input_unregister(nvt->rdev);

        kfree(nvt->props);
        kfree(nvt);
}

static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
{
        struct nvt_dev *nvt = pnp_get_drvdata(pdev);
        unsigned long flags;

        nvt_dbg("%s called", __func__);

        /* zero out misc state tracking */
        spin_lock_irqsave(&nvt->nvt_lock, flags);
        nvt->study_state = ST_STUDY_NONE;
        nvt->wake_state = ST_WAKE_NONE;
        spin_unlock_irqrestore(&nvt->nvt_lock, flags);

        spin_lock_irqsave(&nvt->tx.lock, flags);
        nvt->tx.tx_state = ST_TX_NONE;
        spin_unlock_irqrestore(&nvt->tx.lock, flags);

        /* disable all CIR interrupts */
        nvt_cir_reg_write(nvt, 0, CIR_IREN);

        nvt_efm_enable(nvt);

        /* disable cir logical dev */
        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
        nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);

        nvt_efm_disable(nvt);

        /* make sure wake is enabled */
        nvt_enable_wake(nvt);

        return 0;
}

static int nvt_resume(struct pnp_dev *pdev)
{
        int ret = 0;
        struct nvt_dev *nvt = pnp_get_drvdata(pdev);

        nvt_dbg("%s called", __func__);

        /* open interrupt */
        nvt_set_cir_iren(nvt);

        /* Enable CIR logical device */
        nvt_efm_enable(nvt);
        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

        nvt_efm_disable(nvt);

        nvt_cir_regs_init(nvt);
        nvt_cir_wake_regs_init(nvt);

        return ret;
}

static void nvt_shutdown(struct pnp_dev *pdev)
{
        struct nvt_dev *nvt = pnp_get_drvdata(pdev);
        nvt_enable_wake(nvt);
}

static const struct pnp_device_id nvt_ids[] = {
        { "WEC0530", 0 }, /* CIR */
        { "NTN0530", 0 }, /* CIR for new chip's pnp id */
        { "", 0 },
};

static struct pnp_driver nvt_driver = {
        .name = NVT_DRIVER_NAME,
        .id_table = nvt_ids,
        .flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
        .probe = nvt_probe,
        .remove = __devexit_p(nvt_remove),
        .suspend = nvt_suspend,
        .resume = nvt_resume,
        .shutdown = nvt_shutdown,
};

int nvt_init(void)
{
        return pnp_register_driver(&nvt_driver);
}

void nvt_exit(void)
{
        pnp_unregister_driver(&nvt_driver);
}

module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging output");

MODULE_DEVICE_TABLE(pnp, nvt_ids);
MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");

MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
MODULE_LICENSE("GPL");

module_init(nvt_init);
module_exit(nvt_exit);