]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blob - drivers/media/rc/nuvoton-cir.c
Merge branch 'WIP.x86/boot' into x86/boot, to pick up ready branch
[mirror_ubuntu-focal-kernel.git] / drivers / media / rc / nuvoton-cir.c
1 /*
2 * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
3 *
4 * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
5 * Copyright (C) 2009 Nuvoton PS Team
6 *
7 * Special thanks to Nuvoton for providing hardware, spec sheets and
8 * sample code upon which portions of this driver are based. Indirect
9 * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
10 * modeled after.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as
14 * published by the Free Software Foundation; either version 2 of the
15 * License, or (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 */
22
23 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/pnp.h>
28 #include <linux/io.h>
29 #include <linux/interrupt.h>
30 #include <linux/sched.h>
31 #include <linux/slab.h>
32 #include <media/rc-core.h>
33 #include <linux/pci_ids.h>
34
35 #include "nuvoton-cir.h"
36
37 static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt);
38
/* Supported Nuvoton Super I/O chips; matched against the masked chip id */
static const struct nvt_chip nvt_chips[] = {
	{ "w83667hg", NVT_W83667HG },
	{ "NCT6775F", NVT_6775F },
	{ "NCT6776F", NVT_6776F },
	{ "NCT6779D", NVT_6779D },
};
45
46 static inline struct device *nvt_get_dev(const struct nvt_dev *nvt)
47 {
48 return nvt->rdev->dev.parent;
49 }
50
51 static inline bool is_w83667hg(struct nvt_dev *nvt)
52 {
53 return nvt->chip_ver == NVT_W83667HG;
54 }
55
/*
 * write val to config reg: select the register via the Extended Function
 * Index port, then write the value through the Extended Function Data port.
 * Callers bracket this with nvt_efm_enable()/nvt_efm_disable().
 */
static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
{
	outb(reg, nvt->cr_efir);
	outb(val, nvt->cr_efdr);
}
62
/*
 * read val from config reg: select the register via the Extended Function
 * Index port, then read it back through the Extended Function Data port.
 */
static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
{
	outb(reg, nvt->cr_efir);
	return inb(nvt->cr_efdr);
}
69
70 /* update config register bit without changing other bits */
71 static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
72 {
73 u8 tmp = nvt_cr_read(nvt, reg) | val;
74 nvt_cr_write(nvt, tmp, reg);
75 }
76
77 /* clear config register bit without changing other bits */
78 static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
79 {
80 u8 tmp = nvt_cr_read(nvt, reg) & ~val;
81 nvt_cr_write(nvt, tmp, reg);
82 }
83
/*
 * enter extended function mode
 *
 * Grabs the (possibly shared) Super I/O config port pair as a muxed
 * region, then unlocks the chip's config space by writing the magic
 * EFER value twice, as the hardware requires.
 *
 * Returns 0 on success or -EBUSY if the region is unavailable; callers
 * must pair a successful call with nvt_efm_disable().
 */
static inline int nvt_efm_enable(struct nvt_dev *nvt)
{
	if (!request_muxed_region(nvt->cr_efir, 2, NVT_DRIVER_NAME))
		return -EBUSY;

	/* Enabling Extended Function Mode explicitly requires writing 2x */
	outb(EFER_EFM_ENABLE, nvt->cr_efir);
	outb(EFER_EFM_ENABLE, nvt->cr_efir);

	return 0;
}
96
/*
 * exit extended function mode and release the muxed config port region
 * acquired by nvt_efm_enable()
 */
static inline void nvt_efm_disable(struct nvt_dev *nvt)
{
	outb(EFER_EFM_DISABLE, nvt->cr_efir);

	release_region(nvt->cr_efir, 2);
}
104
/*
 * When you want to address a specific logical device, write its logical
 * device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing
 * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN. This helper only performs
 * the selection; callers must already be in extended function mode.
 */
static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_cr_write(nvt, ldev, CR_LOGICAL_DEV_SEL);
}
114
/*
 * select and enable logical device, entering/leaving EFM mode itself
 *
 * NOTE(review): the -EBUSY return of nvt_efm_enable() is ignored here;
 * on failure the subsequent config writes go out without holding the
 * muxed region — worth confirming whether that can happen in practice.
 */
static inline void nvt_enable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, ldev);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
	nvt_efm_disable(nvt);
}
123
/*
 * select and disable logical device, entering/leaving EFM mode itself
 *
 * NOTE(review): as with nvt_enable_logical_dev(), a failed
 * nvt_efm_enable() is not checked before the config writes.
 */
static inline void nvt_disable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, ldev);
	nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
	nvt_efm_disable(nvt);
}
132
133 /* write val to cir config register */
134 static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
135 {
136 outb(val, nvt->cir_addr + offset);
137 }
138
139 /* read val from cir config register */
140 static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
141 {
142 return inb(nvt->cir_addr + offset);
143 }
144
145 /* write val to cir wake register */
146 static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
147 u8 val, u8 offset)
148 {
149 outb(val, nvt->cir_wake_addr + offset);
150 }
151
152 /* read val from cir wake config register */
153 static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
154 {
155 return inb(nvt->cir_wake_addr + offset);
156 }
157
158 /* don't override io address if one is set already */
159 static void nvt_set_ioaddr(struct nvt_dev *nvt, unsigned long *ioaddr)
160 {
161 unsigned long old_addr;
162
163 old_addr = nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8;
164 old_addr |= nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO);
165
166 if (old_addr)
167 *ioaddr = old_addr;
168 else {
169 nvt_cr_write(nvt, *ioaddr >> 8, CR_CIR_BASE_ADDR_HI);
170 nvt_cr_write(nvt, *ioaddr & 0xff, CR_CIR_BASE_ADDR_LO);
171 }
172 }
173
174 static void nvt_write_wakeup_codes(struct rc_dev *dev,
175 const u8 *wbuf, int count)
176 {
177 u8 tolerance, config;
178 struct nvt_dev *nvt = dev->priv;
179 int i;
180
181 /* hardcode the tolerance to 10% */
182 tolerance = DIV_ROUND_UP(count, 10);
183
184 spin_lock(&nvt->lock);
185
186 nvt_clear_cir_wake_fifo(nvt);
187 nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
188 nvt_cir_wake_reg_write(nvt, tolerance, CIR_WAKE_FIFO_CMP_TOL);
189
190 config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
191
192 /* enable writes to wake fifo */
193 nvt_cir_wake_reg_write(nvt, config | CIR_WAKE_IRCON_MODE1,
194 CIR_WAKE_IRCON);
195
196 if (count)
197 pr_info("Wake samples (%d) =", count);
198 else
199 pr_info("Wake sample fifo cleared");
200
201 for (i = 0; i < count; i++)
202 nvt_cir_wake_reg_write(nvt, wbuf[i], CIR_WAKE_WR_FIFO_DATA);
203
204 nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
205
206 spin_unlock(&nvt->lock);
207 }
208
/*
 * sysfs read of the wakeup sequence: dump the CIR WAKE compare FIFO as a
 * space-separated list of durations (sample count * SAMPLE_PERIOD, i.e.
 * microseconds), one value per stored sample.
 */
static ssize_t wakeup_data_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rc_dev *rc_dev = to_rc_dev(dev);
	struct nvt_dev *nvt = rc_dev->priv;
	int fifo_len, duration;
	unsigned long flags;
	ssize_t buf_len = 0;
	int i;

	spin_lock_irqsave(&nvt->lock, flags);

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	fifo_len = min(fifo_len, WAKEUP_MAX_SIZE);

	/* go to first element to be read */
	while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX))
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);

	for (i = 0; i < fifo_len; i++) {
		duration = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
		/* low 7 bits hold the sample count for this pulse/space */
		duration = (duration & BUF_LEN_MASK) * SAMPLE_PERIOD;
		buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len,
				    "%d ", duration);
	}
	buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");

	spin_unlock_irqrestore(&nvt->lock, flags);

	return buf_len;
}
241
/*
 * sysfs write of the wakeup sequence: parse a whitespace-separated list
 * of durations, convert each to SAMPLE_PERIOD units, tag alternate
 * entries as pulses, and program the result into the wake compare FIFO.
 * Returns the consumed length on success or a negative errno.
 */
static ssize_t wakeup_data_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	struct rc_dev *rc_dev = to_rc_dev(dev);
	u8 wake_buf[WAKEUP_MAX_SIZE];
	char **argv;
	int i, count;
	unsigned int val;
	ssize_t ret;

	argv = argv_split(GFP_KERNEL, buf, &count);
	if (!argv)
		return -ENOMEM;
	/* the FIFO holds at most WAKEUP_MAX_SIZE samples */
	if (!count || count > WAKEUP_MAX_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < count; i++) {
		ret = kstrtouint(argv[i], 10, &val);
		if (ret)
			goto out;
		val = DIV_ROUND_CLOSEST(val, SAMPLE_PERIOD);
		/* each sample count must fit in the 7-bit length field */
		if (!val || val > 0x7f) {
			ret = -EINVAL;
			goto out;
		}
		wake_buf[i] = val;
		/* sequence must start with a pulse */
		if (i % 2 == 0)
			wake_buf[i] |= BUF_PULSE_BIT;
	}

	nvt_write_wakeup_codes(rc_dev, wake_buf, count);

	ret = len;
out:
	argv_free(argv);
	return ret;
}
static DEVICE_ATTR_RW(wakeup_data);
284
/* debug aid: dump current cir config and runtime register contents */
static void cir_dump_regs(struct nvt_dev *nvt)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	pr_info("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
	pr_info(" * CR CIR ACTIVE : 0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR IRQ NUM: 0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
	pr_info(" * IRSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
	pr_info(" * IREN: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
	pr_info(" * RXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
	pr_info(" * CP: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
	pr_info(" * CC: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
	pr_info(" * SLCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
	pr_info(" * SLCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
	pr_info(" * FIFOCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
	pr_info(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
	pr_info(" * SRXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
	pr_info(" * TXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
	pr_info(" * STXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
	pr_info(" * FCCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
	pr_info(" * FCCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
	pr_info(" * IRFSM: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
}
320
/*
 * debug aid: dump current cir wake config/runtime registers, plus the
 * contents of the wake compare FIFO (reading it advances the FIFO index)
 */
static void cir_wake_dump_regs(struct nvt_dev *nvt)
{
	u8 i, fifo_len;

	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	pr_info("%s: Dump CIR WAKE logical device registers:\n",
		NVT_DRIVER_NAME);
	pr_info(" * CR CIR WAKE ACTIVE : 0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR WAKE BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR WAKE IRQ NUM: 0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
	pr_info(" * IRSTS: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
	pr_info(" * IREN: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
	pr_info(" * FIFO CMP DEEP: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
	pr_info(" * FIFO CMP TOL: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
	pr_info(" * FIFO COUNT: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
	pr_info(" * SLCH: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
	pr_info(" * SLCL: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
	pr_info(" * FIFOCON: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
	pr_info(" * SRXFSTS: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
	pr_info(" * SAMPLE RX FIFO: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
	pr_info(" * WR FIFO DATA: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
	pr_info(" * RD FIFO ONLY: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_info(" * RD FIFO ONLY IDX: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
	pr_info(" * FIFO IGNORE: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
	pr_info(" * IRFSM: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	pr_info("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
	pr_info("* Contents =");
	for (i = 0; i < fifo_len; i++)
		pr_cont(" %02x",
			nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_cont("\n");
}
383
384 static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id)
385 {
386 int i;
387
388 for (i = 0; i < ARRAY_SIZE(nvt_chips); i++)
389 if ((id & SIO_ID_MASK) == nvt_chips[i].chip_ver) {
390 nvt->chip_ver = nvt_chips[i].chip_ver;
391 return nvt_chips[i].name;
392 }
393
394 return NULL;
395 }
396
397
/*
 * detect hardware features
 *
 * Reads the chip id through the primary EFER port pair; if that reads
 * back 0xff, retries on the alternate (CR_EFIR2/CR_EFDR2) pair and
 * keeps whichever worked in nvt->cr_efir/cr_efdr for all later config
 * accesses. Returns 0 on success, -ENODEV when no chip answers on
 * either port.
 */
static int nvt_hw_detect(struct nvt_dev *nvt)
{
	struct device *dev = nvt_get_dev(nvt);
	const char *chip_name;
	int chip_id;

	nvt_efm_enable(nvt);

	/* Check if we're wired for the alternate EFER setup */
	nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	if (nvt->chip_major == 0xff) {
		nvt_efm_disable(nvt);
		nvt->cr_efir = CR_EFIR2;
		nvt->cr_efdr = CR_EFDR2;
		nvt_efm_enable(nvt);
		nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	}
	nvt->chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);

	nvt_efm_disable(nvt);

	chip_id = nvt->chip_major << 8 | nvt->chip_minor;
	if (chip_id == NVT_INVALID) {
		dev_err(dev, "No device found on either EFM port\n");
		return -ENODEV;
	}

	chip_name = nvt_find_chip(nvt, chip_id);

	/* warn, but still let the driver load, if we don't know this chip */
	if (!chip_name)
		dev_warn(dev,
			 "unknown chip, id: 0x%02x 0x%02x, it may not work...",
			 nvt->chip_major, nvt->chip_minor);
	else
		dev_info(dev, "found %s or compatible: chip id: 0x%02x 0x%02x",
			 chip_name, nvt->chip_major, nvt->chip_minor);

	return 0;
}
439
/*
 * Configure the CIR logical device: route the multi-function pins to
 * CIR (register set differs between the w83667hg and the NCT67xx
 * chips), then program the base I/O address and IRQ resource.
 * Caller must already be in extended function mode.
 */
static void nvt_cir_ldev_init(struct nvt_dev *nvt)
{
	u8 val, psreg, psmask, psval;

	if (is_w83667hg(nvt)) {
		psreg = CR_MULTIFUNC_PIN_SEL;
		psmask = MULTIFUNC_PIN_SEL_MASK;
		psval = MULTIFUNC_ENABLE_CIR | MULTIFUNC_ENABLE_CIRWB;
	} else {
		psreg = CR_OUTPUT_PIN_SEL;
		psmask = OUTPUT_PIN_SEL_MASK;
		psval = OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB;
	}

	/* output pin selection: enable CIR, with WB sensor enabled */
	val = nvt_cr_read(nvt, psreg);
	val &= psmask;
	val |= psval;
	nvt_cr_write(nvt, val, psreg);

	/* Select CIR logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	nvt_set_ioaddr(nvt, &nvt->cir_addr);

	nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);

	nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
		nvt->cir_addr, nvt->cir_irq);
}
470
/*
 * Configure the CIR WAKE logical device: enable the ACPI logical device
 * with CIR wake routed to PSOUT#, pass the wake event through as a PME
 * interrupt, then program the wake device's base I/O address.
 * Caller must already be in extended function mode.
 */
static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
{
	/* Select ACPI logical device and enable it */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	/* Enable CIR Wake via PSOUT# (Pin60) */
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);

	/* enable pme interrupt of cir wakeup event */
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	/* Select CIR Wake logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	nvt_set_ioaddr(nvt, &nvt->cir_wake_addr);

	nvt_dbg("CIR Wake initialized, base io port address: 0x%lx",
		nvt->cir_wake_addr);
}
491
492 /* clear out the hardware's cir rx fifo */
493 static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
494 {
495 u8 val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
496 nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
497 }
498
/*
 * clear out the hardware's cir wake rx fifo
 *
 * The hardware only clears the wake FIFO while in learning mode, so we
 * temporarily drop MODE0, pulse the clear bit, then restore the saved
 * IRCON value. Callers hold nvt->lock.
 */
static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
{
	u8 val, config;

	config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);

	/* clearing wake fifo works in learning mode only */
	nvt_cir_wake_reg_write(nvt, config & ~CIR_WAKE_IRCON_MODE0,
			       CIR_WAKE_IRCON);

	val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
	nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
			       CIR_WAKE_FIFOCON);

	/* restore the original mode */
	nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
}
516
517 /* clear out the hardware's cir tx fifo */
518 static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
519 {
520 u8 val;
521
522 val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
523 nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
524 }
525
526 /* enable RX Trigger Level Reach and Packet End interrupts */
527 static void nvt_set_cir_iren(struct nvt_dev *nvt)
528 {
529 u8 iren;
530
531 iren = CIR_IREN_RTR | CIR_IREN_PE | CIR_IREN_RFO;
532 nvt_cir_reg_write(nvt, iren, CIR_IREN);
533 }
534
/*
 * Program the CIR runtime registers to their operational defaults:
 * sample limit, FIFO trigger levels, carrier polarity/sample period,
 * clean FIFOs, clear and re-enable interrupts, then switch the CIR
 * logical device on.
 */
static void nvt_cir_regs_init(struct nvt_dev *nvt)
{
	/* set sample limit count (PE interrupt raised when reached) */
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);

	/* set fifo irq trigger levels */
	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
			  CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);

	/*
	 * Enable TX and RX, specify carrier on = low, off = high, and set
	 * sample period (currently 50us)
	 */
	nvt_cir_reg_write(nvt,
			  CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	/* clear any and all stray interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* and finally, enable interrupts */
	nvt_set_cir_iren(nvt);

	/* enable the CIR logical device */
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);
}
567
/*
 * Program the CIR WAKE runtime registers: learning mode with RX
 * disabled until suspend, clear stray interrupts, then switch the
 * CIR WAKE logical device on.
 */
static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
{
	/*
	 * Disable RX, set specific carrier on = low, off = high,
	 * and sample period (currently 50us)
	 */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);

	/* clear any and all stray interrupts */
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);

	/* enable the CIR WAKE logical device */
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
}
585
/*
 * Arm the hardware for wake-on-IR: route the wake event through the
 * ACPI logical device, enable the CIR WAKE logical device, then turn
 * on its receiver with all interrupt sources cleared and masked.
 */
static void nvt_enable_wake(struct nvt_dev *nvt)
{
	unsigned long flags;

	nvt_efm_enable(nvt);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	spin_lock_irqsave(&nvt->lock, flags);

	/* learning mode, RX enabled, inverted input, 50us sample period */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);
	/* ack any pending status, and mask all wake interrupt sources */
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
	nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);

	spin_unlock_irqrestore(&nvt->lock, flags);
}
612
#if 0 /* Currently unused */
/*
 * rx carrier detect only works in learning mode, must be called w/lock
 *
 * NOTE(review): this function is compiled out; kept for reference.
 * It derives the carrier frequency from the hardware carrier-cycle
 * count (FCCL/FCCH) divided by the total pulse duration of the
 * buffered samples.
 */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
	u32 count, carrier, duration = 0;
	int i;

	count = nvt_cir_reg_read(nvt, CIR_FCCL) |
		nvt_cir_reg_read(nvt, CIR_FCCH) << 8;

	/* total up the duration of all pulse samples in the rx buffer */
	for (i = 0; i < nvt->pkts; i++) {
		if (nvt->buf[i] & BUF_PULSE_BIT)
			duration += nvt->buf[i] & BUF_LEN_MASK;
	}

	duration *= SAMPLE_PERIOD;

	if (!count || !duration) {
		dev_notice(nvt_get_dev(nvt),
			   "Unable to determine carrier! (c:%u, d:%u)",
			   count, duration);
		return 0;
	}

	carrier = MS_TO_NS(count) / duration;

	if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
		nvt_dbg("WTF? Carrier frequency out of range!");

	nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
		carrier, count, duration);

	return carrier;
}
#endif
/*
 * set carrier frequency
 *
 * set carrier on 2 registers: CP & CC
 * always set CP as 0x81
 * set CC by SPEC, CC = 3MHz/carrier - 1
 *
 * NOTE(review): the code writes 1 to CIR_CP, not 0x81 as the comment
 * above claims — confirm against the datasheet which is intended.
 * Returns -EINVAL for carrier == 0 to avoid a division by zero.
 */
static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
{
	struct nvt_dev *nvt = dev->priv;
	u16 val;

	if (carrier == 0)
		return -EINVAL;

	nvt_cir_reg_write(nvt, 1, CIR_CP);
	val = 3000000 / (carrier) - 1;
	nvt_cir_reg_write(nvt, val & 0xff, CIR_CC);

	nvt_dbg("cp: 0x%x cc: 0x%x\n",
		nvt_cir_reg_read(nvt, CIR_CP), nvt_cir_reg_read(nvt, CIR_CC));

	return 0;
}
672
/*
 * rc-core wakeup filter callback: encode the scancode filter with the
 * configured wakeup protocol into raw IR events, convert the events to
 * hardware sample bytes (splitting durations longer than BUF_LEN_MASK
 * across multiple samples), and program the wake compare FIFO.
 * Returns 0 on success or a negative errno from allocation/encoding.
 */
static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
					struct rc_scancode_filter *sc_filter)
{
	u8 buf_val;
	int i, ret, count;
	unsigned int val;
	struct ir_raw_event *raw;
	u8 wake_buf[WAKEUP_MAX_SIZE];
	bool complete;

	/* Require mask to be set */
	if (!sc_filter->mask)
		return 0;

	raw = kmalloc_array(WAKEUP_MAX_SIZE, sizeof(*raw), GFP_KERNEL);
	if (!raw)
		return -ENOMEM;

	ret = ir_raw_encode_scancode(dev->wakeup_protocol, sc_filter->data,
				     raw, WAKEUP_MAX_SIZE);
	/* -ENOBUFS means the encoding was truncated to WAKEUP_MAX_SIZE */
	complete = (ret != -ENOBUFS);
	if (!complete)
		ret = WAKEUP_MAX_SIZE;
	else if (ret < 0)
		goto out_raw;

	/* Inspect the ir samples */
	for (i = 0, count = 0; i < ret && count < WAKEUP_MAX_SIZE; ++i) {
		/* NS to US */
		val = DIV_ROUND_UP(raw[i].duration, 1000L) / SAMPLE_PERIOD;

		/* Split too large values into several smaller ones */
		while (val > 0 && count < WAKEUP_MAX_SIZE) {
			/* Skip last value for better comparison tolerance */
			if (complete && i == ret - 1 && val < BUF_LEN_MASK)
				break;

			/* Clamp values to BUF_LEN_MASK at most */
			buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;

			wake_buf[count] = buf_val;
			val -= buf_val;
			if ((raw[i]).pulse)
				wake_buf[count] |= BUF_PULSE_BIT;
			count++;
		}
	}

	nvt_write_wakeup_codes(dev, wake_buf, count);
	ret = 0;
out_raw:
	kfree(raw);

	return ret;
}
728
/*
 * nvt_tx_ir
 *
 * 1) clean TX fifo first (handled by AP)
 * 2) copy data from user space
 * 3) disable RX interrupts, enable TX interrupts: TTR & TFU
 * 4) send 9 packets to TX FIFO to open TTR
 * in interrupt_handler:
 * 5) send all data out
 * go back to write():
 * 6) disable TX interrupts, re-enable RX interrupts
 *
 * The key problem of this function is that the user space data may be
 * larger than the driver's data buffer length. So nvt_tx_ir() will only
 * copy TX_BUF_LEN data to the buffer, and keep the current copied data
 * buffer count in cur_buf_num. But the driver's buffer count may be
 * larger than TXFCONT (0xff), so in the interrupt handler it has to
 * keep setting TXFCONT to 0xff until buf_count drops below 0xff.
 *
 * Returns the number of samples accepted (at most TX_BUF_LEN worth).
 */
static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
{
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;
	unsigned int i;
	u8 iren;
	int ret;

	spin_lock_irqsave(&nvt->lock, flags);

	/* clamp the request to what fits in the driver buffer */
	ret = min((unsigned)(TX_BUF_LEN / sizeof(unsigned)), n);
	nvt->tx.buf_count = (ret * sizeof(unsigned));

	memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count);

	nvt->tx.cur_buf_num = 0;

	/* save currently enabled interrupts */
	iren = nvt_cir_reg_read(nvt, CIR_IREN);

	/* now disable all interrupts, save TFU & TTR */
	nvt_cir_reg_write(nvt, CIR_IREN_TFU | CIR_IREN_TTR, CIR_IREN);

	nvt->tx.tx_state = ST_TX_REPLY;

	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV_8 |
			  CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);

	/* trigger TTR interrupt by writing out ones, (yes, it's ugly) */
	for (i = 0; i < 9; i++)
		nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO);

	spin_unlock_irqrestore(&nvt->lock, flags);

	/* the ISR flips tx_state to ST_TX_REQUEST once the fifo underruns */
	wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST);

	spin_lock_irqsave(&nvt->lock, flags);
	nvt->tx.tx_state = ST_TX_NONE;
	spin_unlock_irqrestore(&nvt->lock, flags);

	/* restore enabled interrupts to prior state */
	nvt_cir_reg_write(nvt, iren, CIR_IREN);

	return ret;
}
792
/* dump contents of the last rx buffer we got from the hw rx fifo */
static void nvt_dump_rx_buf(struct nvt_dev *nvt)
{
	int i;

	printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts);
	/* bound by RX_BUF_LEN in case pkts ever exceeds the buffer */
	for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
		printk(KERN_CONT "0x%02x ", nvt->buf[i]);
	printk(KERN_CONT "\n");
}
803
/*
 * Process raw data in rx driver buffer, store it in raw IR event kfifo,
 * trigger decode when appropriate.
 *
 * We get IR data samples one byte at a time. If the msb is set, its a pulse,
 * otherwise its a space. The lower 7 bits are the count of SAMPLE_PERIOD
 * (default 50us) intervals for that pulse/space. A discrete signal is
 * followed by a series of 0x7f packets, then either 0x7<something> or 0x80
 * to signal more IR coming (repeats) or end of IR, respectively. We store
 * sample data in the raw event kfifo until we see 0x7<something> (except f)
 * or 0x80, at which time, we trigger a decode operation.
 */
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
	DEFINE_IR_RAW_EVENT(rawir);
	u8 sample;
	int i;

	nvt_dbg_verbose("%s firing", __func__);

	if (debug)
		nvt_dump_rx_buf(nvt);

	nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);

	for (i = 0; i < nvt->pkts; i++) {
		sample = nvt->buf[i];

		/* msb = pulse/space, low 7 bits = duration in sample periods */
		rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
		rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
					  * SAMPLE_PERIOD);

		nvt_dbg("Storing %s with duration %d",
			rawir.pulse ? "pulse" : "space", rawir.duration);

		ir_raw_event_store_with_filter(nvt->rdev, &rawir);
	}

	/* buffer consumed; reset for the next fifo drain */
	nvt->pkts = 0;

	nvt_dbg("Calling ir_raw_event_handle\n");
	ir_raw_event_handle(nvt->rdev);

	nvt_dbg_verbose("%s done", __func__);
}
849
/*
 * RX FIFO overrun recovery: drop the partial driver buffer, flush the
 * hardware fifo, and reset the raw IR decoder state. Called from the
 * ISR with nvt->lock held.
 */
static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
{
	dev_warn(nvt_get_dev(nvt), "RX FIFO overrun detected, flushing data!");

	nvt->pkts = 0;
	nvt_clear_cir_fifo(nvt);
	ir_raw_event_reset(nvt->rdev);
}
858
/*
 * copy data from hardware rx fifo into driver buffer, then hand the
 * buffer to nvt_process_rx_ir_data(). Called from the ISR with
 * nvt->lock held.
 *
 * NOTE(review): fifocount (u8, max 255) is written into nvt->buf
 * without an explicit bound check — assumes RX_BUF_LEN >= 256; confirm
 * against nuvoton-cir.h.
 */
static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
{
	u8 fifocount;
	int i;

	/* Get count of how many bytes to read from RX FIFO */
	fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);

	nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);

	/* Read fifocount bytes from CIR Sample RX FIFO register */
	for (i = 0; i < fifocount; i++)
		nvt->buf[i] = nvt_cir_reg_read(nvt, CIR_SRXFIFO);

	nvt->pkts = fifocount;
	nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);

	nvt_process_rx_ir_data(nvt);
}
879
/* decode the IRQ status/enable bytes into a human-readable debug line */
static void nvt_cir_log_irqs(u8 status, u8 iren)
{
	nvt_dbg("IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
		status, iren,
		status & CIR_IRSTS_RDR	? " RDR"	: "",
		status & CIR_IRSTS_RTR	? " RTR"	: "",
		status & CIR_IRSTS_PE	? " PE"		: "",
		status & CIR_IRSTS_RFO	? " RFO"	: "",
		status & CIR_IRSTS_TE	? " TE"		: "",
		status & CIR_IRSTS_TTR	? " TTR"	: "",
		status & CIR_IRSTS_TFU	? " TFU"	: "",
		status & CIR_IRSTS_GH	? " GH"		: "",
		status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
			   CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
			   CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
}
896
/* true when no IR transmission is in flight (rx is safe to process) */
static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
{
	return nvt->tx.tx_state == ST_TX_NONE;
}
901
/*
 * interrupt service routine for incoming and outgoing CIR data
 *
 * Handles, in order: spurious-interrupt filtering, RX overrun recovery,
 * RX data collection, TX fifo housekeeping, and waking the nvt_tx_ir()
 * waiter when the TX fifo underruns (all data sent).
 */
static irqreturn_t nvt_cir_isr(int irq, void *data)
{
	struct nvt_dev *nvt = data;
	u8 status, iren;

	nvt_dbg_verbose("%s firing", __func__);

	spin_lock(&nvt->lock);

	/*
	 * Get IR Status register contents. Write 1 to ack/clear
	 *
	 * bit: reg name	- description
	 *   7: CIR_IRSTS_RDR	- RX Data Ready
	 *   6: CIR_IRSTS_RTR	- RX FIFO Trigger Level Reach
	 *   5: CIR_IRSTS_PE	- Packet End
	 *   4: CIR_IRSTS_RFO	- RX FIFO Overrun (RDR will also be set)
	 *   3: CIR_IRSTS_TE	- TX FIFO Empty
	 *   2: CIR_IRSTS_TTR	- TX FIFO Trigger Level Reach
	 *   1: CIR_IRSTS_TFU	- TX FIFO Underrun
	 *   0: CIR_IRSTS_GH	- Min Length Detected
	 */
	status = nvt_cir_reg_read(nvt, CIR_IRSTS);
	iren = nvt_cir_reg_read(nvt, CIR_IREN);

	/* At least NCT6779D creates a spurious interrupt when the
	 * logical device is being disabled.
	 */
	if (status == 0xff && iren == 0xff) {
		spin_unlock(&nvt->lock);
		nvt_dbg_verbose("Spurious interrupt detected");
		return IRQ_HANDLED;
	}

	/* IRQ may be shared with CIR WAKE, therefore check for each
	 * status bit whether the related interrupt source is enabled
	 */
	if (!(status & iren)) {
		spin_unlock(&nvt->lock);
		nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
		return IRQ_NONE;
	}

	/* ack/clear all irq flags we've got */
	nvt_cir_reg_write(nvt, status, CIR_IRSTS);
	nvt_cir_reg_write(nvt, 0, CIR_IRSTS);

	nvt_cir_log_irqs(status, iren);

	if (status & CIR_IRSTS_RFO)
		nvt_handle_rx_fifo_overrun(nvt);

	else if (status & (CIR_IRSTS_RTR | CIR_IRSTS_PE)) {
		/* We only do rx if not tx'ing */
		if (nvt_cir_tx_inactive(nvt))
			nvt_get_rx_ir_data(nvt);
	}

	if (status & CIR_IRSTS_TE)
		nvt_clear_tx_fifo(nvt);

	if (status & CIR_IRSTS_TTR) {
		unsigned int pos, count;
		u8 tmp;

		pos = nvt->tx.cur_buf_num;
		count = nvt->tx.buf_count;

		/* Write data into the hardware tx fifo while pos < count */
		if (pos < count) {
			nvt_cir_reg_write(nvt, nvt->tx.buf[pos], CIR_STXFIFO);
			nvt->tx.cur_buf_num++;
		/* Disable TX FIFO Trigger Level Reach (TTR) interrupt */
		} else {
			tmp = nvt_cir_reg_read(nvt, CIR_IREN);
			nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN);
		}
	}

	if (status & CIR_IRSTS_TFU) {
		/* all queued TX data drained: release nvt_tx_ir() */
		if (nvt->tx.tx_state == ST_TX_REPLY) {
			nvt->tx.tx_state = ST_TX_REQUEST;
			wake_up(&nvt->tx.queue);
		}
	}

	spin_unlock(&nvt->lock);

	nvt_dbg_verbose("%s done", __func__);
	return IRQ_HANDLED;
}
994
/*
 * Quiesce the CIR receiver/transmitter: mask and ack all interrupts,
 * clear the function enable flags and both FIFOs, then power down the
 * CIR logical device.
 */
static void nvt_disable_cir(struct nvt_dev *nvt)
{
	unsigned long flags;

	spin_lock_irqsave(&nvt->lock, flags);

	/* disable CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	/* clear any and all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* clear all function enable flags */
	nvt_cir_reg_write(nvt, 0, CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	spin_unlock_irqrestore(&nvt->lock, flags);

	/* disable the CIR logical device */
	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
}
1019
/*
 * rc-core open callback: re-enable TX/RX, clear pending interrupts,
 * unmask the RX interrupt sources and power up the CIR logical device.
 */
static int nvt_open(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nvt->lock, flags);

	/* set function enable flags */
	nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	/* clear all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* enable interrupts */
	nvt_set_cir_iren(nvt);

	spin_unlock_irqrestore(&nvt->lock, flags);

	/* enable the CIR logical device */
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);

	return 0;
}
1045
/* rc-core ->close callback: mirror of nvt_open(), shuts the hardware down. */
static void nvt_close(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;

	nvt_disable_cir(nvt);
}
1052
/*
 * Allocate memory, probe hardware, and initialize everything.
 *
 * PNP probe path: allocate the driver state and rc device (both
 * devm-managed, so every error return below unwinds automatically),
 * validate the two I/O port ranges (CIR and CIR-wake) and the IRQ,
 * detect the chip, program the logical devices and their registers,
 * register with rc-core, and only then claim the port regions and IRQ.
 */
static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
	struct nvt_dev *nvt;
	struct rc_dev *rdev;
	int ret;

	nvt = devm_kzalloc(&pdev->dev, sizeof(struct nvt_dev), GFP_KERNEL);
	if (!nvt)
		return -ENOMEM;

	/* input device for IR remote (and tx) */
	nvt->rdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW);
	if (!nvt->rdev)
		return -ENOMEM;
	rdev = nvt->rdev;

	/* activate pnp device */
	ret = pnp_activate_dev(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Could not activate PNP device!\n");
		return ret;
	}

	/* validate pnp resources: port 0 = CIR, port 1 = CIR wake, irq 0 */
	if (!pnp_port_valid(pdev, 0) ||
	    pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "IR PNP Port not valid!\n");
		return -EINVAL;
	}

	if (!pnp_irq_valid(pdev, 0)) {
		dev_err(&pdev->dev, "PNP IRQ not valid!\n");
		return -EINVAL;
	}

	if (!pnp_port_valid(pdev, 1) ||
	    pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
		return -EINVAL;
	}

	nvt->cir_addr = pnp_port_start(pdev, 0);
	nvt->cir_irq  = pnp_irq(pdev, 0);

	/* the wake block has its own port range but raises no IRQ here */
	nvt->cir_wake_addr = pnp_port_start(pdev, 1);

	/* default Extended Function Mode index/data register pair */
	nvt->cr_efir = CR_EFIR;
	nvt->cr_efdr = CR_EFDR;

	spin_lock_init(&nvt->lock);

	pnp_set_drvdata(pdev, nvt);

	/* tx path sleeps on this until the ISR reports the fifo drained */
	init_waitqueue_head(&nvt->tx.queue);

	ret = nvt_hw_detect(nvt);
	if (ret)
		return ret;

	/* Initialize CIR & CIR Wake Logical Devices */
	nvt_efm_enable(nvt);
	nvt_cir_ldev_init(nvt);
	nvt_cir_wake_ldev_init(nvt);
	nvt_efm_disable(nvt);

	/*
	 * Initialize CIR & CIR Wake Config Registers
	 * and enable logical devices
	 */
	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	/* Set up the rc device */
	rdev->priv = nvt;
	rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
	rdev->allowed_wakeup_protocols = RC_BIT_ALL_IR_ENCODER;
	rdev->encode_wakeup = true;
	rdev->open = nvt_open;
	rdev->close = nvt_close;
	rdev->tx_ir = nvt_tx_ir;
	rdev->s_tx_carrier = nvt_set_tx_carrier;
	rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
	rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
	rdev->input_phys = "nuvoton/cir0";
	rdev->input_id.bustype = BUS_HOST;
	rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2;
	rdev->input_id.product = nvt->chip_major;
	rdev->input_id.version = nvt->chip_minor;
	rdev->driver_name = NVT_DRIVER_NAME;
	rdev->map_name = RC_MAP_RC6_MCE;
	rdev->timeout = MS_TO_NS(100);
	/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
	rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
	/* NOTE(review): dead block kept from upstream; timeout limits and
	 * tx resolution were never wired up. */
#if 0
	rdev->min_timeout = XYZ;
	rdev->max_timeout = XYZ;
	/* tx bits */
	rdev->tx_resolution = XYZ;
#endif
	ret = devm_rc_register_device(&pdev->dev, rdev);
	if (ret)
		return ret;

	/* now claim resources */
	if (!devm_request_region(&pdev->dev, nvt->cir_addr,
			    CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
		return -EBUSY;

	/* shared IRQ: the ISR must tolerate spurious invocations */
	ret = devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr,
			       IRQF_SHARED, NVT_DRIVER_NAME, nvt);
	if (ret)
		return ret;

	if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr,
			    CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake"))
		return -EBUSY;

	/* sysfs knob for the raw wakeup sample data (removed in nvt_remove) */
	ret = device_create_file(&rdev->dev, &dev_attr_wakeup_data);
	if (ret)
		return ret;

	device_init_wakeup(&pdev->dev, true);

	dev_notice(&pdev->dev, "driver has been successfully loaded\n");
	if (debug) {
		cir_dump_regs(nvt);
		cir_wake_dump_regs(nvt);
	}

	return 0;
}
1185
/*
 * PNP remove path: drop the sysfs attribute, shut the CIR hardware down,
 * and leave the wake block armed so IR power-on keeps working after
 * unload.  Everything else (rc device, regions, IRQ) is devm-managed.
 */
static void nvt_remove(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	device_remove_file(&nvt->rdev->dev, &dev_attr_wakeup_data);

	nvt_disable_cir(nvt);

	/* enable CIR Wake (for IR power-on) */
	nvt_enable_wake(nvt);
}
1197
/*
 * PM suspend: abandon any in-flight transmit, mask CIR interrupts, power
 * down the CIR logical device, and make sure the wake block stays armed
 * so the machine can be woken by IR.
 */
static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);
	unsigned long flags;

	nvt_dbg("%s called", __func__);

	spin_lock_irqsave(&nvt->lock, flags);

	/* drop any pending tx; the fifo is not drained across suspend */
	nvt->tx.tx_state = ST_TX_NONE;

	/* disable all CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	spin_unlock_irqrestore(&nvt->lock, flags);

	/* disable cir logical dev */
	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);

	/* make sure wake is enabled */
	nvt_enable_wake(nvt);

	return 0;
}
1222
/*
 * PM resume: the chip may have lost its configuration across the power
 * transition, so reprogram both the CIR and CIR-wake register banks from
 * scratch (same init sequence used at probe time).
 */
static int nvt_resume(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_dbg("%s called", __func__);

	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	return 0;
}
1234
/* System shutdown hook: keep the wake block armed for IR power-on. */
static void nvt_shutdown(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_enable_wake(nvt);
}
1241
/* ACPI/PNP IDs this driver binds to; the empty entry terminates the table. */
static const struct pnp_device_id nvt_ids[] = {
	{ "WEC0530", 0 },   /* CIR */
	{ "NTN0530", 0 },   /* CIR for new chip's pnp id*/
	{ "", 0 },
};
1247
/*
 * PNP driver glue.  PNP_DRIVER_RES_DO_NOT_CHANGE tells the PNP core to
 * leave the firmware-assigned resources alone.
 */
static struct pnp_driver nvt_driver = {
	.name		= NVT_DRIVER_NAME,
	.id_table	= nvt_ids,
	.flags		= PNP_DRIVER_RES_DO_NOT_CHANGE,
	.probe		= nvt_probe,
	.remove		= nvt_remove,
	.suspend	= nvt_suspend,
	.resume		= nvt_resume,
	.shutdown	= nvt_shutdown,
};
1258
/* "debug" is runtime-tweakable via sysfs: world-readable, owner-writable */
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging output");

/* export the ID table so udev/modprobe can autoload on device match */
MODULE_DEVICE_TABLE(pnp, nvt_ids);
MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");

MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
MODULE_LICENSE("GPL");

/* expands to module init/exit that register/unregister nvt_driver */
module_pnp_driver(nvt_driver);