]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - drivers/media/pci/cx23885/cx23885-core.c
media: cx23885: Ryzen DMA related RiSC engine stall fixes
[mirror_ubuntu-hirsute-kernel.git] / drivers / media / pci / cx23885 / cx23885-core.c
CommitLineData
d19770e5
ST
1/*
2 * Driver for the Conexant CX23885 PCIe bridge
3 *
6d897616 4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
d19770e5
ST
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 *
15 * GNU General Public License for more details.
d19770e5
ST
16 */
17
e39682b5
MCC
18#include "cx23885.h"
19
d19770e5
ST
20#include <linux/init.h>
21#include <linux/list.h>
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/kmod.h>
25#include <linux/kernel.h>
26#include <linux/slab.h>
27#include <linux/interrupt.h>
28#include <linux/delay.h>
29#include <asm/div64.h>
78db8547 30#include <linux/firmware.h>
d19770e5 31
5a23b076 32#include "cimax2.h"
78db8547 33#include "altera-ci.h"
29f8a0a5 34#include "cx23888-ir.h"
f59ad611 35#include "cx23885-ir.h"
e5514f10 36#include "cx23885-av.h"
dbda8f70 37#include "cx23885-input.h"
d19770e5
ST
38
39MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
6d897616 40MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
d19770e5 41MODULE_LICENSE("GPL");
1990d50b 42MODULE_VERSION(CX23885_VERSION);
d19770e5 43
4513fc69 44static unsigned int debug;
9c8ced51
ST
45module_param(debug, int, 0644);
46MODULE_PARM_DESC(debug, "enable debug messages");
d19770e5
ST
47
48static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
49module_param_array(card, int, NULL, 0444);
9c8ced51 50MODULE_PARM_DESC(card, "card type");
d19770e5 51
4513fc69
ST
52#define dprintk(level, fmt, arg...)\
53 do { if (debug >= level)\
e39682b5
MCC
54 printk(KERN_DEBUG pr_fmt("%s: " fmt), \
55 __func__, ##arg); \
4513fc69 56 } while (0)
d19770e5
ST
57
58static unsigned int cx23885_devcount;
59
d19770e5
ST
60#define NO_SYNC_LINE (-1U)
61
d19770e5
ST
62/* FIXME, these allocations will change when
63 * analog arrives. The be reviewed.
64 * CX23887 Assumptions
65 * 1 line = 16 bytes of CDT
66 * cmds size = 80
67 * cdt size = 16 * linesize
68 * iqsize = 64
69 * maxlines = 6
70 *
71 * Address Space:
72 * 0x00000000 0x00008fff FIFO clusters
73 * 0x00010000 0x000104af Channel Management Data Structures
74 * 0x000104b0 0x000104ff Free
75 * 0x00010500 0x000108bf 15 channels * iqsize
76 * 0x000108c0 0x000108ff Free
77 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
78 * 15 channels * (iqsize + (maxlines * linesize))
79 * 0x00010ea0 0x00010xxx Free
80 */
81
7e994302 82static struct sram_channel cx23885_sram_channels[] = {
d19770e5 83 [SRAM_CH01] = {
69ad6e56
ST
84 .name = "VID A",
85 .cmds_start = 0x10000,
d8d12b43
ST
86 .ctrl_start = 0x10380,
87 .cdt = 0x104c0,
69ad6e56
ST
88 .fifo_start = 0x40,
89 .fifo_size = 0x2800,
d19770e5
ST
90 .ptr1_reg = DMA1_PTR1,
91 .ptr2_reg = DMA1_PTR2,
92 .cnt1_reg = DMA1_CNT1,
93 .cnt2_reg = DMA1_CNT2,
94 },
95 [SRAM_CH02] = {
96 .name = "ch2",
97 .cmds_start = 0x0,
98 .ctrl_start = 0x0,
99 .cdt = 0x0,
100 .fifo_start = 0x0,
101 .fifo_size = 0x0,
102 .ptr1_reg = DMA2_PTR1,
103 .ptr2_reg = DMA2_PTR2,
104 .cnt1_reg = DMA2_CNT1,
105 .cnt2_reg = DMA2_CNT2,
106 },
107 [SRAM_CH03] = {
69ad6e56
ST
108 .name = "TS1 B",
109 .cmds_start = 0x100A0,
d8d12b43
ST
110 .ctrl_start = 0x10400,
111 .cdt = 0x10580,
69ad6e56
ST
112 .fifo_start = 0x5000,
113 .fifo_size = 0x1000,
d19770e5
ST
114 .ptr1_reg = DMA3_PTR1,
115 .ptr2_reg = DMA3_PTR2,
116 .cnt1_reg = DMA3_CNT1,
117 .cnt2_reg = DMA3_CNT2,
118 },
119 [SRAM_CH04] = {
120 .name = "ch4",
121 .cmds_start = 0x0,
122 .ctrl_start = 0x0,
123 .cdt = 0x0,
124 .fifo_start = 0x0,
125 .fifo_size = 0x0,
126 .ptr1_reg = DMA4_PTR1,
127 .ptr2_reg = DMA4_PTR2,
128 .cnt1_reg = DMA4_CNT1,
129 .cnt2_reg = DMA4_CNT2,
130 },
131 [SRAM_CH05] = {
132 .name = "ch5",
133 .cmds_start = 0x0,
134 .ctrl_start = 0x0,
135 .cdt = 0x0,
136 .fifo_start = 0x0,
137 .fifo_size = 0x0,
138 .ptr1_reg = DMA5_PTR1,
139 .ptr2_reg = DMA5_PTR2,
140 .cnt1_reg = DMA5_CNT1,
141 .cnt2_reg = DMA5_CNT2,
142 },
143 [SRAM_CH06] = {
144 .name = "TS2 C",
145 .cmds_start = 0x10140,
d8d12b43
ST
146 .ctrl_start = 0x10440,
147 .cdt = 0x105e0,
d19770e5
ST
148 .fifo_start = 0x6000,
149 .fifo_size = 0x1000,
150 .ptr1_reg = DMA5_PTR1,
151 .ptr2_reg = DMA5_PTR2,
152 .cnt1_reg = DMA5_CNT1,
153 .cnt2_reg = DMA5_CNT2,
154 },
155 [SRAM_CH07] = {
9e44d632
MM
156 .name = "TV Audio",
157 .cmds_start = 0x10190,
158 .ctrl_start = 0x10480,
159 .cdt = 0x10a00,
160 .fifo_start = 0x7000,
161 .fifo_size = 0x1000,
d19770e5
ST
162 .ptr1_reg = DMA6_PTR1,
163 .ptr2_reg = DMA6_PTR2,
164 .cnt1_reg = DMA6_CNT1,
165 .cnt2_reg = DMA6_CNT2,
166 },
167 [SRAM_CH08] = {
168 .name = "ch8",
169 .cmds_start = 0x0,
170 .ctrl_start = 0x0,
171 .cdt = 0x0,
172 .fifo_start = 0x0,
173 .fifo_size = 0x0,
174 .ptr1_reg = DMA7_PTR1,
175 .ptr2_reg = DMA7_PTR2,
176 .cnt1_reg = DMA7_CNT1,
177 .cnt2_reg = DMA7_CNT2,
178 },
179 [SRAM_CH09] = {
180 .name = "ch9",
181 .cmds_start = 0x0,
182 .ctrl_start = 0x0,
183 .cdt = 0x0,
184 .fifo_start = 0x0,
185 .fifo_size = 0x0,
186 .ptr1_reg = DMA8_PTR1,
187 .ptr2_reg = DMA8_PTR2,
188 .cnt1_reg = DMA8_CNT1,
189 .cnt2_reg = DMA8_CNT2,
190 },
191};
192
7e994302
ST
193static struct sram_channel cx23887_sram_channels[] = {
194 [SRAM_CH01] = {
195 .name = "VID A",
196 .cmds_start = 0x10000,
197 .ctrl_start = 0x105b0,
198 .cdt = 0x107b0,
199 .fifo_start = 0x40,
200 .fifo_size = 0x2800,
201 .ptr1_reg = DMA1_PTR1,
202 .ptr2_reg = DMA1_PTR2,
203 .cnt1_reg = DMA1_CNT1,
204 .cnt2_reg = DMA1_CNT2,
205 },
206 [SRAM_CH02] = {
35045137
ST
207 .name = "VID A (VBI)",
208 .cmds_start = 0x10050,
209 .ctrl_start = 0x105F0,
210 .cdt = 0x10810,
211 .fifo_start = 0x3000,
212 .fifo_size = 0x1000,
7e994302
ST
213 .ptr1_reg = DMA2_PTR1,
214 .ptr2_reg = DMA2_PTR2,
215 .cnt1_reg = DMA2_CNT1,
216 .cnt2_reg = DMA2_CNT2,
217 },
218 [SRAM_CH03] = {
219 .name = "TS1 B",
220 .cmds_start = 0x100A0,
221 .ctrl_start = 0x10630,
222 .cdt = 0x10870,
223 .fifo_start = 0x5000,
224 .fifo_size = 0x1000,
225 .ptr1_reg = DMA3_PTR1,
226 .ptr2_reg = DMA3_PTR2,
227 .cnt1_reg = DMA3_CNT1,
228 .cnt2_reg = DMA3_CNT2,
229 },
230 [SRAM_CH04] = {
231 .name = "ch4",
232 .cmds_start = 0x0,
233 .ctrl_start = 0x0,
234 .cdt = 0x0,
235 .fifo_start = 0x0,
236 .fifo_size = 0x0,
237 .ptr1_reg = DMA4_PTR1,
238 .ptr2_reg = DMA4_PTR2,
239 .cnt1_reg = DMA4_CNT1,
240 .cnt2_reg = DMA4_CNT2,
241 },
242 [SRAM_CH05] = {
243 .name = "ch5",
244 .cmds_start = 0x0,
245 .ctrl_start = 0x0,
246 .cdt = 0x0,
247 .fifo_start = 0x0,
248 .fifo_size = 0x0,
249 .ptr1_reg = DMA5_PTR1,
250 .ptr2_reg = DMA5_PTR2,
251 .cnt1_reg = DMA5_CNT1,
252 .cnt2_reg = DMA5_CNT2,
253 },
254 [SRAM_CH06] = {
255 .name = "TS2 C",
256 .cmds_start = 0x10140,
257 .ctrl_start = 0x10670,
258 .cdt = 0x108d0,
259 .fifo_start = 0x6000,
260 .fifo_size = 0x1000,
261 .ptr1_reg = DMA5_PTR1,
262 .ptr2_reg = DMA5_PTR2,
263 .cnt1_reg = DMA5_CNT1,
264 .cnt2_reg = DMA5_CNT2,
265 },
266 [SRAM_CH07] = {
35045137
ST
267 .name = "TV Audio",
268 .cmds_start = 0x10190,
269 .ctrl_start = 0x106B0,
270 .cdt = 0x10930,
271 .fifo_start = 0x7000,
272 .fifo_size = 0x1000,
7e994302
ST
273 .ptr1_reg = DMA6_PTR1,
274 .ptr2_reg = DMA6_PTR2,
275 .cnt1_reg = DMA6_CNT1,
276 .cnt2_reg = DMA6_CNT2,
277 },
278 [SRAM_CH08] = {
279 .name = "ch8",
280 .cmds_start = 0x0,
281 .ctrl_start = 0x0,
282 .cdt = 0x0,
283 .fifo_start = 0x0,
284 .fifo_size = 0x0,
285 .ptr1_reg = DMA7_PTR1,
286 .ptr2_reg = DMA7_PTR2,
287 .cnt1_reg = DMA7_CNT1,
288 .cnt2_reg = DMA7_CNT2,
289 },
290 [SRAM_CH09] = {
291 .name = "ch9",
292 .cmds_start = 0x0,
293 .ctrl_start = 0x0,
294 .cdt = 0x0,
295 .fifo_start = 0x0,
296 .fifo_size = 0x0,
297 .ptr1_reg = DMA8_PTR1,
298 .ptr2_reg = DMA8_PTR2,
299 .cnt1_reg = DMA8_CNT1,
300 .cnt2_reg = DMA8_CNT2,
301 },
302};
303
ada73eee 304static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
dbe83a3b
AW
305{
306 unsigned long flags;
307 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
308
309 dev->pci_irqmask |= mask;
310
311 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
312}
313
314void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
315{
316 unsigned long flags;
317 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
318
319 dev->pci_irqmask |= mask;
320 cx_set(PCI_INT_MSK, mask);
321
322 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
323}
324
325void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
326{
327 u32 v;
328 unsigned long flags;
329 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
330
331 v = mask & dev->pci_irqmask;
332 if (v)
333 cx_set(PCI_INT_MSK, v);
334
335 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
336}
337
338static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
339{
340 cx23885_irq_enable(dev, 0xffffffff);
341}
342
343void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
344{
345 unsigned long flags;
346 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
347
348 cx_clear(PCI_INT_MSK, mask);
349
350 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
351}
352
353static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
354{
355 cx23885_irq_disable(dev, 0xffffffff);
356}
357
358void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
359{
360 unsigned long flags;
361 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
362
363 dev->pci_irqmask &= ~mask;
364 cx_clear(PCI_INT_MSK, mask);
365
366 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
367}
368
369static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
370{
371 u32 v;
372 unsigned long flags;
373 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
374
375 v = cx_read(PCI_INT_MSK);
376
377 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
378 return v;
379}
380
d19770e5
ST
381static int cx23885_risc_decode(u32 risc)
382{
383 static char *instr[16] = {
b1b81f1d
ST
384 [RISC_SYNC >> 28] = "sync",
385 [RISC_WRITE >> 28] = "write",
386 [RISC_WRITEC >> 28] = "writec",
387 [RISC_READ >> 28] = "read",
388 [RISC_READC >> 28] = "readc",
389 [RISC_JUMP >> 28] = "jump",
390 [RISC_SKIP >> 28] = "skip",
391 [RISC_WRITERM >> 28] = "writerm",
392 [RISC_WRITECM >> 28] = "writecm",
393 [RISC_WRITECR >> 28] = "writecr",
d19770e5
ST
394 };
395 static int incr[16] = {
b1b81f1d
ST
396 [RISC_WRITE >> 28] = 3,
397 [RISC_JUMP >> 28] = 3,
398 [RISC_SKIP >> 28] = 1,
399 [RISC_SYNC >> 28] = 1,
400 [RISC_WRITERM >> 28] = 3,
401 [RISC_WRITECM >> 28] = 3,
402 [RISC_WRITECR >> 28] = 4,
d19770e5
ST
403 };
404 static char *bits[] = {
405 "12", "13", "14", "resync",
406 "cnt0", "cnt1", "18", "19",
407 "20", "21", "22", "23",
408 "irq1", "irq2", "eol", "sol",
409 };
410 int i;
411
09f8be26 412 printk(KERN_DEBUG "0x%08x [ %s", risc,
d19770e5 413 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
44a6481d 414 for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
d19770e5 415 if (risc & (1 << (i + 12)))
e39682b5
MCC
416 pr_cont(" %s", bits[i]);
417 pr_cont(" count=%d ]\n", risc & 0xfff);
d19770e5
ST
418 return incr[risc >> 28] ? incr[risc >> 28] : 1;
419}
420
453afdd9 421static void cx23885_wakeup(struct cx23885_tsport *port,
39e75cfe 422 struct cx23885_dmaqueue *q, u32 count)
d19770e5 423{
d19770e5 424 struct cx23885_buffer *buf;
9a7dc2b0
BL
425 int count_delta;
426 int max_buf_done = 5; /* service maximum five buffers */
427
428 do {
429 if (list_empty(&q->active))
430 return;
431 buf = list_entry(q->active.next,
432 struct cx23885_buffer, queue);
433
434 buf->vb.vb2_buf.timestamp = ktime_get_ns();
435 buf->vb.sequence = q->count++;
436 if (count != (q->count % 65536)) {
437 dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
438 buf->vb.vb2_buf.index, count, q->count);
439 } else {
440 dprintk(7, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
441 buf->vb.vb2_buf.index, count, q->count);
442 }
443 list_del(&buf->queue);
444 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
445 max_buf_done--;
446 /* count register is 16 bits so apply modulo appropriately */
447 count_delta = ((int)count - (int)(q->count % 65536));
448 } while ((count_delta > 0) && (max_buf_done > 0));
d19770e5 449}
d19770e5 450
7b888014 451int cx23885_sram_channel_setup(struct cx23885_dev *dev,
39e75cfe
AB
452 struct sram_channel *ch,
453 unsigned int bpl, u32 risc)
d19770e5 454{
44a6481d 455 unsigned int i, lines;
d19770e5
ST
456 u32 cdt;
457
9c8ced51 458 if (ch->cmds_start == 0) {
22b4e64f 459 dprintk(1, "%s() Erasing channel [%s]\n", __func__,
44a6481d 460 ch->name);
d19770e5
ST
461 cx_write(ch->ptr1_reg, 0);
462 cx_write(ch->ptr2_reg, 0);
463 cx_write(ch->cnt2_reg, 0);
464 cx_write(ch->cnt1_reg, 0);
465 return 0;
466 } else {
22b4e64f 467 dprintk(1, "%s() Configuring channel [%s]\n", __func__,
44a6481d 468 ch->name);
d19770e5
ST
469 }
470
471 bpl = (bpl + 7) & ~7; /* alignment */
472 cdt = ch->cdt;
473 lines = ch->fifo_size / bpl;
474 if (lines > 6)
475 lines = 6;
476 BUG_ON(lines < 2);
477
453afdd9
HV
478 cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
479 cx_write(8 + 4, 12);
86ecc027 480 cx_write(8 + 8, 0);
d19770e5
ST
481
482 /* write CDT */
483 for (i = 0; i < lines; i++) {
22b4e64f 484 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
44a6481d 485 ch->fifo_start + bpl*i);
d19770e5
ST
486 cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
487 cx_write(cdt + 16*i + 4, 0);
488 cx_write(cdt + 16*i + 8, 0);
489 cx_write(cdt + 16*i + 12, 0);
490 }
491
492 /* write CMDS */
493 if (ch->jumponly)
9c8ced51 494 cx_write(ch->cmds_start + 0, 8);
d19770e5 495 else
9c8ced51 496 cx_write(ch->cmds_start + 0, risc);
d19770e5
ST
497 cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
498 cx_write(ch->cmds_start + 8, cdt);
499 cx_write(ch->cmds_start + 12, (lines*16) >> 3);
500 cx_write(ch->cmds_start + 16, ch->ctrl_start);
501 if (ch->jumponly)
9c8ced51 502 cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
d19770e5
ST
503 else
504 cx_write(ch->cmds_start + 20, 64 >> 2);
505 for (i = 24; i < 80; i += 4)
506 cx_write(ch->cmds_start + i, 0);
507
508 /* fill registers */
509 cx_write(ch->ptr1_reg, ch->fifo_start);
510 cx_write(ch->ptr2_reg, cdt);
511 cx_write(ch->cnt2_reg, (lines*16) >> 3);
9c8ced51 512 cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
d19770e5 513
9c8ced51 514 dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
e133be0f 515 dev->bridge,
d19770e5
ST
516 ch->name,
517 bpl,
518 lines);
519
520 return 0;
521}
522
7b888014 523void cx23885_sram_channel_dump(struct cx23885_dev *dev,
39e75cfe 524 struct sram_channel *ch)
d19770e5
ST
525{
526 static char *name[] = {
527 "init risc lo",
528 "init risc hi",
529 "cdt base",
530 "cdt size",
531 "iq base",
532 "iq size",
533 "risc pc lo",
534 "risc pc hi",
535 "iq wr ptr",
536 "iq rd ptr",
537 "cdt current",
538 "pci target lo",
539 "pci target hi",
540 "line / byte",
541 };
542 u32 risc;
44a6481d 543 unsigned int i, j, n;
d19770e5 544
e39682b5
MCC
545 pr_warn("%s: %s - dma channel status dump\n",
546 dev->name, ch->name);
d19770e5 547 for (i = 0; i < ARRAY_SIZE(name); i++)
e39682b5
MCC
548 pr_warn("%s: cmds: %-15s: 0x%08x\n",
549 dev->name, name[i],
550 cx_read(ch->cmds_start + 4*i));
d19770e5
ST
551
552 for (i = 0; i < 4; i++) {
44a6481d 553 risc = cx_read(ch->cmds_start + 4 * (i + 14));
e39682b5 554 pr_warn("%s: risc%d: ", dev->name, i);
d19770e5
ST
555 cx23885_risc_decode(risc);
556 }
557 for (i = 0; i < (64 >> 2); i += n) {
44a6481d
MK
558 risc = cx_read(ch->ctrl_start + 4 * i);
559 /* No consideration for bits 63-32 */
560
e39682b5
MCC
561 pr_warn("%s: (0x%08x) iq %x: ", dev->name,
562 ch->ctrl_start + 4 * i, i);
d19770e5
ST
563 n = cx23885_risc_decode(risc);
564 for (j = 1; j < n; j++) {
44a6481d 565 risc = cx_read(ch->ctrl_start + 4 * (i + j));
e39682b5
MCC
566 pr_warn("%s: iq %x: 0x%08x [ arg #%d ]\n",
567 dev->name, i+j, risc, j);
d19770e5
ST
568 }
569 }
570
e39682b5
MCC
571 pr_warn("%s: fifo: 0x%08x -> 0x%x\n",
572 dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
573 pr_warn("%s: ctrl: 0x%08x -> 0x%x\n",
574 dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
575 pr_warn("%s: ptr1_reg: 0x%08x\n",
576 dev->name, cx_read(ch->ptr1_reg));
577 pr_warn("%s: ptr2_reg: 0x%08x\n",
578 dev->name, cx_read(ch->ptr2_reg));
579 pr_warn("%s: cnt1_reg: 0x%08x\n",
580 dev->name, cx_read(ch->cnt1_reg));
581 pr_warn("%s: cnt2_reg: 0x%08x\n",
582 dev->name, cx_read(ch->cnt2_reg));
d19770e5
ST
583}
584
39e75cfe 585static void cx23885_risc_disasm(struct cx23885_tsport *port,
4d63a25c 586 struct cx23885_riscmem *risc)
d19770e5
ST
587{
588 struct cx23885_dev *dev = port->dev;
44a6481d 589 unsigned int i, j, n;
d19770e5 590
e39682b5 591 pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
d19770e5
ST
592 dev->name, risc->cpu, (unsigned long)risc->dma);
593 for (i = 0; i < (risc->size >> 2); i += n) {
e39682b5 594 pr_info("%s: %04d: ", dev->name, i);
86ecc027 595 n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
d19770e5 596 for (j = 1; j < n; j++)
e39682b5
MCC
597 pr_info("%s: %04d: 0x%08x [ arg #%d ]\n",
598 dev->name, i + j, risc->cpu[i + j], j);
86ecc027 599 if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
d19770e5
ST
600 break;
601 }
602}
603
95f408bb
BL
604static void cx23885_clear_bridge_error(struct cx23885_dev *dev)
605{
606 uint32_t reg1_val = cx_read(TC_REQ); /* read-only */
607 uint32_t reg2_val = cx_read(TC_REQ_SET);
608
609 if (reg1_val && reg2_val) {
610 cx_write(TC_REQ, reg1_val);
611 cx_write(TC_REQ_SET, reg2_val);
612 cx_read(VID_B_DMA);
613 cx_read(VBI_B_DMA);
614 cx_read(VID_C_DMA);
615 cx_read(VBI_C_DMA);
616
617 dev_info(&dev->pci->dev,
618 "dma in progress detected 0x%08x 0x%08x, clearing\n",
619 reg1_val, reg2_val);
620 }
621}
622
39e75cfe 623static void cx23885_shutdown(struct cx23885_dev *dev)
d19770e5
ST
624{
625 /* disable RISC controller */
626 cx_write(DEV_CNTRL2, 0);
627
628 /* Disable all IR activity */
629 cx_write(IR_CNTRL_REG, 0);
630
631 /* Disable Video A/B activity */
632 cx_write(VID_A_DMA_CTL, 0);
633 cx_write(VID_B_DMA_CTL, 0);
634 cx_write(VID_C_DMA_CTL, 0);
635
636 /* Disable Audio activity */
637 cx_write(AUD_INT_DMA_CTL, 0);
638 cx_write(AUD_EXT_DMA_CTL, 0);
639
640 /* Disable Serial port */
641 cx_write(UART_CTL, 0);
642
643 /* Disable Interrupts */
dbe83a3b 644 cx23885_irq_disable_all(dev);
d19770e5
ST
645 cx_write(VID_A_INT_MSK, 0);
646 cx_write(VID_B_INT_MSK, 0);
647 cx_write(VID_C_INT_MSK, 0);
648 cx_write(AUDIO_INT_INT_MSK, 0);
649 cx_write(AUDIO_EXT_INT_MSK, 0);
650
651}
652
39e75cfe 653static void cx23885_reset(struct cx23885_dev *dev)
d19770e5 654{
22b4e64f 655 dprintk(1, "%s()\n", __func__);
d19770e5
ST
656
657 cx23885_shutdown(dev);
658
659 cx_write(PCI_INT_STAT, 0xffffffff);
660 cx_write(VID_A_INT_STAT, 0xffffffff);
661 cx_write(VID_B_INT_STAT, 0xffffffff);
662 cx_write(VID_C_INT_STAT, 0xffffffff);
663 cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
664 cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
665 cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
ecda5966 666 cx_write(PAD_CTRL, 0x00500300);
d19770e5 667
95f408bb
BL
668 /* clear dma in progress */
669 cx23885_clear_bridge_error(dev);
d19770e5
ST
670 mdelay(100);
671
7b888014
ST
672 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
673 720*4, 0);
674 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
675 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
676 188*4, 0);
677 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
678 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
679 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
680 188*4, 0);
681 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
682 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
683 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);
d19770e5 684
a6a3f140 685 cx23885_gpio_setup(dev);
95f408bb
BL
686
687 cx23885_irq_get_mask(dev);
688
689 /* clear dma in progress */
690 cx23885_clear_bridge_error(dev);
d19770e5
ST
691}
692
693
694static int cx23885_pci_quirks(struct cx23885_dev *dev)
695{
22b4e64f 696 dprintk(1, "%s()\n", __func__);
d19770e5 697
2df9a4c2
ST
698 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
699 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
700 * occur on the cx23887 bridge.
701 */
9c8ced51 702 if (dev->bridge == CX23885_BRIDGE_885)
d19770e5 703 cx_clear(RDR_TLCTL0, 1 << 4);
4823e9ee 704
95f408bb
BL
705 /* clear dma in progress */
706 cx23885_clear_bridge_error(dev);
d19770e5
ST
707 return 0;
708}
709
710static int get_resources(struct cx23885_dev *dev)
711{
9c8ced51
ST
712 if (request_mem_region(pci_resource_start(dev->pci, 0),
713 pci_resource_len(dev->pci, 0),
44a6481d 714 dev->name))
d19770e5
ST
715 return 0;
716
e39682b5
MCC
717 pr_err("%s: can't get MMIO memory @ 0x%llx\n",
718 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
d19770e5
ST
719
720 return -EBUSY;
721}
722
9c8ced51
ST
723static int cx23885_init_tsport(struct cx23885_dev *dev,
724 struct cx23885_tsport *port, int portno)
d19770e5 725{
22b4e64f 726 dprintk(1, "%s(portno=%d)\n", __func__, portno);
a6a3f140
ST
727
728 /* Transport bus init dma queue - Common settings */
729 port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
730 port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
b1b81f1d
ST
731 port->vld_misc_val = 0x0;
732 port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);
a6a3f140
ST
733
734 spin_lock_init(&port->slock);
735 port->dev = dev;
736 port->nr = portno;
737
738 INIT_LIST_HEAD(&port->mpegq.active);
d782ffa2 739 mutex_init(&port->frontends.lock);
7bdf84fc 740 INIT_LIST_HEAD(&port->frontends.felist);
d782ffa2
ST
741 port->frontends.active_fe_id = 0;
742
a739a7e4
ST
743 /* This should be hardcoded allow a single frontend
744 * attachment to this tsport, keeping the -dvb.c
745 * code clean and safe.
746 */
9c8ced51 747 if (!port->num_frontends)
a739a7e4
ST
748 port->num_frontends = 1;
749
9c8ced51 750 switch (portno) {
a6a3f140
ST
751 case 1:
752 port->reg_gpcnt = VID_B_GPCNT;
753 port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
754 port->reg_dma_ctl = VID_B_DMA_CTL;
755 port->reg_lngth = VID_B_LNGTH;
756 port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
757 port->reg_gen_ctrl = VID_B_GEN_CTL;
758 port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
759 port->reg_sop_status = VID_B_SOP_STATUS;
760 port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
761 port->reg_vld_misc = VID_B_VLD_MISC;
762 port->reg_ts_clk_en = VID_B_TS_CLK_EN;
763 port->reg_src_sel = VID_B_SRC_SEL;
764 port->reg_ts_int_msk = VID_B_INT_MSK;
b1b81f1d 765 port->reg_ts_int_stat = VID_B_INT_STAT;
a6a3f140
ST
766 port->sram_chno = SRAM_CH03; /* VID_B */
767 port->pci_irqmask = 0x02; /* VID_B bit1 */
768 break;
769 case 2:
770 port->reg_gpcnt = VID_C_GPCNT;
771 port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
772 port->reg_dma_ctl = VID_C_DMA_CTL;
773 port->reg_lngth = VID_C_LNGTH;
774 port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
775 port->reg_gen_ctrl = VID_C_GEN_CTL;
776 port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
777 port->reg_sop_status = VID_C_SOP_STATUS;
778 port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
779 port->reg_vld_misc = VID_C_VLD_MISC;
780 port->reg_ts_clk_en = VID_C_TS_CLK_EN;
781 port->reg_src_sel = 0;
782 port->reg_ts_int_msk = VID_C_INT_MSK;
783 port->reg_ts_int_stat = VID_C_INT_STAT;
784 port->sram_chno = SRAM_CH06; /* VID_C */
785 port->pci_irqmask = 0x04; /* VID_C bit2 */
d19770e5 786 break;
a6a3f140
ST
787 default:
788 BUG();
d19770e5
ST
789 }
790
791 return 0;
792}
793
0ac5881a
ST
794static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
795{
796 switch (cx_read(RDR_CFG2) & 0xff) {
797 case 0x00:
798 /* cx23885 */
799 dev->hwrevision = 0xa0;
800 break;
801 case 0x01:
802 /* CX23885-12Z */
803 dev->hwrevision = 0xa1;
804 break;
805 case 0x02:
25ea66e2 806 /* CX23885-13Z/14Z */
0ac5881a
ST
807 dev->hwrevision = 0xb0;
808 break;
809 case 0x03:
25ea66e2
ST
810 if (dev->pci->device == 0x8880) {
811 /* CX23888-21Z/22Z */
812 dev->hwrevision = 0xc0;
813 } else {
814 /* CX23885-14Z */
815 dev->hwrevision = 0xa4;
816 }
817 break;
818 case 0x04:
819 if (dev->pci->device == 0x8880) {
820 /* CX23888-31Z */
821 dev->hwrevision = 0xd0;
822 } else {
823 /* CX23885-15Z, CX23888-31Z */
824 dev->hwrevision = 0xa5;
825 }
0ac5881a
ST
826 break;
827 case 0x0e:
828 /* CX23887-15Z */
829 dev->hwrevision = 0xc0;
abe1def4 830 break;
0ac5881a
ST
831 case 0x0f:
832 /* CX23887-14Z */
833 dev->hwrevision = 0xb1;
834 break;
835 default:
e39682b5
MCC
836 pr_err("%s() New hardware revision found 0x%x\n",
837 __func__, dev->hwrevision);
0ac5881a
ST
838 }
839 if (dev->hwrevision)
e39682b5 840 pr_info("%s() Hardware revision = 0x%02x\n",
22b4e64f 841 __func__, dev->hwrevision);
0ac5881a 842 else
e39682b5
MCC
843 pr_err("%s() Hardware revision unknown 0x%x\n",
844 __func__, dev->hwrevision);
0ac5881a
ST
845}
846
29f8a0a5
AW
847/* Find the first v4l2_subdev member of the group id in hw */
848struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
849{
850 struct v4l2_subdev *result = NULL;
851 struct v4l2_subdev *sd;
852
853 spin_lock(&dev->v4l2_dev.lock);
854 v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
855 if (sd->grp_id == hw) {
856 result = sd;
857 break;
858 }
859 }
860 spin_unlock(&dev->v4l2_dev.lock);
861 return result;
862}
863
d19770e5
ST
864static int cx23885_dev_setup(struct cx23885_dev *dev)
865{
866 int i;
867
dbe83a3b 868 spin_lock_init(&dev->pci_irqmask_lock);
af7f388e 869 spin_lock_init(&dev->slock);
dbe83a3b 870
d19770e5 871 mutex_init(&dev->lock);
8386c27f 872 mutex_init(&dev->gpio_lock);
d19770e5
ST
873
874 atomic_inc(&dev->refcount);
875
876 dev->nr = cx23885_devcount++;
579f1163
ST
877 sprintf(dev->name, "cx23885[%d]", dev->nr);
878
579f1163 879 /* Configure the internal memory */
9c8ced51 880 if (dev->pci->device == 0x8880) {
5da1a682
BL
881 /* Could be 887 or 888, assume an 888 default */
882 dev->bridge = CX23885_BRIDGE_888;
c7712613 883 /* Apply a sensible clock frequency for the PCIe bridge */
5da1a682 884 dev->clk_freq = 50000000;
7e994302 885 dev->sram_channels = cx23887_sram_channels;
579f1163 886 } else
9c8ced51 887 if (dev->pci->device == 0x8852) {
579f1163 888 dev->bridge = CX23885_BRIDGE_885;
c7712613
ST
889 /* Apply a sensible clock frequency for the PCIe bridge */
890 dev->clk_freq = 28000000;
7e994302 891 dev->sram_channels = cx23885_sram_channels;
579f1163
ST
892 } else
893 BUG();
894
895 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
22b4e64f 896 __func__, dev->bridge);
579f1163
ST
897
898 /* board config */
899 dev->board = UNSET;
900 if (card[dev->nr] < cx23885_bcount)
901 dev->board = card[dev->nr];
902 for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
903 if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
904 dev->pci->subsystem_device == cx23885_subids[i].subdevice)
905 dev->board = cx23885_subids[i].card;
906 if (UNSET == dev->board) {
907 dev->board = CX23885_BOARD_UNKNOWN;
908 cx23885_card_list(dev);
909 }
910
c00ba2c1
BL
911 if (dev->pci->device == 0x8852) {
912 /* no DIF on cx23885, so no analog tuner support possible */
913 if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC)
914 dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885;
915 else if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_DVB)
916 dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885;
917 }
918
c7712613
ST
919 /* If the user specific a clk freq override, apply it */
920 if (cx23885_boards[dev->board].clk_freq > 0)
921 dev->clk_freq = cx23885_boards[dev->board].clk_freq;
922
779c79d4
BL
923 if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
924 dev->pci->subsystem_device == 0x7137) {
925 /* Hauppauge ImpactVCBe device ID 0x7137 is populated
926 * with an 888, and a 25Mhz crystal, instead of the
927 * usual third overtone 50Mhz. The default clock rate must
928 * be overridden so the cx25840 is properly configured
929 */
930 dev->clk_freq = 25000000;
931 }
932
d19770e5
ST
933 dev->pci_bus = dev->pci->bus->number;
934 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
dbe83a3b 935 cx23885_irq_add(dev, 0x001f00);
d19770e5
ST
936
937 /* External Master 1 Bus */
938 dev->i2c_bus[0].nr = 0;
939 dev->i2c_bus[0].dev = dev;
940 dev->i2c_bus[0].reg_stat = I2C1_STAT;
941 dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
942 dev->i2c_bus[0].reg_addr = I2C1_ADDR;
943 dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
944 dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
945 dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
946
947 /* External Master 2 Bus */
948 dev->i2c_bus[1].nr = 1;
949 dev->i2c_bus[1].dev = dev;
950 dev->i2c_bus[1].reg_stat = I2C2_STAT;
951 dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
952 dev->i2c_bus[1].reg_addr = I2C2_ADDR;
953 dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
954 dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
955 dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
956
957 /* Internal Master 3 Bus */
958 dev->i2c_bus[2].nr = 2;
959 dev->i2c_bus[2].dev = dev;
960 dev->i2c_bus[2].reg_stat = I2C3_STAT;
961 dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
a2129af5 962 dev->i2c_bus[2].reg_addr = I2C3_ADDR;
d19770e5
ST
963 dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
964 dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
965 dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
966
b1b81f1d
ST
967 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
968 (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
a6a3f140 969 cx23885_init_tsport(dev, &dev->ts1, 1);
579f1163 970
b1b81f1d
ST
971 if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
972 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
a6a3f140 973 cx23885_init_tsport(dev, &dev->ts2, 2);
d19770e5 974
d19770e5 975 if (get_resources(dev) < 0) {
e39682b5 976 pr_err("CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
44a6481d
MK
977 dev->name, dev->pci->subsystem_vendor,
978 dev->pci->subsystem_device);
d19770e5
ST
979
980 cx23885_devcount--;
fcf94c89 981 return -ENODEV;
d19770e5
ST
982 }
983
d19770e5 984 /* PCIe stuff */
9c8ced51
ST
985 dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
986 pci_resource_len(dev->pci, 0));
d19770e5
ST
987
988 dev->bmmio = (u8 __iomem *)dev->lmmio;
989
e39682b5
MCC
990 pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
991 dev->name, dev->pci->subsystem_vendor,
992 dev->pci->subsystem_device, cx23885_boards[dev->board].name,
993 dev->board, card[dev->nr] == dev->board ?
994 "insmod option" : "autodetected");
d19770e5 995
4823e9ee
ST
996 cx23885_pci_quirks(dev);
997
7b888014
ST
998 /* Assume some sensible defaults */
999 dev->tuner_type = cx23885_boards[dev->board].tuner_type;
1000 dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
557f48d5 1001 dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
7b888014
ST
1002 dev->radio_type = cx23885_boards[dev->board].radio_type;
1003 dev->radio_addr = cx23885_boards[dev->board].radio_addr;
1004
557f48d5
IL
1005 dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
1006 __func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
7b888014 1007 dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
22b4e64f 1008 __func__, dev->radio_type, dev->radio_addr);
7b888014 1009
f659c513
ST
1010 /* The cx23417 encoder has GPIO's that need to be initialised
1011 * before DVB, so that demodulators and tuners are out of
1012 * reset before DVB uses them.
1013 */
1014 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
1015 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
1016 cx23885_mc417_init(dev);
1017
d19770e5
ST
1018 /* init hardware */
1019 cx23885_reset(dev);
1020
1021 cx23885_i2c_register(&dev->i2c_bus[0]);
1022 cx23885_i2c_register(&dev->i2c_bus[1]);
1023 cx23885_i2c_register(&dev->i2c_bus[2]);
d19770e5 1024 cx23885_card_setup(dev);
3aab15af 1025 call_all(dev, tuner, standby);
d19770e5
ST
1026 cx23885_ir_init(dev);
1027
6c43a217
HV
1028 if (dev->board == CX23885_BOARD_VIEWCAST_460E) {
1029 /*
1030 * GPIOs 9/8 are input detection bits for the breakout video
1031 * (gpio 8) and audio (gpio 9) cables. When they're attached,
1032 * this gpios are pulled high. Make sure these GPIOs are marked
1033 * as inputs.
1034 */
1035 cx23885_gpio_enable(dev, 0x300, 0);
1036 }
1037
7b888014
ST
1038 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
1039 if (cx23885_video_register(dev) < 0) {
e39682b5 1040 pr_err("%s() Failed to register analog video adapters on VID_A\n",
07ab29e1 1041 __func__);
7b888014
ST
1042 }
1043 }
1044
1045 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
10d0dcd7
IL
1046 if (cx23885_boards[dev->board].num_fds_portb)
1047 dev->ts1.num_frontends =
1048 cx23885_boards[dev->board].num_fds_portb;
a6a3f140 1049 if (cx23885_dvb_register(&dev->ts1) < 0) {
e39682b5 1050 pr_err("%s() Failed to register dvb adapters on VID_B\n",
22b4e64f 1051 __func__);
a6a3f140 1052 }
b1b81f1d
ST
1053 } else
1054 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1055 if (cx23885_417_register(dev) < 0) {
e39682b5 1056 pr_err("%s() Failed to register 417 on VID_B\n",
b1b81f1d
ST
1057 __func__);
1058 }
579f1163
ST
1059 }
1060
7b888014 1061 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
10d0dcd7
IL
1062 if (cx23885_boards[dev->board].num_fds_portc)
1063 dev->ts2.num_frontends =
1064 cx23885_boards[dev->board].num_fds_portc;
a6a3f140 1065 if (cx23885_dvb_register(&dev->ts2) < 0) {
e39682b5 1066 pr_err("%s() Failed to register dvb on VID_C\n",
b1b81f1d
ST
1067 __func__);
1068 }
1069 } else
1070 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1071 if (cx23885_417_register(dev) < 0) {
e39682b5 1072 pr_err("%s() Failed to register 417 on VID_C\n",
22b4e64f 1073 __func__);
a6a3f140 1074 }
d19770e5
ST
1075 }
1076
0ac5881a
ST
1077 cx23885_dev_checkrevision(dev);
1078
702dd790
IL
1079 /* disable MSI for NetUP cards, otherwise CI is not working */
1080 if (cx23885_boards[dev->board].ci_type > 0)
1081 cx_clear(RDR_RDRCTL1, 1 << 8);
1082
7b134e85
IL
1083 switch (dev->board) {
1084 case CX23885_BOARD_TEVII_S470:
1085 case CX23885_BOARD_TEVII_S471:
1086 cx_clear(RDR_RDRCTL1, 1 << 8);
1087 break;
1088 }
1089
d19770e5 1090 return 0;
d19770e5
ST
1091}
1092
39e75cfe 1093static void cx23885_dev_unregister(struct cx23885_dev *dev)
d19770e5 1094{
9c8ced51
ST
1095 release_mem_region(pci_resource_start(dev->pci, 0),
1096 pci_resource_len(dev->pci, 0));
d19770e5
ST
1097
1098 if (!atomic_dec_and_test(&dev->refcount))
1099 return;
1100
7b888014
ST
1101 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
1102 cx23885_video_unregister(dev);
1103
b1b81f1d 1104 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
a6a3f140
ST
1105 cx23885_dvb_unregister(&dev->ts1);
1106
b1b81f1d
ST
1107 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1108 cx23885_417_unregister(dev);
1109
1110 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
a6a3f140
ST
1111 cx23885_dvb_unregister(&dev->ts2);
1112
b1b81f1d
ST
1113 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1114 cx23885_417_unregister(dev);
1115
d19770e5
ST
1116 cx23885_i2c_unregister(&dev->i2c_bus[2]);
1117 cx23885_i2c_unregister(&dev->i2c_bus[1]);
1118 cx23885_i2c_unregister(&dev->i2c_bus[0]);
1119
1120 iounmap(dev->lmmio);
1121}
1122
9c8ced51 1123static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
44a6481d
MK
1124 unsigned int offset, u32 sync_line,
1125 unsigned int bpl, unsigned int padding,
453afdd9 1126 unsigned int lines, unsigned int lpi, bool jump)
d19770e5
ST
1127{
1128 struct scatterlist *sg;
9e44d632 1129 unsigned int line, todo, sol;
d19770e5 1130
453afdd9
HV
1131
1132 if (jump) {
1133 *(rp++) = cpu_to_le32(RISC_JUMP);
1134 *(rp++) = cpu_to_le32(0);
1135 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1136 }
1137
d19770e5
ST
1138 /* sync instruction */
1139 if (sync_line != NO_SYNC_LINE)
1140 *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
1141
1142 /* scan lines */
1143 sg = sglist;
1144 for (line = 0; line < lines; line++) {
1145 while (offset && offset >= sg_dma_len(sg)) {
1146 offset -= sg_dma_len(sg);
7675fe99 1147 sg = sg_next(sg);
d19770e5 1148 }
9e44d632
MM
1149
1150 if (lpi && line > 0 && !(line % lpi))
1151 sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
1152 else
1153 sol = RISC_SOL;
1154
d19770e5
ST
1155 if (bpl <= sg_dma_len(sg)-offset) {
1156 /* fits into current chunk */
9e44d632 1157 *(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
9c8ced51
ST
1158 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1159 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1160 offset += bpl;
d19770e5
ST
1161 } else {
1162 /* scanline needs to be split */
1163 todo = bpl;
9e44d632 1164 *(rp++) = cpu_to_le32(RISC_WRITE|sol|
d19770e5 1165 (sg_dma_len(sg)-offset));
9c8ced51
ST
1166 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1167 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
d19770e5
ST
1168 todo -= (sg_dma_len(sg)-offset);
1169 offset = 0;
7675fe99 1170 sg = sg_next(sg);
d19770e5 1171 while (todo > sg_dma_len(sg)) {
9c8ced51 1172 *(rp++) = cpu_to_le32(RISC_WRITE|
d19770e5 1173 sg_dma_len(sg));
9c8ced51
ST
1174 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1175 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
d19770e5 1176 todo -= sg_dma_len(sg);
7675fe99 1177 sg = sg_next(sg);
d19770e5 1178 }
9c8ced51
ST
1179 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
1180 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1181 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
d19770e5
ST
1182 offset += todo;
1183 }
1184 offset += padding;
1185 }
1186
1187 return rp;
1188}
1189
4d63a25c 1190int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
7b888014
ST
1191 struct scatterlist *sglist, unsigned int top_offset,
1192 unsigned int bottom_offset, unsigned int bpl,
1193 unsigned int padding, unsigned int lines)
1194{
1195 u32 instructions, fields;
d8eaa58b 1196 __le32 *rp;
7b888014
ST
1197
1198 fields = 0;
1199 if (UNSET != top_offset)
1200 fields++;
1201 if (UNSET != bottom_offset)
1202 fields++;
1203
1204 /* estimate risc mem: worst case is one write per page border +
1205 one write per scan line + syncs + jump (all 2 dwords). Padding
1206 can cause next bpl to start close to a page border. First DMA
1207 region may be smaller than PAGE_SIZE */
1208 /* write and jump need and extra dword */
9c8ced51
ST
1209 instructions = fields * (1 + ((bpl + padding) * lines)
1210 / PAGE_SIZE + lines);
453afdd9 1211 instructions += 5;
4d63a25c
HV
1212 risc->size = instructions * 12;
1213 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1214 if (risc->cpu == NULL)
1215 return -ENOMEM;
7b888014
ST
1216
1217 /* write risc instructions */
1218 rp = risc->cpu;
1219 if (UNSET != top_offset)
1220 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
453afdd9 1221 bpl, padding, lines, 0, true);
7b888014
ST
1222 if (UNSET != bottom_offset)
1223 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
453afdd9 1224 bpl, padding, lines, 0, UNSET == top_offset);
7b888014
ST
1225
1226 /* save pointer to jmp instruction address */
1227 risc->jmp = rp;
9c8ced51 1228 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
7b888014
ST
1229 return 0;
1230}
d19770e5 1231
9e44d632 1232int cx23885_risc_databuffer(struct pci_dev *pci,
4d63a25c 1233 struct cx23885_riscmem *risc,
39e75cfe
AB
1234 struct scatterlist *sglist,
1235 unsigned int bpl,
9e44d632 1236 unsigned int lines, unsigned int lpi)
d19770e5
ST
1237{
1238 u32 instructions;
d8eaa58b 1239 __le32 *rp;
d19770e5
ST
1240
1241 /* estimate risc mem: worst case is one write per page border +
1242 one write per scan line + syncs + jump (all 2 dwords). Here
1243 there is no padding and no sync. First DMA region may be smaller
1244 than PAGE_SIZE */
1245 /* Jump and write need an extra dword */
1246 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
453afdd9 1247 instructions += 4;
d19770e5 1248
4d63a25c
HV
1249 risc->size = instructions * 12;
1250 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1251 if (risc->cpu == NULL)
1252 return -ENOMEM;
d19770e5
ST
1253
1254 /* write risc instructions */
1255 rp = risc->cpu;
9e44d632 1256 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
453afdd9 1257 bpl, 0, lines, lpi, lpi == 0);
d19770e5
ST
1258
1259 /* save pointer to jmp instruction address */
1260 risc->jmp = rp;
9c8ced51 1261 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
d19770e5
ST
1262 return 0;
1263}
1264
4d63a25c 1265int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
5ab27e6d
ST
1266 struct scatterlist *sglist, unsigned int top_offset,
1267 unsigned int bottom_offset, unsigned int bpl,
1268 unsigned int padding, unsigned int lines)
1269{
1270 u32 instructions, fields;
1271 __le32 *rp;
5ab27e6d
ST
1272
1273 fields = 0;
1274 if (UNSET != top_offset)
1275 fields++;
1276 if (UNSET != bottom_offset)
1277 fields++;
1278
1279 /* estimate risc mem: worst case is one write per page border +
1280 one write per scan line + syncs + jump (all 2 dwords). Padding
1281 can cause next bpl to start close to a page border. First DMA
1282 region may be smaller than PAGE_SIZE */
1283 /* write and jump need and extra dword */
1284 instructions = fields * (1 + ((bpl + padding) * lines)
1285 / PAGE_SIZE + lines);
453afdd9 1286 instructions += 5;
4d63a25c
HV
1287 risc->size = instructions * 12;
1288 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1289 if (risc->cpu == NULL)
1290 return -ENOMEM;
5ab27e6d
ST
1291 /* write risc instructions */
1292 rp = risc->cpu;
1293
1294 /* Sync to line 6, so US CC line 21 will appear in line '12'
1295 * in the userland vbi payload */
1296 if (UNSET != top_offset)
420b2176 1297 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
453afdd9 1298 bpl, padding, lines, 0, true);
5ab27e6d
ST
1299
1300 if (UNSET != bottom_offset)
420b2176 1301 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
453afdd9 1302 bpl, padding, lines, 0, UNSET == top_offset);
5ab27e6d
ST
1303
1304
1305
1306 /* save pointer to jmp instruction address */
1307 risc->jmp = rp;
1308 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1309 return 0;
1310}
1311
1312
453afdd9 1313void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
d19770e5 1314{
4d63a25c
HV
1315 struct cx23885_riscmem *risc = &buf->risc;
1316
d19770e5 1317 BUG_ON(in_interrupt());
4d63a25c 1318 pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
d19770e5
ST
1319}
1320
7b888014
ST
1321static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
1322{
1323 struct cx23885_dev *dev = port->dev;
1324
22b4e64f
HH
1325 dprintk(1, "%s() Register Dump\n", __func__);
1326 dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
7b888014 1327 cx_read(DEV_CNTRL2));
22b4e64f 1328 dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
dbe83a3b 1329 cx23885_irq_get_mask(dev));
22b4e64f 1330 dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
7b888014 1331 cx_read(AUDIO_INT_INT_MSK));
22b4e64f 1332 dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
7b888014 1333 cx_read(AUD_INT_DMA_CTL));
22b4e64f 1334 dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
7b888014 1335 cx_read(AUDIO_EXT_INT_MSK));
22b4e64f 1336 dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
7b888014 1337 cx_read(AUD_EXT_DMA_CTL));
22b4e64f 1338 dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
7b888014 1339 cx_read(PAD_CTRL));
22b4e64f 1340 dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
7b888014 1341 cx_read(ALT_PIN_OUT_SEL));
22b4e64f 1342 dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
7b888014 1343 cx_read(GPIO2));
22b4e64f 1344 dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
7b888014 1345 port->reg_gpcnt, cx_read(port->reg_gpcnt));
22b4e64f 1346 dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
7b888014 1347 port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
22b4e64f 1348 dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
7b888014 1349 port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
7b913908
ST
1350 if (port->reg_src_sel)
1351 dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
1352 port->reg_src_sel, cx_read(port->reg_src_sel));
22b4e64f 1353 dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
7b888014 1354 port->reg_lngth, cx_read(port->reg_lngth));
22b4e64f 1355 dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
7b888014 1356 port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
22b4e64f 1357 dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
7b888014 1358 port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
22b4e64f 1359 dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
7b888014 1360 port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
22b4e64f 1361 dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
7b888014 1362 port->reg_sop_status, cx_read(port->reg_sop_status));
22b4e64f 1363 dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
7b888014 1364 port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
22b4e64f 1365 dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
7b888014 1366 port->reg_vld_misc, cx_read(port->reg_vld_misc));
22b4e64f 1367 dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
7b888014 1368 port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
22b4e64f 1369 dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
7b888014
ST
1370 port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
1371}
1372
453afdd9 1373int cx23885_start_dma(struct cx23885_tsport *port,
44a6481d
MK
1374 struct cx23885_dmaqueue *q,
1375 struct cx23885_buffer *buf)
d19770e5
ST
1376{
1377 struct cx23885_dev *dev = port->dev;
a589b665 1378 u32 reg;
d19770e5 1379
22b4e64f 1380 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
453afdd9 1381 dev->width, dev->height, dev->field);
d19770e5 1382
95f408bb
BL
1383 /* clear dma in progress */
1384 cx23885_clear_bridge_error(dev);
1385
d8d12b43
ST
1386 /* Stop the fifo and risc engine for this port */
1387 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1388
d19770e5
ST
1389 /* setup fifo + format */
1390 cx23885_sram_channel_setup(dev,
9c8ced51 1391 &dev->sram_channels[port->sram_chno],
44a6481d 1392 port->ts_packet_size, buf->risc.dma);
9c8ced51
ST
1393 if (debug > 5) {
1394 cx23885_sram_channel_dump(dev,
1395 &dev->sram_channels[port->sram_chno]);
d19770e5 1396 cx23885_risc_disasm(port, &buf->risc);
3328e4fb 1397 }
d19770e5
ST
1398
1399 /* write TS length to chip */
453afdd9 1400 cx_write(port->reg_lngth, port->ts_packet_size);
d19770e5 1401
9c8ced51
ST
1402 if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1403 (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
e39682b5 1404 pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
22b4e64f 1405 __func__,
661c7e44 1406 cx23885_boards[dev->board].portb,
9c8ced51 1407 cx23885_boards[dev->board].portc);
d19770e5
ST
1408 return -EINVAL;
1409 }
1410
a589b665
ST
1411 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1412 cx23885_av_clk(dev, 0);
1413
d19770e5
ST
1414 udelay(100);
1415
579f1163 1416 /* If the port supports SRC SELECT, configure it */
9c8ced51 1417 if (port->reg_src_sel)
579f1163
ST
1418 cx_write(port->reg_src_sel, port->src_sel_val);
1419
b1b81f1d 1420 cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
d19770e5 1421 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
b1b81f1d 1422 cx_write(port->reg_vld_misc, port->vld_misc_val);
d19770e5
ST
1423 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1424 udelay(100);
1425
9c8ced51 1426 /* NOTE: this is 2 (reserved) for portb, does it matter? */
d19770e5
ST
1427 /* reset counter to zero */
1428 cx_write(port->reg_gpcnt_ctl, 3);
453afdd9 1429 q->count = 0;
d19770e5 1430
52ce27bf
ST
1431 /* Set VIDB pins to input */
1432 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1433 reg = cx_read(PAD_CTRL);
1434 reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1435 cx_write(PAD_CTRL, reg);
1436 }
1437
1438 /* Set VIDC pins to input */
1439 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1440 reg = cx_read(PAD_CTRL);
1441 reg &= ~0x4; /* Clear TS2_SOP_OE */
1442 cx_write(PAD_CTRL, reg);
1443 }
1444
1445 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
a589b665
ST
1446
1447 reg = cx_read(PAD_CTRL);
1448 reg = reg & ~0x1; /* Clear TS1_OE */
1449
1450 /* FIXME, bit 2 writing here is questionable */
1451 /* set TS1_SOP_OE and TS1_OE_HI */
1452 reg = reg | 0xa;
1453 cx_write(PAD_CTRL, reg);
1454
1455 /* FIXME and these two registers should be documented. */
1456 cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1457 cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1458 }
1459
9c8ced51 1460 switch (dev->bridge) {
d19770e5 1461 case CX23885_BRIDGE_885:
3bd40659 1462 case CX23885_BRIDGE_887:
25ea66e2 1463 case CX23885_BRIDGE_888:
d19770e5 1464 /* enable irqs */
9c8ced51 1465 dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
95f408bb
BL
1466 /* clear dma in progress */
1467 cx23885_clear_bridge_error(dev);
d19770e5
ST
1468 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
1469 cx_set(port->reg_dma_ctl, port->dma_ctl_val);
95f408bb
BL
1470
1471 /* clear dma in progress */
1472 cx23885_clear_bridge_error(dev);
dbe83a3b
AW
1473 cx23885_irq_add(dev, port->pci_irqmask);
1474 cx23885_irq_enable_all(dev);
95f408bb
BL
1475
1476 /* clear dma in progress */
1477 cx23885_clear_bridge_error(dev);
d19770e5 1478 break;
d19770e5 1479 default:
579f1163 1480 BUG();
d19770e5
ST
1481 }
1482
d19770e5 1483 cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
95f408bb
BL
1484 /* clear dma in progress */
1485 cx23885_clear_bridge_error(dev);
d19770e5 1486
a589b665
ST
1487 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1488 cx23885_av_clk(dev, 1);
1489
7b888014
ST
1490 if (debug > 4)
1491 cx23885_tsport_reg_dump(port);
1492
95f408bb
BL
1493 cx23885_irq_get_mask(dev);
1494
1495 /* clear dma in progress */
1496 cx23885_clear_bridge_error(dev);
1497
d19770e5
ST
1498 return 0;
1499}
1500
1501static int cx23885_stop_dma(struct cx23885_tsport *port)
1502{
1503 struct cx23885_dev *dev = port->dev;
a589b665 1504 u32 reg;
95f408bb
BL
1505 int delay = 0;
1506 uint32_t reg1_val;
1507 uint32_t reg2_val;
a589b665 1508
22b4e64f 1509 dprintk(1, "%s()\n", __func__);
d19770e5
ST
1510
1511 /* Stop interrupts and DMA */
1512 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1513 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
95f408bb
BL
1514 /* just in case wait for any dma to complete before allowing dealloc */
1515 mdelay(20);
1516 for (delay = 0; delay < 100; delay++) {
1517 reg1_val = cx_read(TC_REQ);
1518 reg2_val = cx_read(TC_REQ_SET);
1519 if (reg1_val == 0 || reg2_val == 0)
1520 break;
1521 mdelay(1);
1522 }
1523 dev_dbg(&dev->pci->dev, "delay=%d reg1=0x%08x reg2=0x%08x\n",
1524 delay, reg1_val, reg2_val);
d19770e5 1525
52ce27bf 1526 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
a589b665
ST
1527 reg = cx_read(PAD_CTRL);
1528
1529 /* Set TS1_OE */
1530 reg = reg | 0x1;
1531
1532 /* clear TS1_SOP_OE and TS1_OE_HI */
1533 reg = reg & ~0xa;
1534 cx_write(PAD_CTRL, reg);
1535 cx_write(port->reg_src_sel, 0);
1536 cx_write(port->reg_gen_ctrl, 8);
a589b665
ST
1537 }
1538
1539 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1540 cx23885_av_clk(dev, 0);
1541
d19770e5
ST
1542 return 0;
1543}
1544
d19770e5
ST
1545/* ------------------------------------------------------------------ */
1546
453afdd9 1547int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
d19770e5
ST
1548{
1549 struct cx23885_dev *dev = port->dev;
1550 int size = port->ts_packet_size * port->ts_packet_count;
2d700715 1551 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
d19770e5 1552
22b4e64f 1553 dprintk(1, "%s: %p\n", __func__, buf);
2d700715 1554 if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
d19770e5 1555 return -EINVAL;
2d700715 1556 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
d19770e5 1557
453afdd9
HV
1558 cx23885_risc_databuffer(dev->pci, &buf->risc,
1559 sgt->sgl,
1560 port->ts_packet_size, port->ts_packet_count, 0);
1561 return 0;
d19770e5
ST
1562}
1563
453afdd9
HV
1564/*
1565 * The risc program for each buffer works as follows: it starts with a simple
1566 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
1567 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
1568 * the initial JUMP).
1569 *
1570 * This is the risc program of the first buffer to be queued if the active list
1571 * is empty and it just keeps DMAing this buffer without generating any
1572 * interrupts.
1573 *
1574 * If a new buffer is added then the initial JUMP in the code for that buffer
1575 * will generate an interrupt which signals that the previous buffer has been
1576 * DMAed successfully and that it can be returned to userspace.
1577 *
1578 * It also sets the final jump of the previous buffer to the start of the new
1579 * buffer, thus chaining the new buffer into the DMA chain. This is a single
1580 * atomic u32 write, so there is no race condition.
1581 *
1582 * The end-result of all this that you only get an interrupt when a buffer
1583 * is ready, so the control flow is very easy.
1584 */
d19770e5
ST
1585void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
1586{
1587 struct cx23885_buffer *prev;
1588 struct cx23885_dev *dev = port->dev;
1589 struct cx23885_dmaqueue *cx88q = &port->mpegq;
453afdd9 1590 unsigned long flags;
d19770e5 1591
453afdd9
HV
1592 buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
1593 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
1594 buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
d19770e5
ST
1595 buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
1596
453afdd9 1597 spin_lock_irqsave(&dev->slock, flags);
d19770e5 1598 if (list_empty(&cx88q->active)) {
453afdd9 1599 list_add_tail(&buf->queue, &cx88q->active);
44a6481d 1600 dprintk(1, "[%p/%d] %s - first active\n",
2d700715 1601 buf, buf->vb.vb2_buf.index, __func__);
d19770e5 1602 } else {
453afdd9 1603 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
44a6481d 1604 prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
453afdd9
HV
1605 queue);
1606 list_add_tail(&buf->queue, &cx88q->active);
d19770e5 1607 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
9c8ced51 1608 dprintk(1, "[%p/%d] %s - append to active\n",
2d700715 1609 buf, buf->vb.vb2_buf.index, __func__);
d19770e5 1610 }
453afdd9 1611 spin_unlock_irqrestore(&dev->slock, flags);
d19770e5
ST
1612}
1613
1614/* ----------------------------------------------------------- */
1615
453afdd9 1616static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
d19770e5 1617{
d19770e5
ST
1618 struct cx23885_dmaqueue *q = &port->mpegq;
1619 struct cx23885_buffer *buf;
1620 unsigned long flags;
1621
44a6481d 1622 spin_lock_irqsave(&port->slock, flags);
d19770e5 1623 while (!list_empty(&q->active)) {
44a6481d 1624 buf = list_entry(q->active.next, struct cx23885_buffer,
453afdd9
HV
1625 queue);
1626 list_del(&buf->queue);
2d700715 1627 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
44a6481d 1628 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
2d700715
JS
1629 buf, buf->vb.vb2_buf.index, reason,
1630 (unsigned long)buf->risc.dma);
d19770e5 1631 }
44a6481d 1632 spin_unlock_irqrestore(&port->slock, flags);
d19770e5
ST
1633}
1634
b1b81f1d
ST
1635void cx23885_cancel_buffers(struct cx23885_tsport *port)
1636{
9c8ced51 1637 dprintk(1, "%s()\n", __func__);
d19770e5 1638 cx23885_stop_dma(port);
453afdd9 1639 do_cancel_buffers(port, "cancel");
d19770e5
ST
1640}
1641
b1b81f1d
ST
1642int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
1643{
1644 /* FIXME: port1 assumption here. */
1645 struct cx23885_tsport *port = &dev->ts1;
1646 int count = 0;
1647 int handled = 0;
1648
1649 if (status == 0)
1650 return handled;
1651
1652 count = cx_read(port->reg_gpcnt);
1653 dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
1654 status, cx_read(port->reg_ts_int_msk), count);
1655
1656 if ((status & VID_B_MSK_BAD_PKT) ||
1657 (status & VID_B_MSK_OPC_ERR) ||
1658 (status & VID_B_MSK_VBI_OPC_ERR) ||
1659 (status & VID_B_MSK_SYNC) ||
1660 (status & VID_B_MSK_VBI_SYNC) ||
1661 (status & VID_B_MSK_OF) ||
1662 (status & VID_B_MSK_VBI_OF)) {
e39682b5 1663 pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n",
07ab29e1 1664 dev->name, status);
b1b81f1d
ST
1665 if (status & VID_B_MSK_BAD_PKT)
1666 dprintk(1, " VID_B_MSK_BAD_PKT\n");
1667 if (status & VID_B_MSK_OPC_ERR)
1668 dprintk(1, " VID_B_MSK_OPC_ERR\n");
1669 if (status & VID_B_MSK_VBI_OPC_ERR)
1670 dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
1671 if (status & VID_B_MSK_SYNC)
1672 dprintk(1, " VID_B_MSK_SYNC\n");
1673 if (status & VID_B_MSK_VBI_SYNC)
1674 dprintk(1, " VID_B_MSK_VBI_SYNC\n");
1675 if (status & VID_B_MSK_OF)
1676 dprintk(1, " VID_B_MSK_OF\n");
1677 if (status & VID_B_MSK_VBI_OF)
1678 dprintk(1, " VID_B_MSK_VBI_OF\n");
1679
1680 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1681 cx23885_sram_channel_dump(dev,
1682 &dev->sram_channels[port->sram_chno]);
1683 cx23885_417_check_encoder(dev);
1684 } else if (status & VID_B_MSK_RISCI1) {
1685 dprintk(7, " VID_B_MSK_RISCI1\n");
1686 spin_lock(&port->slock);
1687 cx23885_wakeup(port, &port->mpegq, count);
1688 spin_unlock(&port->slock);
b1b81f1d
ST
1689 }
1690 if (status) {
1691 cx_write(port->reg_ts_int_stat, status);
1692 handled = 1;
1693 }
1694
1695 return handled;
1696}
1697
a6a3f140
ST
1698static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
1699{
1700 struct cx23885_dev *dev = port->dev;
1701 int handled = 0;
1702 u32 count;
1703
b1b81f1d
ST
1704 if ((status & VID_BC_MSK_OPC_ERR) ||
1705 (status & VID_BC_MSK_BAD_PKT) ||
1706 (status & VID_BC_MSK_SYNC) ||
9c8ced51
ST
1707 (status & VID_BC_MSK_OF)) {
1708
a6a3f140 1709 if (status & VID_BC_MSK_OPC_ERR)
9c8ced51
ST
1710 dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
1711 VID_BC_MSK_OPC_ERR);
1712
a6a3f140 1713 if (status & VID_BC_MSK_BAD_PKT)
9c8ced51
ST
1714 dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
1715 VID_BC_MSK_BAD_PKT);
1716
a6a3f140 1717 if (status & VID_BC_MSK_SYNC)
9c8ced51
ST
1718 dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
1719 VID_BC_MSK_SYNC);
1720
a6a3f140 1721 if (status & VID_BC_MSK_OF)
9c8ced51
ST
1722 dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
1723 VID_BC_MSK_OF);
a6a3f140 1724
e39682b5 1725 pr_err("%s: mpeg risc op code error\n", dev->name);
a6a3f140
ST
1726
1727 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
b1b81f1d
ST
1728 cx23885_sram_channel_dump(dev,
1729 &dev->sram_channels[port->sram_chno]);
a6a3f140
ST
1730
1731 } else if (status & VID_BC_MSK_RISCI1) {
1732
1733 dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);
1734
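		/* A transfer completed: sample the transfer counter and let
		 * cx23885_wakeup() hand finished buffers back to videobuf2. */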
1735 spin_lock(&port->slock);
1736 count = cx_read(port->reg_gpcnt);
1737 cx23885_wakeup(port, &port->mpegq, count);
1738 spin_unlock(&port->slock);
1739
a6a3f140
ST
1740 }
1741 if (status) {
1742 cx_write(port->reg_ts_int_stat, status);
1743 handled = 1;
1744 }
1745
1746 return handled;
1747}
1748
03121f05 1749static irqreturn_t cx23885_irq(int irq, void *dev_id)
d19770e5
ST
1750{
1751 struct cx23885_dev *dev = dev_id;
a6a3f140
ST
1752 struct cx23885_tsport *ts1 = &dev->ts1;
1753 struct cx23885_tsport *ts2 = &dev->ts2;
d19770e5 1754 u32 pci_status, pci_mask;
7b888014 1755 u32 vida_status, vida_mask;
9e44d632 1756 u32 audint_status, audint_mask;
6f074abb 1757 u32 ts1_status, ts1_mask;
d19770e5 1758 u32 ts2_status, ts2_mask;
7b888014 1759 int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
9e44d632 1760 int audint_count = 0;
98d109f9 1761 bool subdev_handled;
d19770e5
ST
1762
1763 pci_status = cx_read(PCI_INT_STAT);
dbe83a3b 1764 pci_mask = cx23885_irq_get_mask(dev);
3b8315f3
BL
1765 if ((pci_status & pci_mask) == 0) {
1766 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1767 pci_status, pci_mask);
1768 goto out;
1769 }
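	/* Snapshot the status and mask registers of every interrupt block
	 * before dispatching to the per-port handlers below. */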
1770
7b888014
ST
1771 vida_status = cx_read(VID_A_INT_STAT);
1772 vida_mask = cx_read(VID_A_INT_MSK);
9e44d632
MM
1773 audint_status = cx_read(AUDIO_INT_INT_STAT);
1774 audint_mask = cx_read(AUDIO_INT_INT_MSK);
6f074abb
ST
1775 ts1_status = cx_read(VID_B_INT_STAT);
1776 ts1_mask = cx_read(VID_B_INT_MSK);
d19770e5
ST
1777 ts2_status = cx_read(VID_C_INT_STAT);
1778 ts2_mask = cx_read(VID_C_INT_MSK);
1779
3b8315f3
BL
1780 if (((pci_status & pci_mask) == 0) &&
1781 ((ts2_status & ts2_mask) == 0) &&
1782 ((ts1_status & ts1_mask) == 0))
d19770e5
ST
1783 goto out;
1784
7b888014 1785 vida_count = cx_read(VID_A_GPCNT);
9e44d632 1786 audint_count = cx_read(AUD_INT_A_GPCNT);
a6a3f140
ST
1787 ts1_count = cx_read(ts1->reg_gpcnt);
1788 ts2_count = cx_read(ts2->reg_gpcnt);
7b888014
ST
1789 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1790 pci_status, pci_mask);
1791 dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1792 vida_status, vida_mask, vida_count);
9e44d632
MM
1793 dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
1794 audint_status, audint_mask, audint_count);
7b888014
ST
1795 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
1796 ts1_status, ts1_mask, ts1_count);
1797 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
1798 ts2_status, ts2_mask, ts2_count);
d19770e5 1799
f59ad611
AW
1800 if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
1801 PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
1802 PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
1803 PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
1804 PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
98d109f9 1805 PCI_MSK_AV_CORE | PCI_MSK_IR)) {
d19770e5
ST
1806
1807 if (pci_status & PCI_MSK_RISC_RD)
9c8ced51
ST
1808 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
1809 PCI_MSK_RISC_RD);
1810
d19770e5 1811 if (pci_status & PCI_MSK_RISC_WR)
9c8ced51
ST
1812 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
1813 PCI_MSK_RISC_WR);
1814
d19770e5 1815 if (pci_status & PCI_MSK_AL_RD)
9c8ced51
ST
1816 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
1817 PCI_MSK_AL_RD);
1818
d19770e5 1819 if (pci_status & PCI_MSK_AL_WR)
9c8ced51
ST
1820 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
1821 PCI_MSK_AL_WR);
1822
d19770e5 1823 if (pci_status & PCI_MSK_APB_DMA)
9c8ced51
ST
1824 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
1825 PCI_MSK_APB_DMA);
1826
d19770e5 1827 if (pci_status & PCI_MSK_VID_C)
9c8ced51
ST
1828 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
1829 PCI_MSK_VID_C);
1830
d19770e5 1831 if (pci_status & PCI_MSK_VID_B)
9c8ced51
ST
1832 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
1833 PCI_MSK_VID_B);
1834
d19770e5 1835 if (pci_status & PCI_MSK_VID_A)
9c8ced51
ST
1836 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
1837 PCI_MSK_VID_A);
1838
d19770e5 1839 if (pci_status & PCI_MSK_AUD_INT)
9c8ced51
ST
1840 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
1841 PCI_MSK_AUD_INT);
1842
d19770e5 1843 if (pci_status & PCI_MSK_AUD_EXT)
9c8ced51
ST
1844 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
1845 PCI_MSK_AUD_EXT);
d19770e5 1846
5a23b076
IL
1847 if (pci_status & PCI_MSK_GPIO0)
1848 dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
1849 PCI_MSK_GPIO0);
1850
1851 if (pci_status & PCI_MSK_GPIO1)
1852 dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
1853 PCI_MSK_GPIO1);
f59ad611 1854
98d109f9
AW
1855 if (pci_status & PCI_MSK_AV_CORE)
1856 dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
1857 PCI_MSK_AV_CORE);
1858
f59ad611
AW
1859 if (pci_status & PCI_MSK_IR)
1860 dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
1861 PCI_MSK_IR);
d19770e5
ST
1862 }
1863
78db8547
IL
1864 if (cx23885_boards[dev->board].ci_type == 1 &&
1865 (pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
1866 handled += netup_ci_slot_status(dev, pci_status);
a26ccc9d 1867
78db8547
IL
1868 if (cx23885_boards[dev->board].ci_type == 2 &&
1869 (pci_status & PCI_MSK_GPIO0))
1870 handled += altera_ci_irq(dev);
5a23b076 1871
7b888014
ST
1872 if (ts1_status) {
1873 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1874 handled += cx23885_irq_ts(ts1, ts1_status);
b1b81f1d
ST
1875 else
1876 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1877 handled += cx23885_irq_417(dev, ts1_status);
7b888014
ST
1878 }
1879
1880 if (ts2_status) {
1881 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1882 handled += cx23885_irq_ts(ts2, ts2_status);
b1b81f1d
ST
1883 else
1884 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1885 handled += cx23885_irq_417(dev, ts2_status);
7b888014 1886 }
6f074abb 1887
7b888014
ST
1888 if (vida_status)
1889 handled += cx23885_video_irq(dev, vida_status);
6f074abb 1890
9e44d632
MM
1891 if (audint_status)
1892 handled += cx23885_audio_irq(dev, audint_status, audint_mask);
1893
f59ad611 1894 if (pci_status & PCI_MSK_IR) {
98d109f9 1895 subdev_handled = false;
260e689b 1896 v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
98d109f9
AW
1897 pci_status, &subdev_handled);
1898 if (subdev_handled)
1899 handled++;
1900 }
1901
e5514f10
AW
1902 if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
1903 cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
c21412f5 1904 schedule_work(&dev->cx25840_work);
e5514f10 1905 handled++;
f59ad611
AW
1906 }
1907
6f074abb 1908 if (handled)
3b8315f3 1909 cx_write(PCI_INT_STAT, pci_status & pci_mask);
d19770e5
ST
1910out:
1911 return IRQ_RETVAL(handled);
1912}
1913
f59ad611
AW
1914static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1915 unsigned int notification, void *arg)
1916{
1917 struct cx23885_dev *dev;
1918
1919 if (sd == NULL)
1920 return;
1921
1922 dev = to_cx23885(sd->v4l2_dev);
1923
1924 switch (notification) {
e5514f10 1925 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
f59ad611
AW
1926 if (sd == dev->sd_ir)
1927 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1928 break;
e5514f10 1929 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
f59ad611
AW
1930 if (sd == dev->sd_ir)
1931 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1932 break;
1933 }
1934}
1935
1936static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
1937{
e5514f10 1938 INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
f59ad611
AW
1939 INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
1940 INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
1941 dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
1942}
1943
6de72bd6 1944static inline int encoder_on_portb(struct cx23885_dev *dev)
6f8bee9b
ST
1945{
1946 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1947}
1948
6de72bd6 1949static inline int encoder_on_portc(struct cx23885_dev *dev)
6f8bee9b
ST
1950{
1951 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1952}
1953
1954/* The mask represents 32 different GPIOs; the GPIOs are split across
1955 * multiple registers depending on the board configuration (and on whether
1956 * the 417 encoder, with its own GPIOs, is present). Each GPIO bit will
1957 * be pushed into the correct hardware register, regardless of its
1958 * physical location. Certain registers are shared, so we sanity check
1959 * and report errors if we think we're tampering with a GPIO that might
1960 * be assigned to the encoder (and used for the host bus).
1961 *
1962 * GPIO 2 thru 0 - On the cx23885 bridge
1963 * GPIO 18 thru 3 - On the cx23417 host bus interface
1964 * GPIO 23 thru 19 - On the cx25840 a/v core
1965 */
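/*
 * Illustrative use from a board setup path, toggling one of the three
 * bridge GPIOs (bits 2-0).  This is only a sketch; GPIO_1 here is assumed
 * to be the bit-1 mask from cx23885.h:
 *
 *	cx23885_gpio_enable(dev, GPIO_1, 1);	(configure the pin as output)
 *	cx23885_gpio_set(dev, GPIO_1);		(drive it high)
 *	mdelay(50);
 *	cx23885_gpio_clear(dev, GPIO_1);	(drive it low)
 */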
1966void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1967{
1968 if (mask & 0x7)
1969 cx_set(GP0_IO, mask & 0x7);
1970
1971 if (mask & 0x0007fff8) {
1972 if (encoder_on_portb(dev) || encoder_on_portc(dev))
e39682b5 1973 pr_err("%s: Setting GPIO on encoder ports\n",
6f8bee9b
ST
1974 dev->name);
1975 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1976 }
1977
1978 /* TODO: 23-19 */
1979 if (mask & 0x00f80000)
e39682b5 1980 pr_info("%s: Unsupported\n", dev->name);
6f8bee9b
ST
1981}
1982
1983void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1984{
1985 if (mask & 0x00000007)
1986 cx_clear(GP0_IO, mask & 0x7);
1987
1988 if (mask & 0x0007fff8) {
1989 if (encoder_on_portb(dev) || encoder_on_portc(dev))
e39682b5 1990 pr_err("%s: Clearing GPIO on encoder ports\n",
6f8bee9b
ST
1991 dev->name);
1992 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1993 }
1994
1995 /* TODO: 23-19 */
1996 if (mask & 0x00f80000)
e39682b5 1997 pr_info("%s: Unsupported\n", dev->name);
6f8bee9b
ST
1998}
1999
09ea33e5
IL
2000u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
2001{
2002 if (mask & 0x00000007)
2003 return (cx_read(GP0_IO) >> 8) & mask & 0x7;
2004
2005 if (mask & 0x0007fff8) {
2006 if (encoder_on_portb(dev) || encoder_on_portc(dev))
e39682b5 2007 pr_err("%s: Reading GPIO on encoder ports\n",
09ea33e5
IL
2008 dev->name);
2009 return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
2010 }
2011
2012 /* TODO: 23-19 */
2013 if (mask & 0x00f80000)
e39682b5 2014 pr_info("%s: Unsupported\n", dev->name);
09ea33e5
IL
2015
2016 return 0;
2017}
2018
6f8bee9b
ST
2019void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
2020{
2021 if ((mask & 0x00000007) && asoutput)
2022 cx_set(GP0_IO, (mask & 0x7) << 16);
2023 else if ((mask & 0x00000007) && !asoutput)
2024 cx_clear(GP0_IO, (mask & 0x7) << 16);
2025
2026 if (mask & 0x0007fff8) {
2027 if (encoder_on_portb(dev) || encoder_on_portc(dev))
e39682b5 2028 pr_err("%s: Enabling GPIO on encoder ports\n",
6f8bee9b
ST
2029 dev->name);
2030 }
2031
2032 /* MC417_OEN is active low for output, write 1 for an input */
2033 if ((mask & 0x0007fff8) && asoutput)
2034 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
2035
2036 else if ((mask & 0x0007fff8) && !asoutput)
2037 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
2038
2039 /* TODO: 23-19 */
2040}
2041
4c62e976
GKH
2042static int cx23885_initdev(struct pci_dev *pci_dev,
2043 const struct pci_device_id *pci_id)
d19770e5
ST
2044{
2045 struct cx23885_dev *dev;
da59a4de 2046 struct v4l2_ctrl_handler *hdl;
d19770e5
ST
2047 int err;
2048
44a6481d 2049 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
d19770e5
ST
2050 if (NULL == dev)
2051 return -ENOMEM;
2052
c0714f6c
HV
2053 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
2054 if (err < 0)
2055 goto fail_free;
2056
da59a4de
HV
2057 hdl = &dev->ctrl_handler;
2058 v4l2_ctrl_handler_init(hdl, 6);
2059 if (hdl->error) {
2060 err = hdl->error;
2061 goto fail_ctrl;
2062 }
2063 dev->v4l2_dev.ctrl_handler = hdl;
2064
f59ad611
AW
2065 /* Prepare to handle notifications from subdevices */
2066 cx23885_v4l2_dev_notify_init(dev);
2067
d19770e5
ST
2068 /* pci init */
2069 dev->pci = pci_dev;
2070 if (pci_enable_device(pci_dev)) {
2071 err = -EIO;
da59a4de 2072 goto fail_ctrl;
d19770e5
ST
2073 }
2074
2075 if (cx23885_dev_setup(dev) < 0) {
2076 err = -EINVAL;
da59a4de 2077 goto fail_ctrl;
d19770e5
ST
2078 }
2079
2080 /* print pci info */
abd34d8d 2081 dev->pci_rev = pci_dev->revision;
d19770e5 2082 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
e39682b5 2083 pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
07ab29e1 2084 dev->name,
d19770e5 2085 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
a589b665
ST
2086 dev->pci_lat,
2087 (unsigned long long)pci_resource_start(pci_dev, 0));
d19770e5
ST
2088
2089 pci_set_master(pci_dev);
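	/* The driver only requests 32-bit DMA addressing; fail the probe if
	 * that cannot be satisfied. */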
1a47de6e
CH
2090 err = pci_set_dma_mask(pci_dev, 0xffffffff);
2091 if (err) {
e39682b5 2092 pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
2bc46b3a 2093 goto fail_ctrl;
d19770e5
ST
2094 }
2095
d7515b88 2096 err = request_irq(pci_dev->irq, cx23885_irq,
3e018fe4 2097 IRQF_SHARED, dev->name, dev);
d19770e5 2098 if (err < 0) {
e39682b5 2099 pr_err("%s: can't get IRQ %d\n",
d19770e5
ST
2100 dev->name, pci_dev->irq);
2101 goto fail_irq;
2102 }
2103
afd96668
HV
2104 switch (dev->board) {
2105 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
78db8547
IL
2106 cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2107 break;
2108 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2109 cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
afd96668
HV
2110 break;
2111 }
5a23b076 2112
f59ad611
AW
2113 /*
2114 * The CX2388[58] IR controller can start firing interrupts when
2115 * enabled, so these have to take place after the cx23885_irq() handler
2116 * is hooked up by the call to request_irq() above.
2117 */
2118 cx23885_ir_pci_int_enable(dev);
dbda8f70 2119 cx23885_input_init(dev);
f59ad611 2120
d19770e5
ST
2121 return 0;
2122
2123fail_irq:
2124 cx23885_dev_unregister(dev);
da59a4de
HV
2125fail_ctrl:
2126 v4l2_ctrl_handler_free(hdl);
c0714f6c 2127 v4l2_device_unregister(&dev->v4l2_dev);
d19770e5
ST
2128fail_free:
2129 kfree(dev);
2130 return err;
2131}
2132
4c62e976 2133static void cx23885_finidev(struct pci_dev *pci_dev)
d19770e5 2134{
c0714f6c
HV
2135 struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
2136 struct cx23885_dev *dev = to_cx23885(v4l2_dev);
d19770e5 2137
dbda8f70 2138 cx23885_input_fini(dev);
f59ad611 2139 cx23885_ir_fini(dev);
d19770e5 2140
f59ad611 2141 cx23885_shutdown(dev);
29f8a0a5 2142
d19770e5
ST
2143 /* unregister stuff */
2144 free_irq(pci_dev->irq, dev);
d19770e5 2145
8d4d9329
HV
2146 pci_disable_device(pci_dev);
2147
d19770e5 2148 cx23885_dev_unregister(dev);
da59a4de 2149 v4l2_ctrl_handler_free(&dev->ctrl_handler);
c0714f6c 2150 v4l2_device_unregister(v4l2_dev);
d19770e5
ST
2151 kfree(dev);
2152}
2153
0fcefb39 2154static const struct pci_device_id cx23885_pci_tbl[] = {
d19770e5
ST
2155 {
2156 /* CX23885 */
2157 .vendor = 0x14f1,
2158 .device = 0x8852,
2159 .subvendor = PCI_ANY_ID,
2160 .subdevice = PCI_ANY_ID,
9c8ced51 2161 }, {
d19770e5
ST
2162 /* CX23887 Rev 2 */
2163 .vendor = 0x14f1,
2164 .device = 0x8880,
2165 .subvendor = PCI_ANY_ID,
2166 .subdevice = PCI_ANY_ID,
9c8ced51 2167 }, {
d19770e5
ST
2168 /* --- end of list --- */
2169 }
2170};
2171MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2172
2173static struct pci_driver cx23885_pci_driver = {
2174 .name = "cx23885",
2175 .id_table = cx23885_pci_tbl,
2176 .probe = cx23885_initdev,
4c62e976 2177 .remove = cx23885_finidev,
d19770e5
ST
2178 /* TODO */
2179 .suspend = NULL,
2180 .resume = NULL,
2181};
2182
9710e7a7 2183static int __init cx23885_init(void)
d19770e5 2184{
e39682b5 2185 pr_info("cx23885 driver version %s loaded\n",
1990d50b 2186 CX23885_VERSION);
d19770e5
ST
2187 return pci_register_driver(&cx23885_pci_driver);
2188}
2189
9710e7a7 2190static void __exit cx23885_fini(void)
d19770e5
ST
2191{
2192 pci_unregister_driver(&cx23885_pci_driver);
2193}
2194
2195module_init(cx23885_init);
2196module_exit(cx23885_fini);