]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - drivers/media/pci/cx23885/cx23885-core.c
treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 157
[mirror_ubuntu-hirsute-kernel.git] / drivers / media / pci / cx23885 / cx23885-core.c
CommitLineData
c942fddf 1// SPDX-License-Identifier: GPL-2.0-or-later
d19770e5
ST
2/*
3 * Driver for the Conexant CX23885 PCIe bridge
4 *
6d897616 5 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
d19770e5
ST
6 */
7
e39682b5
MCC
8#include "cx23885.h"
9
d19770e5
ST
10#include <linux/init.h>
11#include <linux/list.h>
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/kmod.h>
15#include <linux/kernel.h>
4bd46aa0 16#include <linux/pci.h>
d19770e5
ST
17#include <linux/slab.h>
18#include <linux/interrupt.h>
19#include <linux/delay.h>
20#include <asm/div64.h>
78db8547 21#include <linux/firmware.h>
d19770e5 22
5a23b076 23#include "cimax2.h"
78db8547 24#include "altera-ci.h"
29f8a0a5 25#include "cx23888-ir.h"
f59ad611 26#include "cx23885-ir.h"
e5514f10 27#include "cx23885-av.h"
dbda8f70 28#include "cx23885-input.h"
d19770e5
ST
29
30MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
6d897616 31MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
d19770e5 32MODULE_LICENSE("GPL");
1990d50b 33MODULE_VERSION(CX23885_VERSION);
d19770e5 34
4bd46aa0
BL
35/*
36 * Some platforms have been found to require periodic resetting of the DMA
37 * engine. Ryzen and XEON platforms are known to be affected. The symptom
38 * encountered is "mpeg risc op code error". Only Ryzen platforms employ
39 * this workaround if the option equals 1. The workaround can be explicitly
40 * disabled for all platforms by setting to 0, the workaround can be forced
41 * on for any platform by setting to 2.
42 */
43static unsigned int dma_reset_workaround = 1;
44module_param(dma_reset_workaround, int, 0644);
45MODULE_PARM_DESC(dma_reset_workaround, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable");
46
4513fc69 47static unsigned int debug;
9c8ced51
ST
48module_param(debug, int, 0644);
49MODULE_PARM_DESC(debug, "enable debug messages");
d19770e5
ST
50
51static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
52module_param_array(card, int, NULL, 0444);
9c8ced51 53MODULE_PARM_DESC(card, "card type");
d19770e5 54
4513fc69
ST
55#define dprintk(level, fmt, arg...)\
56 do { if (debug >= level)\
e39682b5
MCC
57 printk(KERN_DEBUG pr_fmt("%s: " fmt), \
58 __func__, ##arg); \
4513fc69 59 } while (0)
d19770e5
ST
60
61static unsigned int cx23885_devcount;
62
d19770e5
ST
63#define NO_SYNC_LINE (-1U)
64
d19770e5
ST
/* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
67 * CX23887 Assumptions
68 * 1 line = 16 bytes of CDT
69 * cmds size = 80
70 * cdt size = 16 * linesize
71 * iqsize = 64
72 * maxlines = 6
73 *
74 * Address Space:
75 * 0x00000000 0x00008fff FIFO clusters
76 * 0x00010000 0x000104af Channel Management Data Structures
77 * 0x000104b0 0x000104ff Free
78 * 0x00010500 0x000108bf 15 channels * iqsize
79 * 0x000108c0 0x000108ff Free
80 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
81 * 15 channels * (iqsize + (maxlines * linesize))
82 * 0x00010ea0 0x00010xxx Free
83 */
84
7e994302 85static struct sram_channel cx23885_sram_channels[] = {
d19770e5 86 [SRAM_CH01] = {
69ad6e56
ST
87 .name = "VID A",
88 .cmds_start = 0x10000,
d8d12b43
ST
89 .ctrl_start = 0x10380,
90 .cdt = 0x104c0,
69ad6e56
ST
91 .fifo_start = 0x40,
92 .fifo_size = 0x2800,
d19770e5
ST
93 .ptr1_reg = DMA1_PTR1,
94 .ptr2_reg = DMA1_PTR2,
95 .cnt1_reg = DMA1_CNT1,
96 .cnt2_reg = DMA1_CNT2,
97 },
98 [SRAM_CH02] = {
99 .name = "ch2",
100 .cmds_start = 0x0,
101 .ctrl_start = 0x0,
102 .cdt = 0x0,
103 .fifo_start = 0x0,
104 .fifo_size = 0x0,
105 .ptr1_reg = DMA2_PTR1,
106 .ptr2_reg = DMA2_PTR2,
107 .cnt1_reg = DMA2_CNT1,
108 .cnt2_reg = DMA2_CNT2,
109 },
110 [SRAM_CH03] = {
69ad6e56
ST
111 .name = "TS1 B",
112 .cmds_start = 0x100A0,
d8d12b43
ST
113 .ctrl_start = 0x10400,
114 .cdt = 0x10580,
69ad6e56
ST
115 .fifo_start = 0x5000,
116 .fifo_size = 0x1000,
d19770e5
ST
117 .ptr1_reg = DMA3_PTR1,
118 .ptr2_reg = DMA3_PTR2,
119 .cnt1_reg = DMA3_CNT1,
120 .cnt2_reg = DMA3_CNT2,
121 },
122 [SRAM_CH04] = {
123 .name = "ch4",
124 .cmds_start = 0x0,
125 .ctrl_start = 0x0,
126 .cdt = 0x0,
127 .fifo_start = 0x0,
128 .fifo_size = 0x0,
129 .ptr1_reg = DMA4_PTR1,
130 .ptr2_reg = DMA4_PTR2,
131 .cnt1_reg = DMA4_CNT1,
132 .cnt2_reg = DMA4_CNT2,
133 },
134 [SRAM_CH05] = {
135 .name = "ch5",
136 .cmds_start = 0x0,
137 .ctrl_start = 0x0,
138 .cdt = 0x0,
139 .fifo_start = 0x0,
140 .fifo_size = 0x0,
141 .ptr1_reg = DMA5_PTR1,
142 .ptr2_reg = DMA5_PTR2,
143 .cnt1_reg = DMA5_CNT1,
144 .cnt2_reg = DMA5_CNT2,
145 },
146 [SRAM_CH06] = {
147 .name = "TS2 C",
148 .cmds_start = 0x10140,
d8d12b43
ST
149 .ctrl_start = 0x10440,
150 .cdt = 0x105e0,
d19770e5
ST
151 .fifo_start = 0x6000,
152 .fifo_size = 0x1000,
153 .ptr1_reg = DMA5_PTR1,
154 .ptr2_reg = DMA5_PTR2,
155 .cnt1_reg = DMA5_CNT1,
156 .cnt2_reg = DMA5_CNT2,
157 },
158 [SRAM_CH07] = {
9e44d632
MM
159 .name = "TV Audio",
160 .cmds_start = 0x10190,
161 .ctrl_start = 0x10480,
162 .cdt = 0x10a00,
163 .fifo_start = 0x7000,
164 .fifo_size = 0x1000,
d19770e5
ST
165 .ptr1_reg = DMA6_PTR1,
166 .ptr2_reg = DMA6_PTR2,
167 .cnt1_reg = DMA6_CNT1,
168 .cnt2_reg = DMA6_CNT2,
169 },
170 [SRAM_CH08] = {
171 .name = "ch8",
172 .cmds_start = 0x0,
173 .ctrl_start = 0x0,
174 .cdt = 0x0,
175 .fifo_start = 0x0,
176 .fifo_size = 0x0,
177 .ptr1_reg = DMA7_PTR1,
178 .ptr2_reg = DMA7_PTR2,
179 .cnt1_reg = DMA7_CNT1,
180 .cnt2_reg = DMA7_CNT2,
181 },
182 [SRAM_CH09] = {
183 .name = "ch9",
184 .cmds_start = 0x0,
185 .ctrl_start = 0x0,
186 .cdt = 0x0,
187 .fifo_start = 0x0,
188 .fifo_size = 0x0,
189 .ptr1_reg = DMA8_PTR1,
190 .ptr2_reg = DMA8_PTR2,
191 .cnt1_reg = DMA8_CNT1,
192 .cnt2_reg = DMA8_CNT2,
193 },
194};
195
7e994302
ST
196static struct sram_channel cx23887_sram_channels[] = {
197 [SRAM_CH01] = {
198 .name = "VID A",
199 .cmds_start = 0x10000,
200 .ctrl_start = 0x105b0,
201 .cdt = 0x107b0,
202 .fifo_start = 0x40,
203 .fifo_size = 0x2800,
204 .ptr1_reg = DMA1_PTR1,
205 .ptr2_reg = DMA1_PTR2,
206 .cnt1_reg = DMA1_CNT1,
207 .cnt2_reg = DMA1_CNT2,
208 },
209 [SRAM_CH02] = {
35045137
ST
210 .name = "VID A (VBI)",
211 .cmds_start = 0x10050,
212 .ctrl_start = 0x105F0,
213 .cdt = 0x10810,
214 .fifo_start = 0x3000,
215 .fifo_size = 0x1000,
7e994302
ST
216 .ptr1_reg = DMA2_PTR1,
217 .ptr2_reg = DMA2_PTR2,
218 .cnt1_reg = DMA2_CNT1,
219 .cnt2_reg = DMA2_CNT2,
220 },
221 [SRAM_CH03] = {
222 .name = "TS1 B",
223 .cmds_start = 0x100A0,
224 .ctrl_start = 0x10630,
225 .cdt = 0x10870,
226 .fifo_start = 0x5000,
227 .fifo_size = 0x1000,
228 .ptr1_reg = DMA3_PTR1,
229 .ptr2_reg = DMA3_PTR2,
230 .cnt1_reg = DMA3_CNT1,
231 .cnt2_reg = DMA3_CNT2,
232 },
233 [SRAM_CH04] = {
234 .name = "ch4",
235 .cmds_start = 0x0,
236 .ctrl_start = 0x0,
237 .cdt = 0x0,
238 .fifo_start = 0x0,
239 .fifo_size = 0x0,
240 .ptr1_reg = DMA4_PTR1,
241 .ptr2_reg = DMA4_PTR2,
242 .cnt1_reg = DMA4_CNT1,
243 .cnt2_reg = DMA4_CNT2,
244 },
245 [SRAM_CH05] = {
246 .name = "ch5",
247 .cmds_start = 0x0,
248 .ctrl_start = 0x0,
249 .cdt = 0x0,
250 .fifo_start = 0x0,
251 .fifo_size = 0x0,
252 .ptr1_reg = DMA5_PTR1,
253 .ptr2_reg = DMA5_PTR2,
254 .cnt1_reg = DMA5_CNT1,
255 .cnt2_reg = DMA5_CNT2,
256 },
257 [SRAM_CH06] = {
258 .name = "TS2 C",
259 .cmds_start = 0x10140,
260 .ctrl_start = 0x10670,
261 .cdt = 0x108d0,
262 .fifo_start = 0x6000,
263 .fifo_size = 0x1000,
264 .ptr1_reg = DMA5_PTR1,
265 .ptr2_reg = DMA5_PTR2,
266 .cnt1_reg = DMA5_CNT1,
267 .cnt2_reg = DMA5_CNT2,
268 },
269 [SRAM_CH07] = {
35045137
ST
270 .name = "TV Audio",
271 .cmds_start = 0x10190,
272 .ctrl_start = 0x106B0,
273 .cdt = 0x10930,
274 .fifo_start = 0x7000,
275 .fifo_size = 0x1000,
7e994302
ST
276 .ptr1_reg = DMA6_PTR1,
277 .ptr2_reg = DMA6_PTR2,
278 .cnt1_reg = DMA6_CNT1,
279 .cnt2_reg = DMA6_CNT2,
280 },
281 [SRAM_CH08] = {
282 .name = "ch8",
283 .cmds_start = 0x0,
284 .ctrl_start = 0x0,
285 .cdt = 0x0,
286 .fifo_start = 0x0,
287 .fifo_size = 0x0,
288 .ptr1_reg = DMA7_PTR1,
289 .ptr2_reg = DMA7_PTR2,
290 .cnt1_reg = DMA7_CNT1,
291 .cnt2_reg = DMA7_CNT2,
292 },
293 [SRAM_CH09] = {
294 .name = "ch9",
295 .cmds_start = 0x0,
296 .ctrl_start = 0x0,
297 .cdt = 0x0,
298 .fifo_start = 0x0,
299 .fifo_size = 0x0,
300 .ptr1_reg = DMA8_PTR1,
301 .ptr2_reg = DMA8_PTR2,
302 .cnt1_reg = DMA8_CNT1,
303 .cnt2_reg = DMA8_CNT2,
304 },
305};
306
ada73eee 307static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
dbe83a3b
AW
308{
309 unsigned long flags;
310 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
311
312 dev->pci_irqmask |= mask;
313
314 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
315}
316
317void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
318{
319 unsigned long flags;
320 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
321
322 dev->pci_irqmask |= mask;
323 cx_set(PCI_INT_MSK, mask);
324
325 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
326}
327
328void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
329{
330 u32 v;
331 unsigned long flags;
332 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
333
334 v = mask & dev->pci_irqmask;
335 if (v)
336 cx_set(PCI_INT_MSK, v);
337
338 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
339}
340
341static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
342{
343 cx23885_irq_enable(dev, 0xffffffff);
344}
345
346void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
347{
348 unsigned long flags;
349 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
350
351 cx_clear(PCI_INT_MSK, mask);
352
353 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
354}
355
356static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
357{
358 cx23885_irq_disable(dev, 0xffffffff);
359}
360
361void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
362{
363 unsigned long flags;
364 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
365
366 dev->pci_irqmask &= ~mask;
367 cx_clear(PCI_INT_MSK, mask);
368
369 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
370}
371
372static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
373{
374 u32 v;
375 unsigned long flags;
376 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
377
378 v = cx_read(PCI_INT_MSK);
379
380 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
381 return v;
382}
383
d19770e5
ST
384static int cx23885_risc_decode(u32 risc)
385{
386 static char *instr[16] = {
b1b81f1d
ST
387 [RISC_SYNC >> 28] = "sync",
388 [RISC_WRITE >> 28] = "write",
389 [RISC_WRITEC >> 28] = "writec",
390 [RISC_READ >> 28] = "read",
391 [RISC_READC >> 28] = "readc",
392 [RISC_JUMP >> 28] = "jump",
393 [RISC_SKIP >> 28] = "skip",
394 [RISC_WRITERM >> 28] = "writerm",
395 [RISC_WRITECM >> 28] = "writecm",
396 [RISC_WRITECR >> 28] = "writecr",
d19770e5
ST
397 };
398 static int incr[16] = {
b1b81f1d
ST
399 [RISC_WRITE >> 28] = 3,
400 [RISC_JUMP >> 28] = 3,
401 [RISC_SKIP >> 28] = 1,
402 [RISC_SYNC >> 28] = 1,
403 [RISC_WRITERM >> 28] = 3,
404 [RISC_WRITECM >> 28] = 3,
405 [RISC_WRITECR >> 28] = 4,
d19770e5
ST
406 };
407 static char *bits[] = {
408 "12", "13", "14", "resync",
409 "cnt0", "cnt1", "18", "19",
410 "20", "21", "22", "23",
411 "irq1", "irq2", "eol", "sol",
412 };
413 int i;
414
09f8be26 415 printk(KERN_DEBUG "0x%08x [ %s", risc,
d19770e5 416 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
44a6481d 417 for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
d19770e5 418 if (risc & (1 << (i + 12)))
e39682b5
MCC
419 pr_cont(" %s", bits[i]);
420 pr_cont(" count=%d ]\n", risc & 0xfff);
d19770e5
ST
421 return incr[risc >> 28] ? incr[risc >> 28] : 1;
422}
423
453afdd9 424static void cx23885_wakeup(struct cx23885_tsport *port,
39e75cfe 425 struct cx23885_dmaqueue *q, u32 count)
d19770e5 426{
d19770e5 427 struct cx23885_buffer *buf;
9a7dc2b0
BL
428 int count_delta;
429 int max_buf_done = 5; /* service maximum five buffers */
430
431 do {
432 if (list_empty(&q->active))
433 return;
434 buf = list_entry(q->active.next,
435 struct cx23885_buffer, queue);
436
437 buf->vb.vb2_buf.timestamp = ktime_get_ns();
438 buf->vb.sequence = q->count++;
439 if (count != (q->count % 65536)) {
440 dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
441 buf->vb.vb2_buf.index, count, q->count);
442 } else {
443 dprintk(7, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
444 buf->vb.vb2_buf.index, count, q->count);
445 }
446 list_del(&buf->queue);
447 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
448 max_buf_done--;
449 /* count register is 16 bits so apply modulo appropriately */
450 count_delta = ((int)count - (int)(q->count % 65536));
451 } while ((count_delta > 0) && (max_buf_done > 0));
d19770e5 452}
d19770e5 453
7b888014 454int cx23885_sram_channel_setup(struct cx23885_dev *dev,
39e75cfe
AB
455 struct sram_channel *ch,
456 unsigned int bpl, u32 risc)
d19770e5 457{
44a6481d 458 unsigned int i, lines;
d19770e5
ST
459 u32 cdt;
460
9c8ced51 461 if (ch->cmds_start == 0) {
22b4e64f 462 dprintk(1, "%s() Erasing channel [%s]\n", __func__,
44a6481d 463 ch->name);
d19770e5
ST
464 cx_write(ch->ptr1_reg, 0);
465 cx_write(ch->ptr2_reg, 0);
466 cx_write(ch->cnt2_reg, 0);
467 cx_write(ch->cnt1_reg, 0);
468 return 0;
469 } else {
22b4e64f 470 dprintk(1, "%s() Configuring channel [%s]\n", __func__,
44a6481d 471 ch->name);
d19770e5
ST
472 }
473
474 bpl = (bpl + 7) & ~7; /* alignment */
475 cdt = ch->cdt;
476 lines = ch->fifo_size / bpl;
477 if (lines > 6)
478 lines = 6;
479 BUG_ON(lines < 2);
480
453afdd9
HV
481 cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
482 cx_write(8 + 4, 12);
86ecc027 483 cx_write(8 + 8, 0);
d19770e5
ST
484
485 /* write CDT */
486 for (i = 0; i < lines; i++) {
22b4e64f 487 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
44a6481d 488 ch->fifo_start + bpl*i);
d19770e5
ST
489 cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
490 cx_write(cdt + 16*i + 4, 0);
491 cx_write(cdt + 16*i + 8, 0);
492 cx_write(cdt + 16*i + 12, 0);
493 }
494
495 /* write CMDS */
496 if (ch->jumponly)
9c8ced51 497 cx_write(ch->cmds_start + 0, 8);
d19770e5 498 else
9c8ced51 499 cx_write(ch->cmds_start + 0, risc);
d19770e5
ST
500 cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
501 cx_write(ch->cmds_start + 8, cdt);
502 cx_write(ch->cmds_start + 12, (lines*16) >> 3);
503 cx_write(ch->cmds_start + 16, ch->ctrl_start);
504 if (ch->jumponly)
9c8ced51 505 cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
d19770e5
ST
506 else
507 cx_write(ch->cmds_start + 20, 64 >> 2);
508 for (i = 24; i < 80; i += 4)
509 cx_write(ch->cmds_start + i, 0);
510
511 /* fill registers */
512 cx_write(ch->ptr1_reg, ch->fifo_start);
513 cx_write(ch->ptr2_reg, cdt);
514 cx_write(ch->cnt2_reg, (lines*16) >> 3);
9c8ced51 515 cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
d19770e5 516
9c8ced51 517 dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
e133be0f 518 dev->bridge,
d19770e5
ST
519 ch->name,
520 bpl,
521 lines);
522
523 return 0;
524}
525
7b888014 526void cx23885_sram_channel_dump(struct cx23885_dev *dev,
39e75cfe 527 struct sram_channel *ch)
d19770e5
ST
528{
529 static char *name[] = {
530 "init risc lo",
531 "init risc hi",
532 "cdt base",
533 "cdt size",
534 "iq base",
535 "iq size",
536 "risc pc lo",
537 "risc pc hi",
538 "iq wr ptr",
539 "iq rd ptr",
540 "cdt current",
541 "pci target lo",
542 "pci target hi",
543 "line / byte",
544 };
545 u32 risc;
44a6481d 546 unsigned int i, j, n;
d19770e5 547
e39682b5
MCC
548 pr_warn("%s: %s - dma channel status dump\n",
549 dev->name, ch->name);
d19770e5 550 for (i = 0; i < ARRAY_SIZE(name); i++)
e39682b5
MCC
551 pr_warn("%s: cmds: %-15s: 0x%08x\n",
552 dev->name, name[i],
553 cx_read(ch->cmds_start + 4*i));
d19770e5
ST
554
555 for (i = 0; i < 4; i++) {
44a6481d 556 risc = cx_read(ch->cmds_start + 4 * (i + 14));
e39682b5 557 pr_warn("%s: risc%d: ", dev->name, i);
d19770e5
ST
558 cx23885_risc_decode(risc);
559 }
560 for (i = 0; i < (64 >> 2); i += n) {
44a6481d
MK
561 risc = cx_read(ch->ctrl_start + 4 * i);
562 /* No consideration for bits 63-32 */
563
e39682b5
MCC
564 pr_warn("%s: (0x%08x) iq %x: ", dev->name,
565 ch->ctrl_start + 4 * i, i);
d19770e5
ST
566 n = cx23885_risc_decode(risc);
567 for (j = 1; j < n; j++) {
44a6481d 568 risc = cx_read(ch->ctrl_start + 4 * (i + j));
e39682b5
MCC
569 pr_warn("%s: iq %x: 0x%08x [ arg #%d ]\n",
570 dev->name, i+j, risc, j);
d19770e5
ST
571 }
572 }
573
e39682b5
MCC
574 pr_warn("%s: fifo: 0x%08x -> 0x%x\n",
575 dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
576 pr_warn("%s: ctrl: 0x%08x -> 0x%x\n",
577 dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
578 pr_warn("%s: ptr1_reg: 0x%08x\n",
579 dev->name, cx_read(ch->ptr1_reg));
580 pr_warn("%s: ptr2_reg: 0x%08x\n",
581 dev->name, cx_read(ch->ptr2_reg));
582 pr_warn("%s: cnt1_reg: 0x%08x\n",
583 dev->name, cx_read(ch->cnt1_reg));
584 pr_warn("%s: cnt2_reg: 0x%08x\n",
585 dev->name, cx_read(ch->cnt2_reg));
d19770e5
ST
586}
587
39e75cfe 588static void cx23885_risc_disasm(struct cx23885_tsport *port,
4d63a25c 589 struct cx23885_riscmem *risc)
d19770e5
ST
590{
591 struct cx23885_dev *dev = port->dev;
44a6481d 592 unsigned int i, j, n;
d19770e5 593
e39682b5 594 pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
d19770e5
ST
595 dev->name, risc->cpu, (unsigned long)risc->dma);
596 for (i = 0; i < (risc->size >> 2); i += n) {
e39682b5 597 pr_info("%s: %04d: ", dev->name, i);
86ecc027 598 n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
d19770e5 599 for (j = 1; j < n; j++)
e39682b5
MCC
600 pr_info("%s: %04d: 0x%08x [ arg #%d ]\n",
601 dev->name, i + j, risc->cpu[i + j], j);
86ecc027 602 if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
d19770e5
ST
603 break;
604 }
605}
606
95f408bb
BL
607static void cx23885_clear_bridge_error(struct cx23885_dev *dev)
608{
4bd46aa0
BL
609 uint32_t reg1_val, reg2_val;
610
611 if (!dev->need_dma_reset)
612 return;
613
614 reg1_val = cx_read(TC_REQ); /* read-only */
615 reg2_val = cx_read(TC_REQ_SET);
95f408bb
BL
616
617 if (reg1_val && reg2_val) {
618 cx_write(TC_REQ, reg1_val);
619 cx_write(TC_REQ_SET, reg2_val);
620 cx_read(VID_B_DMA);
621 cx_read(VBI_B_DMA);
622 cx_read(VID_C_DMA);
623 cx_read(VBI_C_DMA);
624
625 dev_info(&dev->pci->dev,
626 "dma in progress detected 0x%08x 0x%08x, clearing\n",
627 reg1_val, reg2_val);
628 }
629}
630
39e75cfe 631static void cx23885_shutdown(struct cx23885_dev *dev)
d19770e5
ST
632{
633 /* disable RISC controller */
634 cx_write(DEV_CNTRL2, 0);
635
636 /* Disable all IR activity */
637 cx_write(IR_CNTRL_REG, 0);
638
639 /* Disable Video A/B activity */
640 cx_write(VID_A_DMA_CTL, 0);
641 cx_write(VID_B_DMA_CTL, 0);
642 cx_write(VID_C_DMA_CTL, 0);
643
644 /* Disable Audio activity */
645 cx_write(AUD_INT_DMA_CTL, 0);
646 cx_write(AUD_EXT_DMA_CTL, 0);
647
648 /* Disable Serial port */
649 cx_write(UART_CTL, 0);
650
651 /* Disable Interrupts */
dbe83a3b 652 cx23885_irq_disable_all(dev);
d19770e5
ST
653 cx_write(VID_A_INT_MSK, 0);
654 cx_write(VID_B_INT_MSK, 0);
655 cx_write(VID_C_INT_MSK, 0);
656 cx_write(AUDIO_INT_INT_MSK, 0);
657 cx_write(AUDIO_EXT_INT_MSK, 0);
658
659}
660
39e75cfe 661static void cx23885_reset(struct cx23885_dev *dev)
d19770e5 662{
22b4e64f 663 dprintk(1, "%s()\n", __func__);
d19770e5
ST
664
665 cx23885_shutdown(dev);
666
667 cx_write(PCI_INT_STAT, 0xffffffff);
668 cx_write(VID_A_INT_STAT, 0xffffffff);
669 cx_write(VID_B_INT_STAT, 0xffffffff);
670 cx_write(VID_C_INT_STAT, 0xffffffff);
671 cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
672 cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
673 cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
ecda5966 674 cx_write(PAD_CTRL, 0x00500300);
d19770e5 675
95f408bb
BL
676 /* clear dma in progress */
677 cx23885_clear_bridge_error(dev);
71be8dee 678 msleep(100);
d19770e5 679
7b888014
ST
680 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
681 720*4, 0);
682 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
683 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
684 188*4, 0);
685 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
686 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
687 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
688 188*4, 0);
689 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
690 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
691 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);
d19770e5 692
a6a3f140 693 cx23885_gpio_setup(dev);
95f408bb
BL
694
695 cx23885_irq_get_mask(dev);
696
697 /* clear dma in progress */
698 cx23885_clear_bridge_error(dev);
d19770e5
ST
699}
700
701
702static int cx23885_pci_quirks(struct cx23885_dev *dev)
703{
22b4e64f 704 dprintk(1, "%s()\n", __func__);
d19770e5 705
2df9a4c2
ST
706 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
707 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
708 * occur on the cx23887 bridge.
709 */
9c8ced51 710 if (dev->bridge == CX23885_BRIDGE_885)
d19770e5 711 cx_clear(RDR_TLCTL0, 1 << 4);
4823e9ee 712
95f408bb
BL
713 /* clear dma in progress */
714 cx23885_clear_bridge_error(dev);
d19770e5
ST
715 return 0;
716}
717
718static int get_resources(struct cx23885_dev *dev)
719{
9c8ced51
ST
720 if (request_mem_region(pci_resource_start(dev->pci, 0),
721 pci_resource_len(dev->pci, 0),
44a6481d 722 dev->name))
d19770e5
ST
723 return 0;
724
e39682b5
MCC
725 pr_err("%s: can't get MMIO memory @ 0x%llx\n",
726 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
d19770e5
ST
727
728 return -EBUSY;
729}
730
9c8ced51
ST
731static int cx23885_init_tsport(struct cx23885_dev *dev,
732 struct cx23885_tsport *port, int portno)
d19770e5 733{
22b4e64f 734 dprintk(1, "%s(portno=%d)\n", __func__, portno);
a6a3f140
ST
735
736 /* Transport bus init dma queue - Common settings */
737 port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
738 port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
b1b81f1d
ST
739 port->vld_misc_val = 0x0;
740 port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);
a6a3f140
ST
741
742 spin_lock_init(&port->slock);
743 port->dev = dev;
744 port->nr = portno;
745
746 INIT_LIST_HEAD(&port->mpegq.active);
d782ffa2 747 mutex_init(&port->frontends.lock);
7bdf84fc 748 INIT_LIST_HEAD(&port->frontends.felist);
d782ffa2
ST
749 port->frontends.active_fe_id = 0;
750
a739a7e4
ST
751 /* This should be hardcoded allow a single frontend
752 * attachment to this tsport, keeping the -dvb.c
753 * code clean and safe.
754 */
9c8ced51 755 if (!port->num_frontends)
a739a7e4
ST
756 port->num_frontends = 1;
757
9c8ced51 758 switch (portno) {
a6a3f140
ST
759 case 1:
760 port->reg_gpcnt = VID_B_GPCNT;
761 port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
762 port->reg_dma_ctl = VID_B_DMA_CTL;
763 port->reg_lngth = VID_B_LNGTH;
764 port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
765 port->reg_gen_ctrl = VID_B_GEN_CTL;
766 port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
767 port->reg_sop_status = VID_B_SOP_STATUS;
768 port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
769 port->reg_vld_misc = VID_B_VLD_MISC;
770 port->reg_ts_clk_en = VID_B_TS_CLK_EN;
771 port->reg_src_sel = VID_B_SRC_SEL;
772 port->reg_ts_int_msk = VID_B_INT_MSK;
b1b81f1d 773 port->reg_ts_int_stat = VID_B_INT_STAT;
a6a3f140
ST
774 port->sram_chno = SRAM_CH03; /* VID_B */
775 port->pci_irqmask = 0x02; /* VID_B bit1 */
776 break;
777 case 2:
778 port->reg_gpcnt = VID_C_GPCNT;
779 port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
780 port->reg_dma_ctl = VID_C_DMA_CTL;
781 port->reg_lngth = VID_C_LNGTH;
782 port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
783 port->reg_gen_ctrl = VID_C_GEN_CTL;
784 port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
785 port->reg_sop_status = VID_C_SOP_STATUS;
786 port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
787 port->reg_vld_misc = VID_C_VLD_MISC;
788 port->reg_ts_clk_en = VID_C_TS_CLK_EN;
789 port->reg_src_sel = 0;
790 port->reg_ts_int_msk = VID_C_INT_MSK;
791 port->reg_ts_int_stat = VID_C_INT_STAT;
792 port->sram_chno = SRAM_CH06; /* VID_C */
793 port->pci_irqmask = 0x04; /* VID_C bit2 */
d19770e5 794 break;
a6a3f140
ST
795 default:
796 BUG();
d19770e5
ST
797 }
798
799 return 0;
800}
801
0ac5881a
ST
802static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
803{
804 switch (cx_read(RDR_CFG2) & 0xff) {
805 case 0x00:
806 /* cx23885 */
807 dev->hwrevision = 0xa0;
808 break;
809 case 0x01:
810 /* CX23885-12Z */
811 dev->hwrevision = 0xa1;
812 break;
813 case 0x02:
25ea66e2 814 /* CX23885-13Z/14Z */
0ac5881a
ST
815 dev->hwrevision = 0xb0;
816 break;
817 case 0x03:
25ea66e2
ST
818 if (dev->pci->device == 0x8880) {
819 /* CX23888-21Z/22Z */
820 dev->hwrevision = 0xc0;
821 } else {
822 /* CX23885-14Z */
823 dev->hwrevision = 0xa4;
824 }
825 break;
826 case 0x04:
827 if (dev->pci->device == 0x8880) {
828 /* CX23888-31Z */
829 dev->hwrevision = 0xd0;
830 } else {
831 /* CX23885-15Z, CX23888-31Z */
832 dev->hwrevision = 0xa5;
833 }
0ac5881a
ST
834 break;
835 case 0x0e:
836 /* CX23887-15Z */
837 dev->hwrevision = 0xc0;
abe1def4 838 break;
0ac5881a
ST
839 case 0x0f:
840 /* CX23887-14Z */
841 dev->hwrevision = 0xb1;
842 break;
843 default:
e39682b5
MCC
844 pr_err("%s() New hardware revision found 0x%x\n",
845 __func__, dev->hwrevision);
0ac5881a
ST
846 }
847 if (dev->hwrevision)
e39682b5 848 pr_info("%s() Hardware revision = 0x%02x\n",
22b4e64f 849 __func__, dev->hwrevision);
0ac5881a 850 else
e39682b5
MCC
851 pr_err("%s() Hardware revision unknown 0x%x\n",
852 __func__, dev->hwrevision);
0ac5881a
ST
853}
854
29f8a0a5
AW
855/* Find the first v4l2_subdev member of the group id in hw */
856struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
857{
858 struct v4l2_subdev *result = NULL;
859 struct v4l2_subdev *sd;
860
861 spin_lock(&dev->v4l2_dev.lock);
862 v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
863 if (sd->grp_id == hw) {
864 result = sd;
865 break;
866 }
867 }
868 spin_unlock(&dev->v4l2_dev.lock);
869 return result;
870}
871
d19770e5
ST
872static int cx23885_dev_setup(struct cx23885_dev *dev)
873{
874 int i;
875
dbe83a3b 876 spin_lock_init(&dev->pci_irqmask_lock);
af7f388e 877 spin_lock_init(&dev->slock);
dbe83a3b 878
d19770e5 879 mutex_init(&dev->lock);
8386c27f 880 mutex_init(&dev->gpio_lock);
d19770e5
ST
881
882 atomic_inc(&dev->refcount);
883
884 dev->nr = cx23885_devcount++;
579f1163
ST
885 sprintf(dev->name, "cx23885[%d]", dev->nr);
886
579f1163 887 /* Configure the internal memory */
9c8ced51 888 if (dev->pci->device == 0x8880) {
5da1a682
BL
889 /* Could be 887 or 888, assume an 888 default */
890 dev->bridge = CX23885_BRIDGE_888;
c7712613 891 /* Apply a sensible clock frequency for the PCIe bridge */
5da1a682 892 dev->clk_freq = 50000000;
7e994302 893 dev->sram_channels = cx23887_sram_channels;
579f1163 894 } else
9c8ced51 895 if (dev->pci->device == 0x8852) {
579f1163 896 dev->bridge = CX23885_BRIDGE_885;
c7712613
ST
897 /* Apply a sensible clock frequency for the PCIe bridge */
898 dev->clk_freq = 28000000;
7e994302 899 dev->sram_channels = cx23885_sram_channels;
579f1163
ST
900 } else
901 BUG();
902
903 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
22b4e64f 904 __func__, dev->bridge);
579f1163
ST
905
906 /* board config */
907 dev->board = UNSET;
908 if (card[dev->nr] < cx23885_bcount)
909 dev->board = card[dev->nr];
910 for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
911 if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
912 dev->pci->subsystem_device == cx23885_subids[i].subdevice)
913 dev->board = cx23885_subids[i].card;
914 if (UNSET == dev->board) {
915 dev->board = CX23885_BOARD_UNKNOWN;
916 cx23885_card_list(dev);
917 }
918
c00ba2c1
BL
919 if (dev->pci->device == 0x8852) {
920 /* no DIF on cx23885, so no analog tuner support possible */
921 if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC)
922 dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885;
923 else if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_DVB)
924 dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885;
925 }
926
c7712613
ST
927 /* If the user specific a clk freq override, apply it */
928 if (cx23885_boards[dev->board].clk_freq > 0)
929 dev->clk_freq = cx23885_boards[dev->board].clk_freq;
930
779c79d4
BL
931 if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
932 dev->pci->subsystem_device == 0x7137) {
933 /* Hauppauge ImpactVCBe device ID 0x7137 is populated
934 * with an 888, and a 25Mhz crystal, instead of the
935 * usual third overtone 50Mhz. The default clock rate must
936 * be overridden so the cx25840 is properly configured
937 */
938 dev->clk_freq = 25000000;
939 }
940
d19770e5
ST
941 dev->pci_bus = dev->pci->bus->number;
942 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
dbe83a3b 943 cx23885_irq_add(dev, 0x001f00);
d19770e5
ST
944
945 /* External Master 1 Bus */
946 dev->i2c_bus[0].nr = 0;
947 dev->i2c_bus[0].dev = dev;
948 dev->i2c_bus[0].reg_stat = I2C1_STAT;
949 dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
950 dev->i2c_bus[0].reg_addr = I2C1_ADDR;
951 dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
952 dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
953 dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
954
955 /* External Master 2 Bus */
956 dev->i2c_bus[1].nr = 1;
957 dev->i2c_bus[1].dev = dev;
958 dev->i2c_bus[1].reg_stat = I2C2_STAT;
959 dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
960 dev->i2c_bus[1].reg_addr = I2C2_ADDR;
961 dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
962 dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
963 dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
964
965 /* Internal Master 3 Bus */
966 dev->i2c_bus[2].nr = 2;
967 dev->i2c_bus[2].dev = dev;
968 dev->i2c_bus[2].reg_stat = I2C3_STAT;
969 dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
a2129af5 970 dev->i2c_bus[2].reg_addr = I2C3_ADDR;
d19770e5
ST
971 dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
972 dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
973 dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
974
b1b81f1d
ST
975 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
976 (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
a6a3f140 977 cx23885_init_tsport(dev, &dev->ts1, 1);
579f1163 978
b1b81f1d
ST
979 if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
980 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
a6a3f140 981 cx23885_init_tsport(dev, &dev->ts2, 2);
d19770e5 982
d19770e5 983 if (get_resources(dev) < 0) {
e39682b5 984 pr_err("CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
44a6481d
MK
985 dev->name, dev->pci->subsystem_vendor,
986 dev->pci->subsystem_device);
d19770e5
ST
987
988 cx23885_devcount--;
fcf94c89 989 return -ENODEV;
d19770e5
ST
990 }
991
d19770e5 992 /* PCIe stuff */
9c8ced51
ST
993 dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
994 pci_resource_len(dev->pci, 0));
d19770e5
ST
995
996 dev->bmmio = (u8 __iomem *)dev->lmmio;
997
e39682b5
MCC
998 pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
999 dev->name, dev->pci->subsystem_vendor,
1000 dev->pci->subsystem_device, cx23885_boards[dev->board].name,
1001 dev->board, card[dev->nr] == dev->board ?
1002 "insmod option" : "autodetected");
d19770e5 1003
4823e9ee
ST
1004 cx23885_pci_quirks(dev);
1005
7b888014
ST
1006 /* Assume some sensible defaults */
1007 dev->tuner_type = cx23885_boards[dev->board].tuner_type;
1008 dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
557f48d5 1009 dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
7b888014
ST
1010 dev->radio_type = cx23885_boards[dev->board].radio_type;
1011 dev->radio_addr = cx23885_boards[dev->board].radio_addr;
1012
557f48d5
IL
1013 dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
1014 __func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
7b888014 1015 dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
22b4e64f 1016 __func__, dev->radio_type, dev->radio_addr);
7b888014 1017
f659c513
ST
1018 /* The cx23417 encoder has GPIO's that need to be initialised
1019 * before DVB, so that demodulators and tuners are out of
1020 * reset before DVB uses them.
1021 */
1022 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
1023 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
1024 cx23885_mc417_init(dev);
1025
d19770e5
ST
1026 /* init hardware */
1027 cx23885_reset(dev);
1028
1029 cx23885_i2c_register(&dev->i2c_bus[0]);
1030 cx23885_i2c_register(&dev->i2c_bus[1]);
1031 cx23885_i2c_register(&dev->i2c_bus[2]);
d19770e5 1032 cx23885_card_setup(dev);
3aab15af 1033 call_all(dev, tuner, standby);
d19770e5
ST
1034 cx23885_ir_init(dev);
1035
6c43a217
HV
1036 if (dev->board == CX23885_BOARD_VIEWCAST_460E) {
1037 /*
1038 * GPIOs 9/8 are input detection bits for the breakout video
1039 * (gpio 8) and audio (gpio 9) cables. When they're attached,
1040 * this gpios are pulled high. Make sure these GPIOs are marked
1041 * as inputs.
1042 */
1043 cx23885_gpio_enable(dev, 0x300, 0);
1044 }
1045
7b888014
ST
1046 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
1047 if (cx23885_video_register(dev) < 0) {
e39682b5 1048 pr_err("%s() Failed to register analog video adapters on VID_A\n",
07ab29e1 1049 __func__);
7b888014
ST
1050 }
1051 }
1052
1053 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
10d0dcd7
IL
1054 if (cx23885_boards[dev->board].num_fds_portb)
1055 dev->ts1.num_frontends =
1056 cx23885_boards[dev->board].num_fds_portb;
a6a3f140 1057 if (cx23885_dvb_register(&dev->ts1) < 0) {
e39682b5 1058 pr_err("%s() Failed to register dvb adapters on VID_B\n",
22b4e64f 1059 __func__);
a6a3f140 1060 }
b1b81f1d
ST
1061 } else
1062 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1063 if (cx23885_417_register(dev) < 0) {
e39682b5 1064 pr_err("%s() Failed to register 417 on VID_B\n",
b1b81f1d
ST
1065 __func__);
1066 }
579f1163
ST
1067 }
1068
7b888014 1069 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
10d0dcd7
IL
1070 if (cx23885_boards[dev->board].num_fds_portc)
1071 dev->ts2.num_frontends =
1072 cx23885_boards[dev->board].num_fds_portc;
a6a3f140 1073 if (cx23885_dvb_register(&dev->ts2) < 0) {
e39682b5 1074 pr_err("%s() Failed to register dvb on VID_C\n",
b1b81f1d
ST
1075 __func__);
1076 }
1077 } else
1078 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1079 if (cx23885_417_register(dev) < 0) {
e39682b5 1080 pr_err("%s() Failed to register 417 on VID_C\n",
22b4e64f 1081 __func__);
a6a3f140 1082 }
d19770e5
ST
1083 }
1084
0ac5881a
ST
1085 cx23885_dev_checkrevision(dev);
1086
702dd790
IL
1087 /* disable MSI for NetUP cards, otherwise CI is not working */
1088 if (cx23885_boards[dev->board].ci_type > 0)
1089 cx_clear(RDR_RDRCTL1, 1 << 8);
1090
7b134e85
IL
1091 switch (dev->board) {
1092 case CX23885_BOARD_TEVII_S470:
1093 case CX23885_BOARD_TEVII_S471:
1094 cx_clear(RDR_RDRCTL1, 1 << 8);
1095 break;
1096 }
1097
d19770e5 1098 return 0;
d19770e5
ST
1099}
1100
39e75cfe 1101static void cx23885_dev_unregister(struct cx23885_dev *dev)
d19770e5 1102{
9c8ced51
ST
1103 release_mem_region(pci_resource_start(dev->pci, 0),
1104 pci_resource_len(dev->pci, 0));
d19770e5
ST
1105
1106 if (!atomic_dec_and_test(&dev->refcount))
1107 return;
1108
7b888014
ST
1109 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
1110 cx23885_video_unregister(dev);
1111
b1b81f1d 1112 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
a6a3f140
ST
1113 cx23885_dvb_unregister(&dev->ts1);
1114
b1b81f1d
ST
1115 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1116 cx23885_417_unregister(dev);
1117
1118 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
a6a3f140
ST
1119 cx23885_dvb_unregister(&dev->ts2);
1120
b1b81f1d
ST
1121 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1122 cx23885_417_unregister(dev);
1123
d19770e5
ST
1124 cx23885_i2c_unregister(&dev->i2c_bus[2]);
1125 cx23885_i2c_unregister(&dev->i2c_bus[1]);
1126 cx23885_i2c_unregister(&dev->i2c_bus[0]);
1127
1128 iounmap(dev->lmmio);
1129}
1130
9c8ced51 1131static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
44a6481d
MK
1132 unsigned int offset, u32 sync_line,
1133 unsigned int bpl, unsigned int padding,
453afdd9 1134 unsigned int lines, unsigned int lpi, bool jump)
d19770e5
ST
1135{
1136 struct scatterlist *sg;
9e44d632 1137 unsigned int line, todo, sol;
d19770e5 1138
453afdd9
HV
1139
1140 if (jump) {
1141 *(rp++) = cpu_to_le32(RISC_JUMP);
1142 *(rp++) = cpu_to_le32(0);
1143 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1144 }
1145
d19770e5
ST
1146 /* sync instruction */
1147 if (sync_line != NO_SYNC_LINE)
1148 *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
1149
1150 /* scan lines */
1151 sg = sglist;
1152 for (line = 0; line < lines; line++) {
1153 while (offset && offset >= sg_dma_len(sg)) {
1154 offset -= sg_dma_len(sg);
7675fe99 1155 sg = sg_next(sg);
d19770e5 1156 }
9e44d632
MM
1157
1158 if (lpi && line > 0 && !(line % lpi))
1159 sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
1160 else
1161 sol = RISC_SOL;
1162
d19770e5
ST
1163 if (bpl <= sg_dma_len(sg)-offset) {
1164 /* fits into current chunk */
9e44d632 1165 *(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
9c8ced51
ST
1166 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1167 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1168 offset += bpl;
d19770e5
ST
1169 } else {
1170 /* scanline needs to be split */
1171 todo = bpl;
9e44d632 1172 *(rp++) = cpu_to_le32(RISC_WRITE|sol|
d19770e5 1173 (sg_dma_len(sg)-offset));
9c8ced51
ST
1174 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1175 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
d19770e5
ST
1176 todo -= (sg_dma_len(sg)-offset);
1177 offset = 0;
7675fe99 1178 sg = sg_next(sg);
d19770e5 1179 while (todo > sg_dma_len(sg)) {
9c8ced51 1180 *(rp++) = cpu_to_le32(RISC_WRITE|
d19770e5 1181 sg_dma_len(sg));
9c8ced51
ST
1182 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1183 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
d19770e5 1184 todo -= sg_dma_len(sg);
7675fe99 1185 sg = sg_next(sg);
d19770e5 1186 }
9c8ced51
ST
1187 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
1188 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1189 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
d19770e5
ST
1190 offset += todo;
1191 }
1192 offset += padding;
1193 }
1194
1195 return rp;
1196}
1197
4d63a25c 1198int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
7b888014
ST
1199 struct scatterlist *sglist, unsigned int top_offset,
1200 unsigned int bottom_offset, unsigned int bpl,
1201 unsigned int padding, unsigned int lines)
1202{
1203 u32 instructions, fields;
d8eaa58b 1204 __le32 *rp;
7b888014
ST
1205
1206 fields = 0;
1207 if (UNSET != top_offset)
1208 fields++;
1209 if (UNSET != bottom_offset)
1210 fields++;
1211
1212 /* estimate risc mem: worst case is one write per page border +
1213 one write per scan line + syncs + jump (all 2 dwords). Padding
1214 can cause next bpl to start close to a page border. First DMA
1215 region may be smaller than PAGE_SIZE */
1216 /* write and jump need and extra dword */
9c8ced51
ST
1217 instructions = fields * (1 + ((bpl + padding) * lines)
1218 / PAGE_SIZE + lines);
453afdd9 1219 instructions += 5;
4d63a25c
HV
1220 risc->size = instructions * 12;
1221 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1222 if (risc->cpu == NULL)
1223 return -ENOMEM;
7b888014
ST
1224
1225 /* write risc instructions */
1226 rp = risc->cpu;
1227 if (UNSET != top_offset)
1228 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
453afdd9 1229 bpl, padding, lines, 0, true);
7b888014
ST
1230 if (UNSET != bottom_offset)
1231 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
453afdd9 1232 bpl, padding, lines, 0, UNSET == top_offset);
7b888014
ST
1233
1234 /* save pointer to jmp instruction address */
1235 risc->jmp = rp;
9c8ced51 1236 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
7b888014
ST
1237 return 0;
1238}
d19770e5 1239
9e44d632 1240int cx23885_risc_databuffer(struct pci_dev *pci,
4d63a25c 1241 struct cx23885_riscmem *risc,
39e75cfe
AB
1242 struct scatterlist *sglist,
1243 unsigned int bpl,
9e44d632 1244 unsigned int lines, unsigned int lpi)
d19770e5
ST
1245{
1246 u32 instructions;
d8eaa58b 1247 __le32 *rp;
d19770e5
ST
1248
1249 /* estimate risc mem: worst case is one write per page border +
1250 one write per scan line + syncs + jump (all 2 dwords). Here
1251 there is no padding and no sync. First DMA region may be smaller
1252 than PAGE_SIZE */
1253 /* Jump and write need an extra dword */
1254 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
453afdd9 1255 instructions += 4;
d19770e5 1256
4d63a25c
HV
1257 risc->size = instructions * 12;
1258 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1259 if (risc->cpu == NULL)
1260 return -ENOMEM;
d19770e5
ST
1261
1262 /* write risc instructions */
1263 rp = risc->cpu;
9e44d632 1264 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
453afdd9 1265 bpl, 0, lines, lpi, lpi == 0);
d19770e5
ST
1266
1267 /* save pointer to jmp instruction address */
1268 risc->jmp = rp;
9c8ced51 1269 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
d19770e5
ST
1270 return 0;
1271}
1272
4d63a25c 1273int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
5ab27e6d
ST
1274 struct scatterlist *sglist, unsigned int top_offset,
1275 unsigned int bottom_offset, unsigned int bpl,
1276 unsigned int padding, unsigned int lines)
1277{
1278 u32 instructions, fields;
1279 __le32 *rp;
5ab27e6d
ST
1280
1281 fields = 0;
1282 if (UNSET != top_offset)
1283 fields++;
1284 if (UNSET != bottom_offset)
1285 fields++;
1286
1287 /* estimate risc mem: worst case is one write per page border +
1288 one write per scan line + syncs + jump (all 2 dwords). Padding
1289 can cause next bpl to start close to a page border. First DMA
1290 region may be smaller than PAGE_SIZE */
1291 /* write and jump need and extra dword */
1292 instructions = fields * (1 + ((bpl + padding) * lines)
1293 / PAGE_SIZE + lines);
453afdd9 1294 instructions += 5;
4d63a25c
HV
1295 risc->size = instructions * 12;
1296 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1297 if (risc->cpu == NULL)
1298 return -ENOMEM;
5ab27e6d
ST
1299 /* write risc instructions */
1300 rp = risc->cpu;
1301
1302 /* Sync to line 6, so US CC line 21 will appear in line '12'
1303 * in the userland vbi payload */
1304 if (UNSET != top_offset)
420b2176 1305 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
453afdd9 1306 bpl, padding, lines, 0, true);
5ab27e6d
ST
1307
1308 if (UNSET != bottom_offset)
420b2176 1309 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
453afdd9 1310 bpl, padding, lines, 0, UNSET == top_offset);
5ab27e6d
ST
1311
1312
1313
1314 /* save pointer to jmp instruction address */
1315 risc->jmp = rp;
1316 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1317 return 0;
1318}
1319
1320
453afdd9 1321void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
d19770e5 1322{
4d63a25c
HV
1323 struct cx23885_riscmem *risc = &buf->risc;
1324
d19770e5 1325 BUG_ON(in_interrupt());
4d63a25c 1326 pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
d19770e5
ST
1327}
1328
7b888014
ST
1329static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
1330{
1331 struct cx23885_dev *dev = port->dev;
1332
22b4e64f
HH
1333 dprintk(1, "%s() Register Dump\n", __func__);
1334 dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
7b888014 1335 cx_read(DEV_CNTRL2));
22b4e64f 1336 dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
dbe83a3b 1337 cx23885_irq_get_mask(dev));
22b4e64f 1338 dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
7b888014 1339 cx_read(AUDIO_INT_INT_MSK));
22b4e64f 1340 dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
7b888014 1341 cx_read(AUD_INT_DMA_CTL));
22b4e64f 1342 dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
7b888014 1343 cx_read(AUDIO_EXT_INT_MSK));
22b4e64f 1344 dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
7b888014 1345 cx_read(AUD_EXT_DMA_CTL));
22b4e64f 1346 dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
7b888014 1347 cx_read(PAD_CTRL));
22b4e64f 1348 dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
7b888014 1349 cx_read(ALT_PIN_OUT_SEL));
22b4e64f 1350 dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
7b888014 1351 cx_read(GPIO2));
22b4e64f 1352 dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
7b888014 1353 port->reg_gpcnt, cx_read(port->reg_gpcnt));
22b4e64f 1354 dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
7b888014 1355 port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
22b4e64f 1356 dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
7b888014 1357 port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
7b913908
ST
1358 if (port->reg_src_sel)
1359 dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
1360 port->reg_src_sel, cx_read(port->reg_src_sel));
22b4e64f 1361 dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
7b888014 1362 port->reg_lngth, cx_read(port->reg_lngth));
22b4e64f 1363 dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
7b888014 1364 port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
22b4e64f 1365 dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
7b888014 1366 port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
22b4e64f 1367 dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
7b888014 1368 port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
22b4e64f 1369 dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
7b888014 1370 port->reg_sop_status, cx_read(port->reg_sop_status));
22b4e64f 1371 dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
7b888014 1372 port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
22b4e64f 1373 dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
7b888014 1374 port->reg_vld_misc, cx_read(port->reg_vld_misc));
22b4e64f 1375 dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
7b888014 1376 port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
22b4e64f 1377 dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
7b888014 1378 port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
f72ff638
BL
1379 dprintk(1, "%s() ts_int_status(0x%08X) 0x%08x\n", __func__,
1380 port->reg_ts_int_stat, cx_read(port->reg_ts_int_stat));
1381 dprintk(1, "%s() PCI_INT_STAT 0x%08X\n", __func__,
1382 cx_read(PCI_INT_STAT));
1383 dprintk(1, "%s() VID_B_INT_MSTAT 0x%08X\n", __func__,
1384 cx_read(VID_B_INT_MSTAT));
1385 dprintk(1, "%s() VID_B_INT_SSTAT 0x%08X\n", __func__,
1386 cx_read(VID_B_INT_SSTAT));
1387 dprintk(1, "%s() VID_C_INT_MSTAT 0x%08X\n", __func__,
1388 cx_read(VID_C_INT_MSTAT));
1389 dprintk(1, "%s() VID_C_INT_SSTAT 0x%08X\n", __func__,
1390 cx_read(VID_C_INT_SSTAT));
7b888014
ST
1391}
1392
453afdd9 1393int cx23885_start_dma(struct cx23885_tsport *port,
44a6481d
MK
1394 struct cx23885_dmaqueue *q,
1395 struct cx23885_buffer *buf)
d19770e5
ST
1396{
1397 struct cx23885_dev *dev = port->dev;
a589b665 1398 u32 reg;
d19770e5 1399
22b4e64f 1400 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
453afdd9 1401 dev->width, dev->height, dev->field);
d19770e5 1402
95f408bb
BL
1403 /* clear dma in progress */
1404 cx23885_clear_bridge_error(dev);
1405
d8d12b43
ST
1406 /* Stop the fifo and risc engine for this port */
1407 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1408
d19770e5
ST
1409 /* setup fifo + format */
1410 cx23885_sram_channel_setup(dev,
9c8ced51 1411 &dev->sram_channels[port->sram_chno],
44a6481d 1412 port->ts_packet_size, buf->risc.dma);
9c8ced51
ST
1413 if (debug > 5) {
1414 cx23885_sram_channel_dump(dev,
1415 &dev->sram_channels[port->sram_chno]);
d19770e5 1416 cx23885_risc_disasm(port, &buf->risc);
3328e4fb 1417 }
d19770e5
ST
1418
1419 /* write TS length to chip */
453afdd9 1420 cx_write(port->reg_lngth, port->ts_packet_size);
d19770e5 1421
9c8ced51
ST
1422 if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1423 (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
e39682b5 1424 pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
22b4e64f 1425 __func__,
661c7e44 1426 cx23885_boards[dev->board].portb,
9c8ced51 1427 cx23885_boards[dev->board].portc);
d19770e5
ST
1428 return -EINVAL;
1429 }
1430
a589b665
ST
1431 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1432 cx23885_av_clk(dev, 0);
1433
d19770e5
ST
1434 udelay(100);
1435
579f1163 1436 /* If the port supports SRC SELECT, configure it */
9c8ced51 1437 if (port->reg_src_sel)
579f1163
ST
1438 cx_write(port->reg_src_sel, port->src_sel_val);
1439
b1b81f1d 1440 cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
d19770e5 1441 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
b1b81f1d 1442 cx_write(port->reg_vld_misc, port->vld_misc_val);
d19770e5
ST
1443 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1444 udelay(100);
1445
9c8ced51 1446 /* NOTE: this is 2 (reserved) for portb, does it matter? */
d19770e5
ST
1447 /* reset counter to zero */
1448 cx_write(port->reg_gpcnt_ctl, 3);
453afdd9 1449 q->count = 0;
d19770e5 1450
52ce27bf
ST
1451 /* Set VIDB pins to input */
1452 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1453 reg = cx_read(PAD_CTRL);
1454 reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1455 cx_write(PAD_CTRL, reg);
1456 }
1457
1458 /* Set VIDC pins to input */
1459 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1460 reg = cx_read(PAD_CTRL);
1461 reg &= ~0x4; /* Clear TS2_SOP_OE */
1462 cx_write(PAD_CTRL, reg);
1463 }
1464
1465 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
a589b665
ST
1466
1467 reg = cx_read(PAD_CTRL);
1468 reg = reg & ~0x1; /* Clear TS1_OE */
1469
1470 /* FIXME, bit 2 writing here is questionable */
1471 /* set TS1_SOP_OE and TS1_OE_HI */
1472 reg = reg | 0xa;
1473 cx_write(PAD_CTRL, reg);
1474
ff9d1c01
BL
1475 /* Sets MOE_CLK_DIS to disable MoE clock */
1476 /* sets MCLK_DLY_SEL/BCLK_DLY_SEL to 1 buffer delay each */
a589b665 1477 cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
ff9d1c01
BL
1478
1479 /* ALT_GPIO_ALT_SET: GPIO[0]
1480 * IR_ALT_TX_SEL: GPIO[1]
1481 * GPIO1_ALT_SEL: VIP_656_DATA[0]
1482 * GPIO0_ALT_SEL: VIP_656_CLK
1483 */
a589b665
ST
1484 cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1485 }
1486
9c8ced51 1487 switch (dev->bridge) {
d19770e5 1488 case CX23885_BRIDGE_885:
3bd40659 1489 case CX23885_BRIDGE_887:
25ea66e2 1490 case CX23885_BRIDGE_888:
d19770e5 1491 /* enable irqs */
9c8ced51 1492 dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
95f408bb
BL
1493 /* clear dma in progress */
1494 cx23885_clear_bridge_error(dev);
d19770e5
ST
1495 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
1496 cx_set(port->reg_dma_ctl, port->dma_ctl_val);
95f408bb
BL
1497
1498 /* clear dma in progress */
1499 cx23885_clear_bridge_error(dev);
dbe83a3b
AW
1500 cx23885_irq_add(dev, port->pci_irqmask);
1501 cx23885_irq_enable_all(dev);
95f408bb
BL
1502
1503 /* clear dma in progress */
1504 cx23885_clear_bridge_error(dev);
d19770e5 1505 break;
d19770e5 1506 default:
579f1163 1507 BUG();
d19770e5
ST
1508 }
1509
d19770e5 1510 cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
95f408bb
BL
1511 /* clear dma in progress */
1512 cx23885_clear_bridge_error(dev);
d19770e5 1513
a589b665
ST
1514 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1515 cx23885_av_clk(dev, 1);
1516
7b888014
ST
1517 if (debug > 4)
1518 cx23885_tsport_reg_dump(port);
1519
95f408bb
BL
1520 cx23885_irq_get_mask(dev);
1521
1522 /* clear dma in progress */
1523 cx23885_clear_bridge_error(dev);
1524
d19770e5
ST
1525 return 0;
1526}
1527
1528static int cx23885_stop_dma(struct cx23885_tsport *port)
1529{
1530 struct cx23885_dev *dev = port->dev;
a589b665 1531 u32 reg;
95f408bb
BL
1532 int delay = 0;
1533 uint32_t reg1_val;
1534 uint32_t reg2_val;
a589b665 1535
22b4e64f 1536 dprintk(1, "%s()\n", __func__);
d19770e5
ST
1537
1538 /* Stop interrupts and DMA */
1539 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1540 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
95f408bb
BL
1541 /* just in case wait for any dma to complete before allowing dealloc */
1542 mdelay(20);
1543 for (delay = 0; delay < 100; delay++) {
1544 reg1_val = cx_read(TC_REQ);
1545 reg2_val = cx_read(TC_REQ_SET);
1546 if (reg1_val == 0 || reg2_val == 0)
1547 break;
1548 mdelay(1);
1549 }
1550 dev_dbg(&dev->pci->dev, "delay=%d reg1=0x%08x reg2=0x%08x\n",
1551 delay, reg1_val, reg2_val);
d19770e5 1552
52ce27bf 1553 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
a589b665
ST
1554 reg = cx_read(PAD_CTRL);
1555
1556 /* Set TS1_OE */
1557 reg = reg | 0x1;
1558
1559 /* clear TS1_SOP_OE and TS1_OE_HI */
1560 reg = reg & ~0xa;
1561 cx_write(PAD_CTRL, reg);
1562 cx_write(port->reg_src_sel, 0);
1563 cx_write(port->reg_gen_ctrl, 8);
a589b665
ST
1564 }
1565
1566 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1567 cx23885_av_clk(dev, 0);
1568
d19770e5
ST
1569 return 0;
1570}
1571
d19770e5
ST
1572/* ------------------------------------------------------------------ */
1573
453afdd9 1574int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
d19770e5
ST
1575{
1576 struct cx23885_dev *dev = port->dev;
1577 int size = port->ts_packet_size * port->ts_packet_count;
2d700715 1578 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
d19770e5 1579
22b4e64f 1580 dprintk(1, "%s: %p\n", __func__, buf);
2d700715 1581 if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
d19770e5 1582 return -EINVAL;
2d700715 1583 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
d19770e5 1584
453afdd9
HV
1585 cx23885_risc_databuffer(dev->pci, &buf->risc,
1586 sgt->sgl,
1587 port->ts_packet_size, port->ts_packet_count, 0);
1588 return 0;
d19770e5
ST
1589}
1590
453afdd9
HV
1591/*
1592 * The risc program for each buffer works as follows: it starts with a simple
1593 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
1594 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
1595 * the initial JUMP).
1596 *
1597 * This is the risc program of the first buffer to be queued if the active list
1598 * is empty and it just keeps DMAing this buffer without generating any
1599 * interrupts.
1600 *
1601 * If a new buffer is added then the initial JUMP in the code for that buffer
1602 * will generate an interrupt which signals that the previous buffer has been
1603 * DMAed successfully and that it can be returned to userspace.
1604 *
1605 * It also sets the final jump of the previous buffer to the start of the new
1606 * buffer, thus chaining the new buffer into the DMA chain. This is a single
1607 * atomic u32 write, so there is no race condition.
1608 *
1609 * The end-result of all this that you only get an interrupt when a buffer
1610 * is ready, so the control flow is very easy.
1611 */
d19770e5
ST
1612void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
1613{
1614 struct cx23885_buffer *prev;
1615 struct cx23885_dev *dev = port->dev;
1616 struct cx23885_dmaqueue *cx88q = &port->mpegq;
453afdd9 1617 unsigned long flags;
d19770e5 1618
453afdd9
HV
1619 buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
1620 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
1621 buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
d19770e5
ST
1622 buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
1623
453afdd9 1624 spin_lock_irqsave(&dev->slock, flags);
d19770e5 1625 if (list_empty(&cx88q->active)) {
453afdd9 1626 list_add_tail(&buf->queue, &cx88q->active);
44a6481d 1627 dprintk(1, "[%p/%d] %s - first active\n",
2d700715 1628 buf, buf->vb.vb2_buf.index, __func__);
d19770e5 1629 } else {
453afdd9 1630 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
44a6481d 1631 prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
453afdd9
HV
1632 queue);
1633 list_add_tail(&buf->queue, &cx88q->active);
d19770e5 1634 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
9c8ced51 1635 dprintk(1, "[%p/%d] %s - append to active\n",
2d700715 1636 buf, buf->vb.vb2_buf.index, __func__);
d19770e5 1637 }
453afdd9 1638 spin_unlock_irqrestore(&dev->slock, flags);
d19770e5
ST
1639}
1640
1641/* ----------------------------------------------------------- */
1642
453afdd9 1643static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
d19770e5 1644{
d19770e5
ST
1645 struct cx23885_dmaqueue *q = &port->mpegq;
1646 struct cx23885_buffer *buf;
1647 unsigned long flags;
1648
44a6481d 1649 spin_lock_irqsave(&port->slock, flags);
d19770e5 1650 while (!list_empty(&q->active)) {
44a6481d 1651 buf = list_entry(q->active.next, struct cx23885_buffer,
453afdd9
HV
1652 queue);
1653 list_del(&buf->queue);
2d700715 1654 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
44a6481d 1655 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
2d700715
JS
1656 buf, buf->vb.vb2_buf.index, reason,
1657 (unsigned long)buf->risc.dma);
d19770e5 1658 }
44a6481d 1659 spin_unlock_irqrestore(&port->slock, flags);
d19770e5
ST
1660}
1661
b1b81f1d
ST
1662void cx23885_cancel_buffers(struct cx23885_tsport *port)
1663{
9c8ced51 1664 dprintk(1, "%s()\n", __func__);
d19770e5 1665 cx23885_stop_dma(port);
453afdd9 1666 do_cancel_buffers(port, "cancel");
d19770e5
ST
1667}
1668
b1b81f1d
ST
1669int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
1670{
1671 /* FIXME: port1 assumption here. */
1672 struct cx23885_tsport *port = &dev->ts1;
1673 int count = 0;
1674 int handled = 0;
1675
1676 if (status == 0)
1677 return handled;
1678
1679 count = cx_read(port->reg_gpcnt);
1680 dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
1681 status, cx_read(port->reg_ts_int_msk), count);
1682
1683 if ((status & VID_B_MSK_BAD_PKT) ||
1684 (status & VID_B_MSK_OPC_ERR) ||
1685 (status & VID_B_MSK_VBI_OPC_ERR) ||
1686 (status & VID_B_MSK_SYNC) ||
1687 (status & VID_B_MSK_VBI_SYNC) ||
1688 (status & VID_B_MSK_OF) ||
1689 (status & VID_B_MSK_VBI_OF)) {
e39682b5 1690 pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n",
07ab29e1 1691 dev->name, status);
b1b81f1d
ST
1692 if (status & VID_B_MSK_BAD_PKT)
1693 dprintk(1, " VID_B_MSK_BAD_PKT\n");
1694 if (status & VID_B_MSK_OPC_ERR)
1695 dprintk(1, " VID_B_MSK_OPC_ERR\n");
1696 if (status & VID_B_MSK_VBI_OPC_ERR)
1697 dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
1698 if (status & VID_B_MSK_SYNC)
1699 dprintk(1, " VID_B_MSK_SYNC\n");
1700 if (status & VID_B_MSK_VBI_SYNC)
1701 dprintk(1, " VID_B_MSK_VBI_SYNC\n");
1702 if (status & VID_B_MSK_OF)
1703 dprintk(1, " VID_B_MSK_OF\n");
1704 if (status & VID_B_MSK_VBI_OF)
1705 dprintk(1, " VID_B_MSK_VBI_OF\n");
1706
1707 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1708 cx23885_sram_channel_dump(dev,
1709 &dev->sram_channels[port->sram_chno]);
1710 cx23885_417_check_encoder(dev);
1711 } else if (status & VID_B_MSK_RISCI1) {
1712 dprintk(7, " VID_B_MSK_RISCI1\n");
1713 spin_lock(&port->slock);
1714 cx23885_wakeup(port, &port->mpegq, count);
1715 spin_unlock(&port->slock);
b1b81f1d
ST
1716 }
1717 if (status) {
1718 cx_write(port->reg_ts_int_stat, status);
1719 handled = 1;
1720 }
1721
1722 return handled;
1723}
1724
a6a3f140
ST
1725static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
1726{
1727 struct cx23885_dev *dev = port->dev;
1728 int handled = 0;
1729 u32 count;
1730
b1b81f1d
ST
1731 if ((status & VID_BC_MSK_OPC_ERR) ||
1732 (status & VID_BC_MSK_BAD_PKT) ||
1733 (status & VID_BC_MSK_SYNC) ||
9c8ced51
ST
1734 (status & VID_BC_MSK_OF)) {
1735
a6a3f140 1736 if (status & VID_BC_MSK_OPC_ERR)
9c8ced51
ST
1737 dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
1738 VID_BC_MSK_OPC_ERR);
1739
a6a3f140 1740 if (status & VID_BC_MSK_BAD_PKT)
9c8ced51
ST
1741 dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
1742 VID_BC_MSK_BAD_PKT);
1743
a6a3f140 1744 if (status & VID_BC_MSK_SYNC)
9c8ced51
ST
1745 dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
1746 VID_BC_MSK_SYNC);
1747
a6a3f140 1748 if (status & VID_BC_MSK_OF)
9c8ced51
ST
1749 dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
1750 VID_BC_MSK_OF);
a6a3f140 1751
e39682b5 1752 pr_err("%s: mpeg risc op code error\n", dev->name);
a6a3f140
ST
1753
1754 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
b1b81f1d
ST
1755 cx23885_sram_channel_dump(dev,
1756 &dev->sram_channels[port->sram_chno]);
a6a3f140
ST
1757
1758 } else if (status & VID_BC_MSK_RISCI1) {
1759
1760 dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);
1761
1762 spin_lock(&port->slock);
1763 count = cx_read(port->reg_gpcnt);
1764 cx23885_wakeup(port, &port->mpegq, count);
1765 spin_unlock(&port->slock);
1766
a6a3f140
ST
1767 }
1768 if (status) {
1769 cx_write(port->reg_ts_int_stat, status);
1770 handled = 1;
1771 }
1772
1773 return handled;
1774}
1775
03121f05 1776static irqreturn_t cx23885_irq(int irq, void *dev_id)
d19770e5
ST
1777{
1778 struct cx23885_dev *dev = dev_id;
a6a3f140
ST
1779 struct cx23885_tsport *ts1 = &dev->ts1;
1780 struct cx23885_tsport *ts2 = &dev->ts2;
d19770e5 1781 u32 pci_status, pci_mask;
7b888014 1782 u32 vida_status, vida_mask;
9e44d632 1783 u32 audint_status, audint_mask;
6f074abb 1784 u32 ts1_status, ts1_mask;
d19770e5 1785 u32 ts2_status, ts2_mask;
7b888014 1786 int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
9e44d632 1787 int audint_count = 0;
98d109f9 1788 bool subdev_handled;
d19770e5
ST
1789
1790 pci_status = cx_read(PCI_INT_STAT);
dbe83a3b 1791 pci_mask = cx23885_irq_get_mask(dev);
3b8315f3
BL
1792 if ((pci_status & pci_mask) == 0) {
1793 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1794 pci_status, pci_mask);
1795 goto out;
1796 }
1797
7b888014
ST
1798 vida_status = cx_read(VID_A_INT_STAT);
1799 vida_mask = cx_read(VID_A_INT_MSK);
9e44d632
MM
1800 audint_status = cx_read(AUDIO_INT_INT_STAT);
1801 audint_mask = cx_read(AUDIO_INT_INT_MSK);
6f074abb
ST
1802 ts1_status = cx_read(VID_B_INT_STAT);
1803 ts1_mask = cx_read(VID_B_INT_MSK);
d19770e5
ST
1804 ts2_status = cx_read(VID_C_INT_STAT);
1805 ts2_mask = cx_read(VID_C_INT_MSK);
1806
3b8315f3
BL
1807 if (((pci_status & pci_mask) == 0) &&
1808 ((ts2_status & ts2_mask) == 0) &&
1809 ((ts1_status & ts1_mask) == 0))
d19770e5
ST
1810 goto out;
1811
7b888014 1812 vida_count = cx_read(VID_A_GPCNT);
9e44d632 1813 audint_count = cx_read(AUD_INT_A_GPCNT);
a6a3f140
ST
1814 ts1_count = cx_read(ts1->reg_gpcnt);
1815 ts2_count = cx_read(ts2->reg_gpcnt);
7b888014
ST
1816 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1817 pci_status, pci_mask);
1818 dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1819 vida_status, vida_mask, vida_count);
9e44d632
MM
1820 dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
1821 audint_status, audint_mask, audint_count);
7b888014
ST
1822 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
1823 ts1_status, ts1_mask, ts1_count);
1824 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
1825 ts2_status, ts2_mask, ts2_count);
d19770e5 1826
f59ad611
AW
1827 if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
1828 PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
1829 PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
1830 PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
1831 PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
98d109f9 1832 PCI_MSK_AV_CORE | PCI_MSK_IR)) {
d19770e5
ST
1833
1834 if (pci_status & PCI_MSK_RISC_RD)
9c8ced51
ST
1835 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
1836 PCI_MSK_RISC_RD);
1837
d19770e5 1838 if (pci_status & PCI_MSK_RISC_WR)
9c8ced51
ST
1839 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
1840 PCI_MSK_RISC_WR);
1841
d19770e5 1842 if (pci_status & PCI_MSK_AL_RD)
9c8ced51
ST
1843 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
1844 PCI_MSK_AL_RD);
1845
d19770e5 1846 if (pci_status & PCI_MSK_AL_WR)
9c8ced51
ST
1847 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
1848 PCI_MSK_AL_WR);
1849
d19770e5 1850 if (pci_status & PCI_MSK_APB_DMA)
9c8ced51
ST
1851 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
1852 PCI_MSK_APB_DMA);
1853
d19770e5 1854 if (pci_status & PCI_MSK_VID_C)
9c8ced51
ST
1855 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
1856 PCI_MSK_VID_C);
1857
d19770e5 1858 if (pci_status & PCI_MSK_VID_B)
9c8ced51
ST
1859 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
1860 PCI_MSK_VID_B);
1861
d19770e5 1862 if (pci_status & PCI_MSK_VID_A)
9c8ced51
ST
1863 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
1864 PCI_MSK_VID_A);
1865
d19770e5 1866 if (pci_status & PCI_MSK_AUD_INT)
9c8ced51
ST
1867 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
1868 PCI_MSK_AUD_INT);
1869
d19770e5 1870 if (pci_status & PCI_MSK_AUD_EXT)
9c8ced51
ST
1871 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
1872 PCI_MSK_AUD_EXT);
d19770e5 1873
5a23b076
IL
1874 if (pci_status & PCI_MSK_GPIO0)
1875 dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
1876 PCI_MSK_GPIO0);
1877
1878 if (pci_status & PCI_MSK_GPIO1)
1879 dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
1880 PCI_MSK_GPIO1);
f59ad611 1881
98d109f9
AW
1882 if (pci_status & PCI_MSK_AV_CORE)
1883 dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
1884 PCI_MSK_AV_CORE);
1885
f59ad611
AW
1886 if (pci_status & PCI_MSK_IR)
1887 dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
1888 PCI_MSK_IR);
d19770e5
ST
1889 }
1890
78db8547
IL
1891 if (cx23885_boards[dev->board].ci_type == 1 &&
1892 (pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
1893 handled += netup_ci_slot_status(dev, pci_status);
a26ccc9d 1894
78db8547
IL
1895 if (cx23885_boards[dev->board].ci_type == 2 &&
1896 (pci_status & PCI_MSK_GPIO0))
1897 handled += altera_ci_irq(dev);
5a23b076 1898
7b888014
ST
1899 if (ts1_status) {
1900 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1901 handled += cx23885_irq_ts(ts1, ts1_status);
b1b81f1d
ST
1902 else
1903 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1904 handled += cx23885_irq_417(dev, ts1_status);
7b888014
ST
1905 }
1906
1907 if (ts2_status) {
1908 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1909 handled += cx23885_irq_ts(ts2, ts2_status);
b1b81f1d
ST
1910 else
1911 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1912 handled += cx23885_irq_417(dev, ts2_status);
7b888014 1913 }
6f074abb 1914
7b888014
ST
1915 if (vida_status)
1916 handled += cx23885_video_irq(dev, vida_status);
6f074abb 1917
9e44d632
MM
1918 if (audint_status)
1919 handled += cx23885_audio_irq(dev, audint_status, audint_mask);
1920
f59ad611 1921 if (pci_status & PCI_MSK_IR) {
98d109f9 1922 subdev_handled = false;
260e689b 1923 v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
98d109f9
AW
1924 pci_status, &subdev_handled);
1925 if (subdev_handled)
1926 handled++;
1927 }
1928
e5514f10
AW
1929 if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
1930 cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
c21412f5 1931 schedule_work(&dev->cx25840_work);
e5514f10 1932 handled++;
f59ad611
AW
1933 }
1934
6f074abb 1935 if (handled)
3b8315f3 1936 cx_write(PCI_INT_STAT, pci_status & pci_mask);
d19770e5
ST
1937out:
1938 return IRQ_RETVAL(handled);
1939}
1940
f59ad611
AW
1941static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1942 unsigned int notification, void *arg)
1943{
1944 struct cx23885_dev *dev;
1945
1946 if (sd == NULL)
1947 return;
1948
1949 dev = to_cx23885(sd->v4l2_dev);
1950
1951 switch (notification) {
e5514f10 1952 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
f59ad611
AW
1953 if (sd == dev->sd_ir)
1954 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1955 break;
e5514f10 1956 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
f59ad611
AW
1957 if (sd == dev->sd_ir)
1958 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1959 break;
1960 }
1961}
1962
1963static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
1964{
e5514f10 1965 INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
f59ad611
AW
1966 INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
1967 INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
1968 dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
1969}
1970
6de72bd6 1971static inline int encoder_on_portb(struct cx23885_dev *dev)
6f8bee9b
ST
1972{
1973 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1974}
1975
6de72bd6 1976static inline int encoder_on_portc(struct cx23885_dev *dev)
6f8bee9b
ST
1977{
1978 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1979}
1980
/* Mask represents 32 different GPIOs; GPIOs are split into multiple
 * registers depending on the board configuration (and whether the
 * 417 encoder, with its own GPIOs, is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO 2 through 0 - On the cx23885 bridge
 * GPIO 18 through 3 - On the cx23417 host bus interface
 * GPIO 23 through 19 - On the cx25840 a/v core
 */
1993void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1994{
1995 if (mask & 0x7)
1996 cx_set(GP0_IO, mask & 0x7);
1997
1998 if (mask & 0x0007fff8) {
1999 if (encoder_on_portb(dev) || encoder_on_portc(dev))
e39682b5 2000 pr_err("%s: Setting GPIO on encoder ports\n",
6f8bee9b
ST
2001 dev->name);
2002 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
2003 }
2004
2005 /* TODO: 23-19 */
2006 if (mask & 0x00f80000)
e39682b5 2007 pr_info("%s: Unsupported\n", dev->name);
6f8bee9b
ST
2008}
2009
2010void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
2011{
2012 if (mask & 0x00000007)
2013 cx_clear(GP0_IO, mask & 0x7);
2014
2015 if (mask & 0x0007fff8) {
2016 if (encoder_on_portb(dev) || encoder_on_portc(dev))
e39682b5 2017 pr_err("%s: Clearing GPIO moving on encoder ports\n",
6f8bee9b
ST
2018 dev->name);
2019 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
2020 }
2021
2022 /* TODO: 23-19 */
2023 if (mask & 0x00f80000)
e39682b5 2024 pr_info("%s: Unsupported\n", dev->name);
6f8bee9b
ST
2025}
2026
09ea33e5
IL
2027u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
2028{
2029 if (mask & 0x00000007)
2030 return (cx_read(GP0_IO) >> 8) & mask & 0x7;
2031
2032 if (mask & 0x0007fff8) {
2033 if (encoder_on_portb(dev) || encoder_on_portc(dev))
e39682b5 2034 pr_err("%s: Reading GPIO moving on encoder ports\n",
09ea33e5
IL
2035 dev->name);
2036 return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
2037 }
2038
2039 /* TODO: 23-19 */
2040 if (mask & 0x00f80000)
e39682b5 2041 pr_info("%s: Unsupported\n", dev->name);
09ea33e5
IL
2042
2043 return 0;
2044}
2045
6f8bee9b
ST
2046void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
2047{
2048 if ((mask & 0x00000007) && asoutput)
2049 cx_set(GP0_IO, (mask & 0x7) << 16);
2050 else if ((mask & 0x00000007) && !asoutput)
2051 cx_clear(GP0_IO, (mask & 0x7) << 16);
2052
2053 if (mask & 0x0007fff8) {
2054 if (encoder_on_portb(dev) || encoder_on_portc(dev))
e39682b5 2055 pr_err("%s: Enabling GPIO on encoder ports\n",
6f8bee9b
ST
2056 dev->name);
2057 }
2058
2059 /* MC417_OEN is active low for output, write 1 for an input */
2060 if ((mask & 0x0007fff8) && asoutput)
2061 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
2062
2063 else if ((mask & 0x0007fff8) && !asoutput)
2064 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
2065
2066 /* TODO: 23-19 */
2067}
2068
4bd46aa0
BL
/* PCI IDs of platform devices whose presence indicates the cx23885 RiSC
 * DMA engine needs the periodic reset workaround (see dma_reset_workaround
 * module parameter above). */
static struct {
	int vendor, dev;
} const broken_dev_id[] = {
	/* According with
	 * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci,
	 * 0x1451 is PCI ID for the IOMMU found on Ryzen
	 */
	{ PCI_VENDOR_ID_AMD, 0x1451 },
};
2078
2079static bool cx23885_does_need_dma_reset(void)
2080{
2081 int i;
2082 struct pci_dev *pdev = NULL;
2083
2084 if (dma_reset_workaround == 0)
2085 return false;
2086 else if (dma_reset_workaround == 2)
2087 return true;
2088
2089 for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) {
2090 pdev = pci_get_device(broken_dev_id[i].vendor,
2091 broken_dev_id[i].dev, NULL);
2092 if (pdev) {
2093 pci_dev_put(pdev);
2094 return true;
2095 }
2096 }
2097 return false;
2098}
2099
4c62e976
GKH
/*
 * PCI probe callback: allocate and initialize a cx23885 device instance.
 *
 * Registers the v4l2_device and control handler, enables the PCI device,
 * performs board-specific setup, hooks the shared IRQ and finally enables
 * the IR interrupt sources (which may fire as soon as they are enabled,
 * hence after request_irq()).
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is torn down via the goto ladder at the bottom.
 */
static int cx23885_initdev(struct pci_dev *pci_dev,
			   const struct pci_device_id *pci_id)
{
	struct cx23885_dev *dev;
	struct v4l2_ctrl_handler *hdl;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (NULL == dev)
		return -ENOMEM;

	/* Decide early whether the RiSC DMA reset workaround applies */
	dev->need_dma_reset = cx23885_does_need_dma_reset();

	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
	if (err < 0)
		goto fail_free;

	hdl = &dev->ctrl_handler;
	v4l2_ctrl_handler_init(hdl, 6);
	if (hdl->error) {
		err = hdl->error;
		goto fail_ctrl;
	}
	dev->v4l2_dev.ctrl_handler = hdl;

	/* Prepare to handle notifications from subdevices */
	cx23885_v4l2_dev_notify_init(dev);

	/* pci init */
	dev->pci = pci_dev;
	if (pci_enable_device(pci_dev)) {
		err = -EIO;
		goto fail_ctrl;
	}

	if (cx23885_dev_setup(dev) < 0) {
		err = -EINVAL;
		goto fail_ctrl;
	}

	/* print pci info */
	dev->pci_rev = pci_dev->revision;
	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
	pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
		dev->name,
		pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
		dev->pci_lat,
		(unsigned long long)pci_resource_start(pci_dev, 0));

	pci_set_master(pci_dev);
	/* The bridge only does 32-bit DMA addressing */
	err = pci_set_dma_mask(pci_dev, 0xffffffff);
	if (err) {
		pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
		/* NOTE(review): this path skips cx23885_dev_unregister()
		 * even though cx23885_dev_setup() succeeded above — looks
		 * like a resource leak on this (unlikely) failure; confirm
		 * against upstream history before changing. */
		goto fail_ctrl;
	}

	err = request_irq(pci_dev->irq, cx23885_irq,
			  IRQF_SHARED, dev->name, dev);
	if (err < 0) {
		pr_err("%s: can't get IRQ %d\n",
			dev->name, pci_dev->irq);
		goto fail_irq;
	}

	/* Boards with a CI slot need their GPIO interrupt lines enabled */
	switch (dev->board) {
	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
		break;
	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
		break;
	}

	/*
	 * The CX2388[58] IR controller can start firing interrupts when
	 * enabled, so these have to take place after the cx23885_irq() handler
	 * is hooked up by the call to request_irq() above.
	 */
	cx23885_ir_pci_int_enable(dev);
	cx23885_input_init(dev);

	return 0;

fail_irq:
	cx23885_dev_unregister(dev);
fail_ctrl:
	v4l2_ctrl_handler_free(hdl);
	v4l2_device_unregister(&dev->v4l2_dev);
fail_free:
	kfree(dev);
	return err;
}
2192
4c62e976 2193static void cx23885_finidev(struct pci_dev *pci_dev)
d19770e5 2194{
c0714f6c
HV
2195 struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
2196 struct cx23885_dev *dev = to_cx23885(v4l2_dev);
d19770e5 2197
dbda8f70 2198 cx23885_input_fini(dev);
f59ad611 2199 cx23885_ir_fini(dev);
d19770e5 2200
f59ad611 2201 cx23885_shutdown(dev);
29f8a0a5 2202
d19770e5
ST
2203 /* unregister stuff */
2204 free_irq(pci_dev->irq, dev);
d19770e5 2205
8d4d9329
HV
2206 pci_disable_device(pci_dev);
2207
d19770e5 2208 cx23885_dev_unregister(dev);
da59a4de 2209 v4l2_ctrl_handler_free(&dev->ctrl_handler);
c0714f6c 2210 v4l2_device_unregister(v4l2_dev);
d19770e5
ST
2211 kfree(dev);
2212}
2213
0fcefb39 2214static const struct pci_device_id cx23885_pci_tbl[] = {
d19770e5
ST
2215 {
2216 /* CX23885 */
2217 .vendor = 0x14f1,
2218 .device = 0x8852,
2219 .subvendor = PCI_ANY_ID,
2220 .subdevice = PCI_ANY_ID,
9c8ced51 2221 }, {
d19770e5
ST
2222 /* CX23887 Rev 2 */
2223 .vendor = 0x14f1,
2224 .device = 0x8880,
2225 .subvendor = PCI_ANY_ID,
2226 .subdevice = PCI_ANY_ID,
9c8ced51 2227 }, {
d19770e5
ST
2228 /* --- end of list --- */
2229 }
2230};
2231MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2232
/* PCI driver glue; power management is not implemented (see TODO). */
static struct pci_driver cx23885_pci_driver = {
	.name = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe = cx23885_initdev,
	.remove = cx23885_finidev,
	/* TODO */
	.suspend = NULL,
	.resume = NULL,
};
2242
9710e7a7 2243static int __init cx23885_init(void)
d19770e5 2244{
e39682b5 2245 pr_info("cx23885 driver version %s loaded\n",
1990d50b 2246 CX23885_VERSION);
d19770e5
ST
2247 return pci_register_driver(&cx23885_pci_driver);
2248}
2249
9710e7a7 2250static void __exit cx23885_fini(void)
d19770e5
ST
2251{
2252 pci_unregister_driver(&cx23885_pci_driver);
2253}
2254
2255module_init(cx23885_init);
2256module_exit(cx23885_fini);