/*
 * Driver for the Conexant CX23885 PCIe bridge
 *
 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include <linux/firmware.h>

#include "cx23885.h"
#include "cimax2.h"
#include "altera-ci.h"
#include "cx23888-ir.h"
#include "cx23885-ir.h"
#include "cx23885-av.h"
#include "cx23885-input.h"

MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX23885_VERSION);

static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");

#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
	} while (0)

static unsigned int cx23885_devcount;

#define NO_SYNC_LINE (-1U)

/* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
 * CX23887 Assumptions
 * 1 line = 16 bytes of CDT
 * cmds size = 80
 * cdt size = 16 * linesize
 * iqsize = 64
 * maxlines = 6
 *
 * Address Space:
 * 0x00000000 0x00008fff FIFO clusters
 * 0x00010000 0x000104af Channel Management Data Structures
 * 0x000104b0 0x000104ff Free
 * 0x00010500 0x000108bf 15 channels * iqsize
 * 0x000108c0 0x000108ff Free
 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
 *                       15 channels * (iqsize + (maxlines * linesize))
 * 0x00010ea0 0x00010xxx Free
 */

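/*
 * Illustrative sizing (derived from the assumptions above; this note is not
 * in the original source): with 1 CDT line = 16 bytes and maxlines = 6,
 * each active channel needs 6 * 16 = 96 bytes of CDT plus a 64-byte
 * instruction queue, on top of its 80-byte cmds block.
 */
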
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.name = "VID A",
		.cmds_start = 0x10000,
		.ctrl_start = 0x10380,
		.cdt = 0x104c0,
		.fifo_start = 0x40,
		.fifo_size = 0x2800,
		.ptr1_reg = DMA1_PTR1,
		.ptr2_reg = DMA1_PTR2,
		.cnt1_reg = DMA1_CNT1,
		.cnt2_reg = DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name = "ch2",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA2_PTR1,
		.ptr2_reg = DMA2_PTR2,
		.cnt1_reg = DMA2_CNT1,
		.cnt2_reg = DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name = "TS1 B",
		.cmds_start = 0x100A0,
		.ctrl_start = 0x10400,
		.cdt = 0x10580,
		.fifo_start = 0x5000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA3_PTR1,
		.ptr2_reg = DMA3_PTR2,
		.cnt1_reg = DMA3_CNT1,
		.cnt2_reg = DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name = "ch4",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA4_PTR1,
		.ptr2_reg = DMA4_PTR2,
		.cnt1_reg = DMA4_CNT1,
		.cnt2_reg = DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name = "ch5",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA5_PTR1,
		.ptr2_reg = DMA5_PTR2,
		.cnt1_reg = DMA5_CNT1,
		.cnt2_reg = DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name = "TS2 C",
		.cmds_start = 0x10140,
		.ctrl_start = 0x10440,
		.cdt = 0x105e0,
		.fifo_start = 0x6000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA5_PTR1,
		.ptr2_reg = DMA5_PTR2,
		.cnt1_reg = DMA5_CNT1,
		.cnt2_reg = DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name = "TV Audio",
		.cmds_start = 0x10190,
		.ctrl_start = 0x10480,
		.cdt = 0x10a00,
		.fifo_start = 0x7000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA6_PTR1,
		.ptr2_reg = DMA6_PTR2,
		.cnt1_reg = DMA6_CNT1,
		.cnt2_reg = DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name = "ch8",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA7_PTR1,
		.ptr2_reg = DMA7_PTR2,
		.cnt1_reg = DMA7_CNT1,
		.cnt2_reg = DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name = "ch9",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA8_PTR1,
		.ptr2_reg = DMA8_PTR2,
		.cnt1_reg = DMA8_CNT1,
		.cnt2_reg = DMA8_CNT2,
	},
};

static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.name = "VID A",
		.cmds_start = 0x10000,
		.ctrl_start = 0x105b0,
		.cdt = 0x107b0,
		.fifo_start = 0x40,
		.fifo_size = 0x2800,
		.ptr1_reg = DMA1_PTR1,
		.ptr2_reg = DMA1_PTR2,
		.cnt1_reg = DMA1_CNT1,
		.cnt2_reg = DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name = "VID A (VBI)",
		.cmds_start = 0x10050,
		.ctrl_start = 0x105F0,
		.cdt = 0x10810,
		.fifo_start = 0x3000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA2_PTR1,
		.ptr2_reg = DMA2_PTR2,
		.cnt1_reg = DMA2_CNT1,
		.cnt2_reg = DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name = "TS1 B",
		.cmds_start = 0x100A0,
		.ctrl_start = 0x10630,
		.cdt = 0x10870,
		.fifo_start = 0x5000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA3_PTR1,
		.ptr2_reg = DMA3_PTR2,
		.cnt1_reg = DMA3_CNT1,
		.cnt2_reg = DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name = "ch4",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA4_PTR1,
		.ptr2_reg = DMA4_PTR2,
		.cnt1_reg = DMA4_CNT1,
		.cnt2_reg = DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name = "ch5",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA5_PTR1,
		.ptr2_reg = DMA5_PTR2,
		.cnt1_reg = DMA5_CNT1,
		.cnt2_reg = DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name = "TS2 C",
		.cmds_start = 0x10140,
		.ctrl_start = 0x10670,
		.cdt = 0x108d0,
		.fifo_start = 0x6000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA5_PTR1,
		.ptr2_reg = DMA5_PTR2,
		.cnt1_reg = DMA5_CNT1,
		.cnt2_reg = DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name = "TV Audio",
		.cmds_start = 0x10190,
		.ctrl_start = 0x106B0,
		.cdt = 0x10930,
		.fifo_start = 0x7000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA6_PTR1,
		.ptr2_reg = DMA6_PTR2,
		.cnt1_reg = DMA6_CNT1,
		.cnt2_reg = DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name = "ch8",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA7_PTR1,
		.ptr2_reg = DMA7_PTR2,
		.cnt1_reg = DMA7_CNT1,
		.cnt2_reg = DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name = "ch9",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA8_PTR1,
		.ptr2_reg = DMA8_PTR2,
		.cnt1_reg = DMA8_CNT1,
		.cnt2_reg = DMA8_CNT2,
	},
};

static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;
	cx_set(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = mask & dev->pci_irqmask;
	if (v)
		cx_set(PCI_INT_MSK, v);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}

void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}

void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask &= ~mask;
	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = cx_read(PCI_INT_MSK);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
	return v;
}

static int cx23885_risc_decode(u32 risc)
{
	static char *instr[16] = {
		[RISC_SYNC >> 28] = "sync",
		[RISC_WRITE >> 28] = "write",
		[RISC_WRITEC >> 28] = "writec",
		[RISC_READ >> 28] = "read",
		[RISC_READC >> 28] = "readc",
		[RISC_JUMP >> 28] = "jump",
		[RISC_SKIP >> 28] = "skip",
		[RISC_WRITERM >> 28] = "writerm",
		[RISC_WRITECM >> 28] = "writecm",
		[RISC_WRITECR >> 28] = "writecr",
	};
	static int incr[16] = {
		[RISC_WRITE >> 28] = 3,
		[RISC_JUMP >> 28] = 3,
		[RISC_SKIP >> 28] = 1,
		[RISC_SYNC >> 28] = 1,
		[RISC_WRITERM >> 28] = 3,
		[RISC_WRITECM >> 28] = 3,
		[RISC_WRITECR >> 28] = 4,
	};
	static char *bits[] = {
		"12", "13", "14", "resync",
		"cnt0", "cnt1", "18", "19",
		"20", "21", "22", "23",
		"irq1", "irq2", "eol", "sol",
	};
	int i;

	printk("0x%08x [ %s", risc,
	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
		if (risc & (1 << (i + 12)))
			printk(" %s", bits[i]);
	printk(" count=%d ]\n", risc & 0xfff);
	return incr[risc >> 28] ? incr[risc >> 28] : 1;
}

static void cx23885_wakeup(struct cx23885_tsport *port,
			   struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_buffer *buf;

	if (list_empty(&q->active))
		return;
	buf = list_entry(q->active.next,
			 struct cx23885_buffer, queue);

	v4l2_get_timestamp(&buf->vb.timestamp);
	buf->vb.sequence = q->count++;
	dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
		buf->vb.vb2_buf.index,
		count, q->count);
	list_del(&buf->queue);
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}

int cx23885_sram_channel_setup(struct cx23885_dev *dev,
			       struct sram_channel *ch,
			       unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl = (bpl + 7) & ~7; /* alignment */
	cdt = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	BUG_ON(lines < 2);

	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
	cx_write(8 + 4, 12);
	cx_write(8 + 8, 0);

	/* write CDT */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i + 4, 0);
		cx_write(cdt + 16*i + 8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start + 8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}

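/*
 * Typical use (illustrative note, mirroring cx23885_start_dma() later in
 * this file): point a TS port's SRAM channel at a buffer's RISC program,
 * e.g.
 *
 *	cx23885_sram_channel_setup(dev,
 *				   &dev->sram_channels[port->sram_chno],
 *				   port->ts_packet_size, buf->risc.dma);
 */
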
void cx23885_sram_channel_dump(struct cx23885_dev *dev,
			       struct sram_channel *ch)
{
	static char *name[] = {
		"init risc lo",
		"init risc hi",
		"cdt base",
		"cdt size",
		"iq base",
		"iq size",
		"risc pc lo",
		"risc pc hi",
		"iq wr ptr",
		"iq rd ptr",
		"cdt current",
		"pci target lo",
		"pci target hi",
		"line / byte",
	};
	u32 risc;
	unsigned int i, j, n;

	printk(KERN_WARNING "%s: %s - dma channel status dump\n",
	       dev->name, ch->name);
	for (i = 0; i < ARRAY_SIZE(name); i++)
		printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
		       dev->name, name[i],
		       cx_read(ch->cmds_start + 4*i));

	for (i = 0; i < 4; i++) {
		risc = cx_read(ch->cmds_start + 4 * (i + 14));
		printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
		cx23885_risc_decode(risc);
	}
	for (i = 0; i < (64 >> 2); i += n) {
		risc = cx_read(ch->ctrl_start + 4 * i);
		/* No consideration for bits 63-32 */

		printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
		       ch->ctrl_start + 4 * i, i);
		n = cx23885_risc_decode(risc);
		for (j = 1; j < n; j++) {
			risc = cx_read(ch->ctrl_start + 4 * (i + j));
			printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
			       dev->name, i+j, risc, j);
		}
	}

	printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
	       dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
	printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
	       dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
	printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr1_reg));
	printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr2_reg));
	printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt1_reg));
	printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt2_reg));
}

static void cx23885_risc_disasm(struct cx23885_tsport *port,
				struct cx23885_riscmem *risc)
{
	struct cx23885_dev *dev = port->dev;
	unsigned int i, j, n;

	printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
	       dev->name, risc->cpu, (unsigned long)risc->dma);
	for (i = 0; i < (risc->size >> 2); i += n) {
		printk(KERN_INFO "%s: %04d: ", dev->name, i);
		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
		for (j = 1; j < n; j++)
			printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
			       dev->name, i + j, risc->cpu[i + j], j);
		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
			break;
	}
}

static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);

}

static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	mdelay(100);

	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);
}


static int cx23885_pci_quirks(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
	 * occur on the cx23887 bridge.
	 */
	if (dev->bridge == CX23885_BRIDGE_885)
		cx_clear(RDR_TLCTL0, 1 << 4);

	return 0;
}

static int get_resources(struct cx23885_dev *dev)
{
	if (request_mem_region(pci_resource_start(dev->pci, 0),
			       pci_resource_len(dev->pci, 0),
			       dev->name))
		return 0;

	printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
	       dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));

	return -EBUSY;
}

static int cx23885_init_tsport(struct cx23885_dev *dev,
	struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue - Common settings */
	port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val = 0x0;
	port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* This should be hardcoded to allow a single frontend
	 * attachment to this tsport, keeping the -dvb.c
	 * code clean and safe.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	switch (portno) {
	case 1:
		port->reg_gpcnt = VID_B_GPCNT;
		port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
		port->reg_dma_ctl = VID_B_DMA_CTL;
		port->reg_lngth = VID_B_LNGTH;
		port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl = VID_B_GEN_CTL;
		port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
		port->reg_sop_status = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc = VID_B_VLD_MISC;
		port->reg_ts_clk_en = VID_B_TS_CLK_EN;
		port->reg_src_sel = VID_B_SRC_SEL;
		port->reg_ts_int_msk = VID_B_INT_MSK;
		port->reg_ts_int_stat = VID_B_INT_STAT;
		port->sram_chno = SRAM_CH03; /* VID_B */
		port->pci_irqmask = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt = VID_C_GPCNT;
		port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
		port->reg_dma_ctl = VID_C_DMA_CTL;
		port->reg_lngth = VID_C_LNGTH;
		port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl = VID_C_GEN_CTL;
		port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
		port->reg_sop_status = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc = VID_C_VLD_MISC;
		port->reg_ts_clk_en = VID_C_TS_CLK_EN;
		port->reg_src_sel = 0;
		port->reg_ts_int_msk = VID_C_INT_MSK;
		port->reg_ts_int_stat = VID_C_INT_STAT;
		port->sram_chno = SRAM_CH06; /* VID_C */
		port->pci_irqmask = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	return 0;
}

static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
{
	switch (cx_read(RDR_CFG2) & 0xff) {
	case 0x00:
		/* cx23885 */
		dev->hwrevision = 0xa0;
		break;
	case 0x01:
		/* CX23885-12Z */
		dev->hwrevision = 0xa1;
		break;
	case 0x02:
		/* CX23885-13Z/14Z */
		dev->hwrevision = 0xb0;
		break;
	case 0x03:
		if (dev->pci->device == 0x8880) {
			/* CX23888-21Z/22Z */
			dev->hwrevision = 0xc0;
		} else {
			/* CX23885-14Z */
			dev->hwrevision = 0xa4;
		}
		break;
	case 0x04:
		if (dev->pci->device == 0x8880) {
			/* CX23888-31Z */
			dev->hwrevision = 0xd0;
		} else {
			/* CX23885-15Z, CX23888-31Z */
			dev->hwrevision = 0xa5;
		}
		break;
	case 0x0e:
		/* CX23887-15Z */
		dev->hwrevision = 0xc0;
		break;
	case 0x0f:
		/* CX23887-14Z */
		dev->hwrevision = 0xb1;
		break;
	default:
		printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
		       __func__, dev->hwrevision);
	}
	if (dev->hwrevision)
		printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
		       __func__, dev->hwrevision);
	else
		printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
		       __func__, dev->hwrevision);
}

/* Find the first v4l2_subdev member of the group id in hw */
struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
{
	struct v4l2_subdev *result = NULL;
	struct v4l2_subdev *sd;

	spin_lock(&dev->v4l2_dev.lock);
	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
		if (sd->grp_id == hw) {
			result = sd;
			break;
		}
	}
	spin_unlock(&dev->v4l2_dev.lock);
	return result;
}

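/*
 * Example use (illustrative only; the group id value is assumed from the
 * rest of this driver, not from this file):
 *
 *	struct v4l2_subdev *sd = cx23885_find_hw(dev, CX23885_HW_AV_CORE);
 *	if (sd)
 *		;	 the A/V core sub-device is registered
 */
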
static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	spin_lock_init(&dev->pci_irqmask_lock);
	spin_lock_init(&dev->slock);

	mutex_init(&dev->lock);
	mutex_init(&dev->gpio_lock);

	atomic_inc(&dev->refcount);

	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	/* Configure the internal memory */
	if (dev->pci->device == 0x8880) {
		/* Could be 887 or 888, assume a default */
		dev->bridge = CX23885_BRIDGE_887;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 25000000;
		dev->sram_channels = cx23887_sram_channels;
	} else
	if (dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 28000000;
		dev->sram_channels = cx23885_sram_channels;
	} else
		BUG();

	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__func__, dev->bridge);

	/* board config */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	/* If the user specified a clk freq override, apply it */
	if (cx23885_boards[dev->board].clk_freq > 0)
		dev->clk_freq = cx23885_boards[dev->board].clk_freq;

	dev->pci_bus = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	cx23885_irq_add(dev, 0x001f00);

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
	    (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
	    (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		printk(KERN_ERR "CORE %s No more PCIe resources for "
		       "subsystem: %04x:%04x\n",
		       dev->name, dev->pci->subsystem_vendor,
		       dev->pci->subsystem_device);

		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff */
	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
			     pci_resource_len(dev->pci, 0));

	dev->bmmio = (u8 __iomem *)dev->lmmio;

	printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
	       dev->name, dev->pci->subsystem_vendor,
	       dev->pci->subsystem_device, cx23885_boards[dev->board].name,
	       dev->board, card[dev->nr] == dev->board ?
	       "insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* Assume some sensible defaults */
	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
	dev->radio_type = cx23885_boards[dev->board].radio_type;
	dev->radio_addr = cx23885_boards[dev->board].radio_addr;

	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
		__func__, dev->radio_type, dev->radio_addr);

	/* The cx23417 encoder has GPIO's that need to be initialised
	 * before DVB, so that demodulators and tuners are out of
	 * reset before DVB uses them.
	 */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
	    (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_mc417_init(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_card_setup(dev);
	call_all(dev, core, s_power, 0);
	cx23885_ir_init(dev);

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
		if (cx23885_video_register(dev) < 0) {
			printk(KERN_ERR "%s() Failed to register analog "
			       "video adapters on VID_A\n", __func__);
		}
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portb)
			dev->ts1.num_frontends =
				cx23885_boards[dev->board].num_fds_portb;
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			printk(KERN_ERR
				"%s() Failed to register 417 on VID_B\n",
				__func__);
		}
	}

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portc)
			dev->ts2.num_frontends =
				cx23885_boards[dev->board].num_fds_portc;
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			printk(KERN_ERR
				"%s() Failed to register dvb on VID_C\n",
				__func__);
		}
	} else
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			printk(KERN_ERR
				"%s() Failed to register 417 on VID_C\n",
				__func__);
		}
	}

	cx23885_dev_checkrevision(dev);

	/* disable MSI for NetUP cards, otherwise CI is not working */
	if (cx23885_boards[dev->board].ci_type > 0)
		cx_clear(RDR_RDRCTL1, 1 << 8);

	switch (dev->board) {
	case CX23885_BOARD_TEVII_S470:
	case CX23885_BOARD_TEVII_S471:
		cx_clear(RDR_RDRCTL1, 1 << 8);
		break;
	}

	return 0;
}

static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}

static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
				  unsigned int offset, u32 sync_line,
				  unsigned int bpl, unsigned int padding,
				  unsigned int lines, unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;

	if (jump) {
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg = sg_next(sg);
		}

		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
				(sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg = sg_next(sg);
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
					sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg = sg_next(sg);
			}
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}

int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Padding
	   can cause next bpl to start close to a page border. First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);
	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}

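/*
 * Worked example of the sizing estimate above (illustrative only, the
 * numbers are not from the original source): a single field with
 * bpl = 1440, padding = 0, lines = 288 and PAGE_SIZE = 4096 gives
 * 1 * (1 + (1440 * 288) / 4096 + 288) + 5 = 395 instructions,
 * i.e. risc->size = 395 * 12 = 4740 bytes.
 */
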
int cx23885_risc_databuffer(struct pci_dev *pci,
			    struct cx23885_riscmem *risc,
			    struct scatterlist *sglist,
			    unsigned int bpl,
			    unsigned int lines, unsigned int lpi)
{
	u32 instructions;
	__le32 *rp;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Here
	   there is no padding and no sync. First DMA region may be smaller
	   than PAGE_SIZE */
	/* Jump and write need an extra dword */
	instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
	instructions += 4;

	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
				bpl, 0, lines, lpi, lpi == 0);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}

int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Padding
	   can cause next bpl to start close to a page border. First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;
	/* write risc instructions */
	rp = risc->cpu;

	/* Sync to line 6, so US CC line 21 will appear in line '12'
	 * in the userland vbi payload */
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);

	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}

void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
{
	struct cx23885_riscmem *risc = &buf->risc;

	BUG_ON(in_interrupt());
	pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
}

static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
		cx_read(GPIO2));
	dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}

int cx23885_start_dma(struct cx23885_tsport *port,
		      struct cx23885_dmaqueue *q,
		      struct cx23885_buffer *buf)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
		dev->width, dev->height, dev->field);

	/* Stop the fifo and risc engine for this port */
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev,
				   &dev->sram_channels[port->sram_chno],
				   port->ts_packet_size, buf->risc.dma);
	if (debug > 5) {
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_risc_disasm(port, &buf->risc);
	}

	/* write TS length to chip */
	cx_write(port->reg_lngth, port->ts_packet_size);

	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
	    (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
			__func__,
			cx23885_boards[dev->board].portb,
			cx23885_boards[dev->board].portc);
		return -EINVAL;
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	udelay(100);

	/* If the port supports SRC SELECT, configure it */
	if (port->reg_src_sel)
		cx_write(port->reg_src_sel, port->src_sel_val);

	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
	cx_write(port->reg_vld_misc, port->vld_misc_val);
	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
	udelay(100);

	/* NOTE: this is 2 (reserved) for portb, does it matter? */
	/* reset counter to zero */
	cx_write(port->reg_gpcnt_ctl, 3);
	q->count = 0;

	/* Set VIDB pins to input */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	/* Set VIDC pins to input */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x4; /* Clear TS2_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {

		reg = cx_read(PAD_CTRL);
		reg = reg & ~0x1; /* Clear TS1_OE */

		/* FIXME, bit 2 writing here is questionable */
		/* set TS1_SOP_OE and TS1_OE_HI */
		reg = reg | 0xa;
		cx_write(PAD_CTRL, reg);

		/* FIXME and these two registers should be documented. */
		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
	}

	switch (dev->bridge) {
	case CX23885_BRIDGE_885:
	case CX23885_BRIDGE_887:
	case CX23885_BRIDGE_888:
		/* enable irqs */
		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
		cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
		cx_set(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_irq_add(dev, port->pci_irqmask);
		cx23885_irq_enable_all(dev);
		break;
	default:
		BUG();
	}

	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 1);

	if (debug > 4)
		cx23885_tsport_reg_dump(port);

	return 0;
}

static int cx23885_stop_dma(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s()\n", __func__);

	/* Stop interrupts and DMA */
	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {

		reg = cx_read(PAD_CTRL);

		/* Set TS1_OE */
		reg = reg | 0x1;

		/* clear TS1_SOP_OE and TS1_OE_HI */
		reg = reg & ~0xa;
		cx_write(PAD_CTRL, reg);
		cx_write(port->reg_src_sel, 0);
		cx_write(port->reg_gen_ctrl, 8);

	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	return 0;
}

/* ------------------------------------------------------------------ */

int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	int size = port->ts_packet_size * port->ts_packet_count;
	struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);

	dprintk(1, "%s: %p\n", __func__, buf);
	if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);

	cx23885_risc_databuffer(dev->pci, &buf->risc,
				sgt->sgl,
				port->ts_packet_size, port->ts_packet_count, 0);
	return 0;
}

/*
 * The risc program for each buffer works as follows: it starts with a simple
 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
 * the initial JUMP).
 *
 * This is the risc program of the first buffer to be queued if the active list
 * is empty and it just keeps DMAing this buffer without generating any
 * interrupts.
 *
 * If a new buffer is added then the initial JUMP in the code for that buffer
 * will generate an interrupt which signals that the previous buffer has been
 * DMAed successfully and that it can be returned to userspace.
 *
 * It also sets the final jump of the previous buffer to the start of the new
 * buffer, thus chaining the new buffer into the DMA chain. This is a single
 * atomic u32 write, so there is no race condition.
 *
 * The end result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
 */
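/*
 * Sketch of the resulting chain (illustrative only, derived from the
 * description above and from cx23885_buf_queue() below):
 *
 *   buf A alone:        JUMP(A+12) -> <DMA writes> -> JUMP(A+12)
 *   after queueing B:
 *     buf A:            JUMP(A+12) -> <DMA writes> -> JUMP(B)
 *     buf B:            JUMP(B+12)+IRQ1 -> <DMA writes> -> JUMP(B+12)
 */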
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *cx88q = &port->mpegq;
	unsigned long flags;

	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	} else {
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}

/* ----------------------------------------------------------- */

static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *q = &port->mpegq;
	struct cx23885_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&port->slock, flags);
	while (!list_empty(&q->active)) {
		buf = list_entry(q->active.next, struct cx23885_buffer,
				 queue);
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
			buf, buf->vb.vb2_buf.index, reason,
			(unsigned long)buf->risc.dma);
	}
	spin_unlock_irqrestore(&port->slock, flags);
}

void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}

int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT) ||
		(status & VID_B_MSK_OPC_ERR) ||
		(status & VID_B_MSK_VBI_OPC_ERR) ||
		(status & VID_B_MSK_SYNC) ||
		(status & VID_B_MSK_VBI_SYNC) ||
		(status & VID_B_MSK_OF) ||
		(status & VID_B_MSK_VBI_OF)) {
		printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
			"= 0x%x\n", dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, " VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, " VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, " VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, " VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, " VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, " VID_B_MSK_VBI_OF\n");

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, " VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}

a6a3f140
ST
1607static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
1608{
1609 struct cx23885_dev *dev = port->dev;
1610 int handled = 0;
1611 u32 count;
1612
b1b81f1d
ST
1613 if ((status & VID_BC_MSK_OPC_ERR) ||
1614 (status & VID_BC_MSK_BAD_PKT) ||
1615 (status & VID_BC_MSK_SYNC) ||
9c8ced51
ST
1616 (status & VID_BC_MSK_OF)) {
1617
a6a3f140 1618 if (status & VID_BC_MSK_OPC_ERR)
9c8ced51
ST
1619 dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
1620 VID_BC_MSK_OPC_ERR);
1621
a6a3f140 1622 if (status & VID_BC_MSK_BAD_PKT)
9c8ced51
ST
1623 dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
1624 VID_BC_MSK_BAD_PKT);
1625
a6a3f140 1626 if (status & VID_BC_MSK_SYNC)
9c8ced51
ST
1627 dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
1628 VID_BC_MSK_SYNC);
1629
a6a3f140 1630 if (status & VID_BC_MSK_OF)
9c8ced51
ST
1631 dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
1632 VID_BC_MSK_OF);
a6a3f140
ST
1633
1634 printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);
1635
1636 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
b1b81f1d
ST
1637 cx23885_sram_channel_dump(dev,
1638 &dev->sram_channels[port->sram_chno]);
a6a3f140
ST
1639
1640 } else if (status & VID_BC_MSK_RISCI1) {
1641
1642 dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);
1643
1644 spin_lock(&port->slock);
1645 count = cx_read(port->reg_gpcnt);
1646 cx23885_wakeup(port, &port->mpegq, count);
1647 spin_unlock(&port->slock);
1648
a6a3f140
ST
1649 }
1650 if (status) {
1651 cx_write(port->reg_ts_int_stat, status);
1652 handled = 1;
1653 }
1654
1655 return handled;
1656}
1657
1658static irqreturn_t cx23885_irq(int irq, void *dev_id)
1659{
1660 struct cx23885_dev *dev = dev_id;
1661 struct cx23885_tsport *ts1 = &dev->ts1;
1662 struct cx23885_tsport *ts2 = &dev->ts2;
1663 u32 pci_status, pci_mask;
1664 u32 vida_status, vida_mask;
1665 u32 audint_status, audint_mask;
1666 u32 ts1_status, ts1_mask;
1667 u32 ts2_status, ts2_mask;
1668 int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
1669 int audint_count = 0;
1670 bool subdev_handled;
1671
1672 pci_status = cx_read(PCI_INT_STAT);
1673 pci_mask = cx23885_irq_get_mask(dev);
1674 vida_status = cx_read(VID_A_INT_STAT);
1675 vida_mask = cx_read(VID_A_INT_MSK);
1676 audint_status = cx_read(AUDIO_INT_INT_STAT);
1677 audint_mask = cx_read(AUDIO_INT_INT_MSK);
1678 ts1_status = cx_read(VID_B_INT_STAT);
1679 ts1_mask = cx_read(VID_B_INT_MSK);
1680 ts2_status = cx_read(VID_C_INT_STAT);
1681 ts2_mask = cx_read(VID_C_INT_MSK);
1682
1683 if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
1684 goto out;
1685
1686 vida_count = cx_read(VID_A_GPCNT);
1687 audint_count = cx_read(AUD_INT_A_GPCNT);
1688 ts1_count = cx_read(ts1->reg_gpcnt);
1689 ts2_count = cx_read(ts2->reg_gpcnt);
1690 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1691 pci_status, pci_mask);
1692 dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1693 vida_status, vida_mask, vida_count);
1694 dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
1695 audint_status, audint_mask, audint_count);
1696 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
1697 ts1_status, ts1_mask, ts1_count);
1698 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
1699 ts2_status, ts2_mask, ts2_count);
1700
1701 if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
1702 PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
1703 PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
1704 PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
1705 PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
1706 PCI_MSK_AV_CORE | PCI_MSK_IR)) {
1707
1708 if (pci_status & PCI_MSK_RISC_RD)
1709 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
1710 PCI_MSK_RISC_RD);
1711
1712 if (pci_status & PCI_MSK_RISC_WR)
1713 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
1714 PCI_MSK_RISC_WR);
1715
1716 if (pci_status & PCI_MSK_AL_RD)
1717 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
1718 PCI_MSK_AL_RD);
1719
1720 if (pci_status & PCI_MSK_AL_WR)
1721 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
1722 PCI_MSK_AL_WR);
1723
1724 if (pci_status & PCI_MSK_APB_DMA)
1725 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
1726 PCI_MSK_APB_DMA);
1727
1728 if (pci_status & PCI_MSK_VID_C)
1729 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
1730 PCI_MSK_VID_C);
1731
1732 if (pci_status & PCI_MSK_VID_B)
1733 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
1734 PCI_MSK_VID_B);
1735
1736 if (pci_status & PCI_MSK_VID_A)
1737 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
1738 PCI_MSK_VID_A);
1739
1740 if (pci_status & PCI_MSK_AUD_INT)
1741 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
1742 PCI_MSK_AUD_INT);
1743
1744 if (pci_status & PCI_MSK_AUD_EXT)
1745 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
1746 PCI_MSK_AUD_EXT);
1747
1748 if (pci_status & PCI_MSK_GPIO0)
1749 dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
1750 PCI_MSK_GPIO0);
1751
1752 if (pci_status & PCI_MSK_GPIO1)
1753 dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
1754 PCI_MSK_GPIO1);
1755
1756 if (pci_status & PCI_MSK_AV_CORE)
1757 dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
1758 PCI_MSK_AV_CORE);
1759
1760 if (pci_status & PCI_MSK_IR)
1761 dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
1762 PCI_MSK_IR);
1763 }
1764
1765 if (cx23885_boards[dev->board].ci_type == 1 &&
1766 (pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
1767 handled += netup_ci_slot_status(dev, pci_status);
1768
1769 if (cx23885_boards[dev->board].ci_type == 2 &&
1770 (pci_status & PCI_MSK_GPIO0))
1771 handled += altera_ci_irq(dev);
1772
1773 if (ts1_status) {
1774 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1775 handled += cx23885_irq_ts(ts1, ts1_status);
1776 else
1777 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1778 handled += cx23885_irq_417(dev, ts1_status);
1779 }
1780
1781 if (ts2_status) {
1782 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1783 handled += cx23885_irq_ts(ts2, ts2_status);
1784 else
1785 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1786 handled += cx23885_irq_417(dev, ts2_status);
1787 }
1788
1789 if (vida_status)
1790 handled += cx23885_video_irq(dev, vida_status);
1791
1792 if (audint_status)
1793 handled += cx23885_audio_irq(dev, audint_status, audint_mask);
1794
1795 if (pci_status & PCI_MSK_IR) {
1796 subdev_handled = false;
1797 v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
1798 pci_status, &subdev_handled);
1799 if (subdev_handled)
1800 handled++;
1801 }
1802
1803 if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
1804 cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
1805 schedule_work(&dev->cx25840_work);
1806 handled++;
1807 }
1808
1809 if (handled)
1810 cx_write(PCI_INT_STAT, pci_status);
1811out:
1812 return IRQ_RETVAL(handled);
1813}
1814
1815static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1816 unsigned int notification, void *arg)
1817{
1818 struct cx23885_dev *dev;
1819
1820 if (sd == NULL)
1821 return;
1822
1823 dev = to_cx23885(sd->v4l2_dev);
1824
1825 switch (notification) {
1826 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1827 if (sd == dev->sd_ir)
1828 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1829 break;
1830 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1831 if (sd == dev->sd_ir)
1832 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1833 break;
1834 }
1835}
1836
1837static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
1838{
1839 INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
1840 INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
1841 INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
1842 dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
1843}
1844
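/*
 * Illustrative sketch only (not part of the driver): how a subdevice
 * could hand an IR RX event to the bridge. v4l2_subdev_notify() invokes
 * the dev->v4l2_dev.notify hook installed above, which lands in
 * cx23885_v4l2_dev_notify(). The helper name and the zero event mask
 * below are placeholders for illustration.
 */
#if 0
static void example_ir_rx_event(struct v4l2_subdev *sd)
{
	u32 ir_rx_events = 0;	/* placeholder: real code passes the IR status flags */

	v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_RX_NOTIFY, &ir_rx_events);
}
#endif
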
1845 static inline int encoder_on_portb(struct cx23885_dev *dev)
1846{
1847 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1848}
1849
1850 static inline int encoder_on_portc(struct cx23885_dev *dev)
1851{
1852 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1853}
1854
1855/* The mask represents 32 different GPIOs. GPIOs are split across multiple
1856 * registers depending on the board configuration (and on whether the 417
1857 * encoder, with its own GPIOs, is present). Each GPIO bit is pushed into
1858 * the correct hardware register, regardless of its physical location.
1859 * Certain registers are shared, so we sanity check and report errors if
1860 * we think we're tampering with a GPIO that might be assigned to the
1861 * encoder (and used for the host bus).
1862 *
1863 * GPIO 2 thru 0 - On the cx23885 bridge
1864 * GPIO 18 thru 3 - On the cx23417 host bus interface
1865 * GPIO 23 thru 19 - On the cx25840 a/v core
1866 */
1867void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1868{
1869 if (mask & 0x7)
1870 cx_set(GP0_IO, mask & 0x7);
1871
1872 if (mask & 0x0007fff8) {
1873 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1874 printk(KERN_ERR
1875 "%s: Setting GPIO on encoder ports\n",
1876 dev->name);
1877 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1878 }
1879
1880 /* TODO: 23-19 */
1881 if (mask & 0x00f80000)
1882 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1883}
1884
1885void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1886{
1887 if (mask & 0x00000007)
1888 cx_clear(GP0_IO, mask & 0x7);
1889
1890 if (mask & 0x0007fff8) {
1891 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1892 printk(KERN_ERR
1893 "%s: Clearing GPIO moving on encoder ports\n",
1894 dev->name);
1895 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1896 }
1897
1898 /* TODO: 23-19 */
1899 if (mask & 0x00f80000)
1900 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1901}
1902
1903u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
1904{
1905 if (mask & 0x00000007)
1906 return (cx_read(GP0_IO) >> 8) & mask & 0x7;
1907
1908 if (mask & 0x0007fff8) {
1909 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1910 printk(KERN_ERR
1911 "%s: Reading GPIO moving on encoder ports\n",
1912 dev->name);
1913 return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
1914 }
1915
1916 /* TODO: 23-19 */
1917 if (mask & 0x00f80000)
1918 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1919
1920 return 0;
1921}
1922
1923void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1924{
1925 if ((mask & 0x00000007) && asoutput)
1926 cx_set(GP0_IO, (mask & 0x7) << 16);
1927 else if ((mask & 0x00000007) && !asoutput)
1928 cx_clear(GP0_IO, (mask & 0x7) << 16);
1929
1930 if (mask & 0x0007fff8) {
1931 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1932 printk(KERN_ERR
1933 "%s: Enabling GPIO on encoder ports\n",
1934 dev->name);
1935 }
1936
1937 /* MC417_OEN is active low for output, write 1 for an input */
1938 if ((mask & 0x0007fff8) && asoutput)
1939 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
1940
1941 else if ((mask & 0x0007fff8) && !asoutput)
1942 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
1943
1944 /* TODO: 23-19 */
1945}
1946
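/*
 * Illustrative sketch only (not part of the driver): pulsing a reset
 * line through the GPIO helpers above, assuming a hypothetical board
 * with a tuner reset wired to bridge GPIO 2 (mask bit 2, handled via
 * GP0_IO per the mapping described before cx23885_gpio_set()).
 */
#if 0
static void example_board_tuner_reset(struct cx23885_dev *dev)
{
	u32 reset_mask = 0x00000004;	/* bridge GPIO 2 */

	/* Drive the pin as an output, pulse it low, then release it. */
	cx23885_gpio_enable(dev, reset_mask, 1);
	cx23885_gpio_clear(dev, reset_mask);
	mdelay(10);
	cx23885_gpio_set(dev, reset_mask);
	mdelay(10);
}
#endif
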
1947static int cx23885_initdev(struct pci_dev *pci_dev,
1948 const struct pci_device_id *pci_id)
1949{
1950 struct cx23885_dev *dev;
1951 struct v4l2_ctrl_handler *hdl;
1952 int err;
1953
1954 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1955 if (NULL == dev)
1956 return -ENOMEM;
1957
1958 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
1959 if (err < 0)
1960 goto fail_free;
1961
1962 hdl = &dev->ctrl_handler;
1963 v4l2_ctrl_handler_init(hdl, 6);
1964 if (hdl->error) {
1965 err = hdl->error;
1966 goto fail_ctrl;
1967 }
1968 dev->v4l2_dev.ctrl_handler = hdl;
1969
1970 /* Prepare to handle notifications from subdevices */
1971 cx23885_v4l2_dev_notify_init(dev);
1972
1973 /* pci init */
1974 dev->pci = pci_dev;
1975 if (pci_enable_device(pci_dev)) {
1976 err = -EIO;
1977 goto fail_ctrl;
1978 }
1979
1980 if (cx23885_dev_setup(dev) < 0) {
1981 err = -EINVAL;
1982 goto fail_ctrl;
1983 }
1984
1985 /* print pci info */
1986 dev->pci_rev = pci_dev->revision;
1987 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
1988 printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
1989 "latency: %d, mmio: 0x%llx\n", dev->name,
1990 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
1991 dev->pci_lat,
1992 (unsigned long long)pci_resource_start(pci_dev, 0));
1993
1994 pci_set_master(pci_dev);
1995 if (!pci_dma_supported(pci_dev, 0xffffffff)) {
1996 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1997 err = -EIO;
1998 goto fail_context;
1999 }
2000
2001 dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
2002 if (IS_ERR(dev->alloc_ctx)) {
2003 err = PTR_ERR(dev->alloc_ctx);
2004 goto fail_context;
2005 }
2006 err = request_irq(pci_dev->irq, cx23885_irq,
2007 IRQF_SHARED, dev->name, dev);
2008 if (err < 0) {
2009 printk(KERN_ERR "%s: can't get IRQ %d\n",
2010 dev->name, pci_dev->irq);
2011 goto fail_irq;
2012 }
2013
2014 switch (dev->board) {
2015 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2016 cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2017 break;
2018 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2019 cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2020 break;
2021 }
2022
2023 /*
2024 * The CX2388[58] IR controller can start firing interrupts when
2025 * enabled, so these have to take place after the cx23885_irq() handler
2026 * is hooked up by the call to request_irq() above.
2027 */
2028 cx23885_ir_pci_int_enable(dev);
2029 cx23885_input_init(dev);
2030
2031 return 0;
2032
2033fail_irq:
2034 vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
2035fail_context:
2036 cx23885_dev_unregister(dev);
2037fail_ctrl:
2038 v4l2_ctrl_handler_free(hdl);
2039 v4l2_device_unregister(&dev->v4l2_dev);
2040fail_free:
2041 kfree(dev);
2042 return err;
2043}
2044
2045 static void cx23885_finidev(struct pci_dev *pci_dev)
2046 {
2047 struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
2048 struct cx23885_dev *dev = to_cx23885(v4l2_dev);
2049
2050 cx23885_input_fini(dev);
2051 cx23885_ir_fini(dev);
2052
2053 cx23885_shutdown(dev);
2054
2055 /* unregister stuff */
2056 free_irq(pci_dev->irq, dev);
2057
2058 pci_disable_device(pci_dev);
2059
2060 cx23885_dev_unregister(dev);
2061 vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
2062 v4l2_ctrl_handler_free(&dev->ctrl_handler);
2063 v4l2_device_unregister(v4l2_dev);
2064 kfree(dev);
2065}
2066
2067static struct pci_device_id cx23885_pci_tbl[] = {
2068 {
2069 /* CX23885 */
2070 .vendor = 0x14f1,
2071 .device = 0x8852,
2072 .subvendor = PCI_ANY_ID,
2073 .subdevice = PCI_ANY_ID,
2074 }, {
2075 /* CX23887 Rev 2 */
2076 .vendor = 0x14f1,
2077 .device = 0x8880,
2078 .subvendor = PCI_ANY_ID,
2079 .subdevice = PCI_ANY_ID,
2080 }, {
2081 /* --- end of list --- */
2082 }
2083};
2084MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2085
2086static struct pci_driver cx23885_pci_driver = {
2087 .name = "cx23885",
2088 .id_table = cx23885_pci_tbl,
2089 .probe = cx23885_initdev,
2090 .remove = cx23885_finidev,
2091 /* TODO */
2092 .suspend = NULL,
2093 .resume = NULL,
2094};
2095
2096 static int __init cx23885_init(void)
2097 {
2098 printk(KERN_INFO "cx23885 driver version %s loaded\n",
2099 CX23885_VERSION);
2100 return pci_register_driver(&cx23885_pci_driver);
2101}
2102
2103 static void __exit cx23885_fini(void)
2104{
2105 pci_unregister_driver(&cx23885_pci_driver);
2106}
2107
2108module_init(cx23885_init);
2109module_exit(cx23885_fini);