/*
 * drivers/media/video/ivtv/ivtv-irq.c
 * V4L/DVB (5675): Move big PIO accesses from the interrupt handler to a workhandler
 */
1 /* interrupt handling
2 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
3 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21 #include "ivtv-driver.h"
22 #include "ivtv-firmware.h"
23 #include "ivtv-fileops.h"
24 #include "ivtv-queue.h"
25 #include "ivtv-udma.h"
26 #include "ivtv-irq.h"
27 #include "ivtv-ioctl.h"
28 #include "ivtv-mailbox.h"
29 #include "ivtv-vbi.h"
30 #include "ivtv-yuv.h"
31
/* Magic value written into card memory at the start of a transfer; used by
   dma_post() to locate the true start of the transferred data. */
#define DMA_MAGIC_COOKIE 0x000001fe

static void ivtv_dma_dec_start(struct ivtv_stream *s);

/* Maps the stream number reported by the encoder firmware (mailbox data[0]
   or data[1]) to the driver's encoder stream index. */
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};
42
43
44 static void ivtv_pio_work_handler(struct ivtv *itv)
45 {
46 struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
47 struct ivtv_buffer *buf;
48 struct list_head *p;
49 int i = 0;
50
51 IVTV_DEBUG_DMA("ivtv_pio_work_handler\n");
52 if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
53 s->v4l2dev == NULL || !ivtv_use_pio(s)) {
54 itv->cur_pio_stream = -1;
55 /* trigger PIO complete user interrupt */
56 write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
57 return;
58 }
59 IVTV_DEBUG_DMA("Process PIO %s\n", s->name);
60 buf = list_entry(s->q_dma.list.next, struct ivtv_buffer, list);
61 list_for_each(p, &s->q_dma.list) {
62 struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
63 u32 size = s->PIOarray[i].size & 0x3ffff;
64
65 /* Copy the data from the card to the buffer */
66 if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
67 memcpy_fromio(buf->buf, itv->dec_mem + s->PIOarray[i].src - IVTV_DECODER_OFFSET, size);
68 }
69 else {
70 memcpy_fromio(buf->buf, itv->enc_mem + s->PIOarray[i].src, size);
71 }
72 if (s->PIOarray[i].size & 0x80000000)
73 break;
74 i++;
75 }
76 write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
77 }
78
79 void ivtv_irq_work_handler(struct work_struct *work)
80 {
81 struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);
82
83 DEFINE_WAIT(wait);
84
85 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
86 ivtv_pio_work_handler(itv);
87
88 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
89 ivtv_vbi_work_handler(itv);
90
91 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
92 ivtv_yuv_work_handler(itv);
93 }
94
/* Determine the required DMA size, setup enough buffers in the predma queue and
   actually copy the data from the card to the buffers in case a PIO transfer is
   required for this stream.

   'data' holds the mailbox arguments from the encoder firmware; which words
   are meaningful depends on the stream type (see the switch below).
   Returns 0 on success, -1 on any sanity-check or buffer-shortage failure.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	struct list_head *p;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	/* skip buffers already sitting in q_predma from a previous append */
	int skip_bufs = s->q_predma.buffers;
	/* continue filling SGarray after any already-prepared entries */
	int idx = s->SG_length;
	int rc;

	/* sanity checks */
	if (s->v4l2dev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		offset = data[1];
		size = data[2];
		s->dma_pts = 0;
		break;

	case IVTV_ENC_STREAM_TYPE_YUV:
		/* Y plane location/size plus a separate UV plane */
		offset = data[1];
		size = data[2];
		UVoffset = data[3];
		UVsize = data[4];
		s->dma_pts = ((u64) data[5] << 32) | data[6];
		break;

	case IVTV_ENC_STREAM_TYPE_PCM:
		/* skip the 12-byte header; the PTS lives just before the data */
		offset = data[1] + 12;
		size = data[2] - 12;
		s->dma_pts = read_dec(offset - 8) |
			((u64)(read_dec(offset - 12)) << 32);
		if (itv->has_cx23415)
			offset += IVTV_DECODER_OFFSET;
		break;

	case IVTV_ENC_STREAM_TYPE_VBI:
		/* VBI location comes from card registers, not the mailbox */
		size = itv->vbi.enc_size * itv->vbi.fpi;
		offset = read_enc(itv->vbi.enc_start - 4) + 12;
		if (offset == 12) {
			IVTV_DEBUG_INFO("VBI offset == 0\n");
			return -1;
		}
		s->dma_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
		break;

	case IVTV_DEC_STREAM_TYPE_VBI:
		size = read_dec(itv->vbi.dec_start + 4) + 8;
		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
		s->dma_pts = 0;
		offset += IVTV_DECODER_OFFSET;
		break;
	default:
		/* shouldn't happen */
		return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->SG_length == 0) {
		/* Save the word the cookie overwrites so dma_post() can
		   restore it in the received data. */
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->dma_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->dma_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->dma_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	/* rc < 0: not enough buffers; rc > 0: buffers were stolen from q_full */
	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in SGarray (DMA) */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	/* NOTE(review): presumably clears the area where the magic cookie /
	   header will land in the first buffer — confirm the 128-byte size */
	memset(buf->buf, 0, 128);
	list_for_each(p, &s->q_predma.list) {
		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

		/* don't re-add buffers that were queued by an earlier append */
		if (skip_bufs-- > 0)
			continue;
		s->SGarray[idx].dst = cpu_to_le32(buf->dma_handle);
		s->SGarray[idx].src = cpu_to_le32(offset);
		s->SGarray[idx].size = cpu_to_le32(s->buf_size);
		buf->bytesused = (size < s->buf_size) ? size : s->buf_size;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->SG_length = idx;
	return 0;
}
235
/* Post-process a completed DMA/PIO transfer for stream 's': locate the
   real start of the data via the magic cookie, restore the word the cookie
   overwrote, flag MPG/VBI buffers for byte swapping, groom decoder VBI
   data, and finally move the buffers from q_dma to q_full (or q_free for
   an internal-use decoder VBI stream). */
static void dma_post(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf = NULL;
	struct list_head *p;
	u32 offset;
	u32 *u32buf;
	int x = 0;

	IVTV_DEBUG_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
			s->name, s->dma_offset);
	list_for_each(p, &s->q_dma.list) {
		buf = list_entry(p, struct ivtv_buffer, list);
		u32buf = (u32 *)buf->buf;

		/* Sync Buffer */
		ivtv_buf_sync_for_cpu(s, buf);

		/* Only the first buffer carries the cookie/header. */
		if (x == 0) {
			offset = s->dma_last_offset;
			if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
			{
				/* cookie moved: scan the first 64 words */
				for (offset = 0; offset < 64; offset++) {
					if (u32buf[offset] == DMA_MAGIC_COOKIE) {
						break;
					}
				}
				offset *= 4;
				if (offset == 256) {
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
					offset = s->dma_last_offset;
				}
				if (s->dma_last_offset != offset)
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
				s->dma_last_offset = offset;
			}
			/* clear the cookie in card memory so the next transfer
			   starts from a known state */
			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
						s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
			}
			else {
				write_enc_sync(0, s->dma_offset);
			}
			if (offset) {
				buf->bytesused -= offset;
				/* NOTE(review): source and destination overlap here
				   whenever offset < bytesused, so memmove would be
				   the safe primitive; the length also looks like it
				   should be the adjusted buf->bytesused rather than
				   bytesused + offset (which reads past the valid
				   data) — confirm before changing. */
				memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
			}
			/* restore the word the magic cookie overwrote */
			*u32buf = cpu_to_le32(s->dma_backup);
		}
		x++;
		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
			set_bit(IVTV_F_B_NEED_BUF_SWAP, &buf->b_flags);
	}
	/* account the header offset back into the last buffer's size */
	if (buf)
		buf->bytesused += s->dma_last_offset;
	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		list_for_each(p, &s->q_dma.list) {
			buf = list_entry(p, struct ivtv_buffer, list);

			/* Parse and Groom VBI Data */
			s->q_dma.bytesused -= buf->bytesused;
			ivtv_process_vbi_data(itv, buf, 0, s->type);
			s->q_dma.bytesused += buf->bytesused;
		}
		/* id == -1: stream used internally, recycle buffers directly */
		if (s->id == -1) {
			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
			return;
		}
	}
	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
	if (s->id != -1)
		wake_up(&s->waitq);
}
311
/* Build the scatter-gather list for a host-to-decoder transfer from the
   buffers queued in s->q_predma, then either start the DMA immediately or
   mark it pending if another DMA is already in flight.

   offset: destination address in card memory (Y plane for YUV streams).
   lock:   when non-zero, take itv->dma_reg_lock around the start/pending
           decision (callers in IRQ context already hold it and pass 0). */
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	struct list_head *p;
	u32 y_size = itv->params.height * itv->params.width;
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	list_for_each(p, &s->q_predma.list) {
		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && bytes_written >= y_size) {
			/* Y plane complete: switch destination to the UV plane */
			offset = uv_offset;
			y_done = 1;
		}
		s->SGarray[idx].src = cpu_to_le32(buf->dma_handle);
		s->SGarray[idx].dst = cpu_to_le32(offset);
		s->SGarray[idx].size = cpu_to_le32(buf->bytesused);

		offset += buf->bytesused;
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->SG_length = idx;

	/* Mark last buffer size for Interrupt flag */
	s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	}
	else {
		/* a DMA is in progress; the IRQ handler will start this one */
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}
363
364 /* start the encoder DMA */
365 static void ivtv_dma_enc_start(struct ivtv_stream *s)
366 {
367 struct ivtv *itv = s->itv;
368 struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
369 int i;
370
371 IVTV_DEBUG_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);
372
373 if (s->q_predma.bytesused)
374 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
375
376 if (ivtv_use_dma(s))
377 s->SGarray[s->SG_length - 1].size =
378 cpu_to_le32(le32_to_cpu(s->SGarray[s->SG_length - 1].size) + 256);
379
380 /* If this is an MPEG stream, and VBI data is also pending, then append the
381 VBI DMA to the MPEG DMA and transfer both sets of data at once.
382
383 VBI DMA is a second class citizen compared to MPEG and mixing them together
384 will confuse the firmware (the end of a VBI DMA is seen as the end of a
385 MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
386 sure we only use the MPEG DMA to transfer the VBI DMA if both are in
387 use. This way no conflicts occur. */
388 clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
389 if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->SG_length &&
390 s->SG_length + s_vbi->SG_length <= s->buffers) {
391 ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
392 if (ivtv_use_dma(s_vbi))
393 s_vbi->SGarray[s_vbi->SG_length - 1].size = cpu_to_le32(le32_to_cpu(s_vbi->SGarray[s->SG_length - 1].size) + 256);
394 for (i = 0; i < s_vbi->SG_length; i++) {
395 s->SGarray[s->SG_length++] = s_vbi->SGarray[i];
396 }
397 itv->vbi.dma_offset = s_vbi->dma_offset;
398 s_vbi->SG_length = 0;
399 set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
400 IVTV_DEBUG_DMA("include DMA for %s\n", s->name);
401 }
402
403 /* Mark last buffer size for Interrupt flag */
404 s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
405
406 if (ivtv_use_pio(s)) {
407 for (i = 0; i < s->SG_length; i++) {
408 s->PIOarray[i].src = le32_to_cpu(s->SGarray[i].src);
409 s->PIOarray[i].size = le32_to_cpu(s->SGarray[i].size);
410 }
411 set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
412 set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
413 set_bit(IVTV_F_I_PIO, &itv->i_flags);
414 itv->cur_pio_stream = s->type;
415 }
416 else {
417 /* Sync Hardware SG List of buffers */
418 ivtv_stream_sync_for_device(s);
419 write_reg(s->SG_handle, IVTV_REG_ENCDMAADDR);
420 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
421 set_bit(IVTV_F_I_DMA, &itv->i_flags);
422 itv->cur_dma_stream = s->type;
423 itv->dma_timer.expires = jiffies + HZ / 10;
424 add_timer(&itv->dma_timer);
425 }
426 }
427
/* Start a host-to-decoder DMA for stream 's': hand the SG list to the
   hardware, kick the transfer, and arm the DMA watchdog timer. Must be
   called with itv->dma_reg_lock held (see ivtv_dma_stream_dec_prepare). */
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	IVTV_DEBUG_DMA("start DMA for %s\n", s->name);
	/* put SG Handle into register 0x0c */
	write_reg(s->SG_handle, IVTV_REG_DECDMAADDR);
	/* bit 0 of DMAXFER triggers the decoder transfer */
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
	/* watchdog: ivtv_unfinished_dma() fires if no completion IRQ arrives */
	itv->dma_timer.expires = jiffies + HZ / 10;
	add_timer(&itv->dma_timer);
}
443
/* Handle the 'decoder DMA read finished' interrupt: report errors, tell
   the firmware how much was transferred, recycle the transferred buffers,
   and clear the DMA-in-progress state. Called from the IRQ handler with
   itv->dma_reg_lock held. */
static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type;

	IVTV_DEBUG_IRQ("DEC DMA READ\n");
	/* transfer completed, cancel the watchdog */
	del_timer(&itv->dma_timer);
	if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
		IVTV_DEBUG_WARN("DEC DMA ERROR %x\n", read_reg(IVTV_REG_DMASTATUS));
		/* ack the error by writing back the low status bits */
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	}
	/* UDMA transfers have no stream bookkeeping of their own here */
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
			s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
			hw_stream_type = 2;
		}
		else {
			s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
			hw_stream_type = 0;
		}
		IVTV_DEBUG_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		ivtv_stream_sync_for_cpu(s);

		/* For some reason must kick the firmware, like PIO mode,
		   I think this tells the firmware we are done and the size
		   of the xfer so it can calculate what we need next.
		   I think we can do this part ourselves but would have to
		   fully calculate xfer info ourselves and not use interrupts
		 */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
490
/* Handle the 'encoder DMA complete' interrupt: identify the stream from
   the mailbox data, retry on DMA error, post-process the received buffers
   and, if a VBI transfer was piggy-backed on an MPEG one, post-process
   that too. Called from the IRQ handler with itv->dma_reg_lock held. */
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* completion arrived, cancel the watchdog */
	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_IRQ("ENC DMA COMPLETE %x %d\n", data[0], data[1]);
	/* a pure VBI DMA reports itself via the ENC_VBI flag, not data[1] */
	if (test_and_clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags))
		data[1] = 3;
	else if (data[1] > 2)
		/* NOTE(review): returns with IVTV_F_I_DMA still set and the
		   timer already cancelled — confirm this is intentional */
		return;
	s = &itv->streams[ivtv_stream_map[data[1]]];
	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x\n", data[0]);
		/* ack the error and ask the firmware to reschedule the DMA */
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, data[1]);
	}
	s->SG_length = 0;
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	ivtv_stream_sync_for_cpu(s);
	/* handle a VBI transfer that rode along with this MPEG transfer */
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		u32 tmp;

		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		tmp = s->dma_offset;
		s->dma_offset = itv->vbi.dma_offset;
		dma_post(s);
		s->dma_offset = tmp;
	}
	wake_up(&itv->dma_waitq);
}
525
526 static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
527 {
528 struct ivtv_stream *s;
529
530 if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
531 itv->cur_pio_stream = -1;
532 return;
533 }
534 s = &itv->streams[itv->cur_pio_stream];
535 IVTV_DEBUG_IRQ("ENC PIO COMPLETE %s\n", s->name);
536 s->SG_length = 0;
537 clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
538 clear_bit(IVTV_F_I_PIO, &itv->i_flags);
539 itv->cur_pio_stream = -1;
540 dma_post(s);
541 if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
542 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
543 else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
544 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
545 else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
546 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
547 clear_bit(IVTV_F_I_PIO, &itv->i_flags);
548 if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
549 u32 tmp;
550
551 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
552 tmp = s->dma_offset;
553 s->dma_offset = itv->vbi.dma_offset;
554 dma_post(s);
555 s->dma_offset = tmp;
556 }
557 wake_up(&itv->dma_waitq);
558 }
559
/* Handle the DMA error interrupt: if a known stream's DMA failed, ack the
   error and retry the transfer; otherwise just clear the DMA state and
   wake any waiters. Called from the IRQ handler with dma_reg_lock held. */
static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	/* only retry stream DMAs, never user-space UDMA transfers */
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		/* retry */
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		/* decoder stream types sort after the encoder ones */
		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_dma_dec_start(s);
		else
			ivtv_dma_enc_start(s);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
585
/* Handle the 'encoder has captured data' interrupt: read the transfer
   parameters from the mailbox, prepare the buffers/SG list via
   stream_enc_dma_append(), and mark the stream's transfer pending so the
   main IRQ handler can start it. */
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
	IVTV_DEBUG_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	/* data[0] selects the stream (0=MPG, 1=YUV, 2=PCM); VBI comes in
	   via a separate interrupt, so >2 is unexpected here */
	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
			data[0], data[1], data[2]);
		return;
	}
	clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
606
/* Handle the 'encoder has captured VBI data' interrupt: drop stale VBI
   requests if too many have accumulated, then queue the new VBI transfer.
   A standalone VBI DMA is only started when the MPEG stream is not
   capturing; otherwise the VBI data rides along with the next MPEG DMA
   (see ivtv_dma_enc_start). */
static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	/* If more than two VBI buffers are pending, then
	   clear the old ones and start with this new one.
	   This can happen during transition stages when MPEG capturing is
	   started, but the first interrupts haven't arrived yet. During
	   that period VBI requests can accumulate without being able to
	   DMA the data. Since at most four VBI DMA buffers are available,
	   we just drop the old requests when there are already three
	   requests queued. */
	if (s->SG_length > 2) {
		struct list_head *p;
		list_for_each(p, &s->q_predma.list) {
			struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
			ivtv_buf_sync_for_cpu(s, buf);
		}
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
		s->SG_length = 0;
	}
	/* if we can append the data, and the MPEG stream isn't capturing,
	   then start a DMA request for just the VBI data. */
	/* NOTE(review): 'data' is passed uninitialized; the VBI case of
	   stream_enc_dma_append reads card registers instead of data[] —
	   confirm no path reads the array for this stream type. */
	if (!stream_enc_dma_append(s, data) &&
			!test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
		set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
641
642 static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
643 {
644 u32 data[CX2341X_MBOX_MAX_DATA];
645 struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];
646
647 IVTV_DEBUG_IRQ("DEC VBI REINSERT\n");
648 if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
649 !stream_enc_dma_append(s, data)) {
650 set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
651 }
652 }
653
/* Handle the 'decoder requests data' interrupt: work out how much data
   the decoder wants and where it should go, then either flag the stream
   as starved or move the data into q_predma and start the transfer. */
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */
	ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		/* one full YUV 4:2:0 frame: width*height luma + half chroma */
		itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
		/* fall back to the first YUV buffer if no offset was given */
		itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		/* cap MPEG requests at 64 KB per transfer */
		itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
		       itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		/* not enough queued data yet; writer will restart the DMA */
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		/* lock=0: the IRQ handler already holds dma_reg_lock */
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}
683
static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	/* bit 0 of register 0x28c0 identifies the current display field */
	unsigned int frame = read_reg(0x28c0) & 1;
	int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);

	/* kept compiled-out: enable for very verbose vsync tracing */
	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	/* Advance the YUV output frame once per displayed frame; the two
	   clauses cover the interlaced and non-interlaced cases. */
	if (((frame ^ itv->yuv_info.lace_sync_field) == 0 && ((itv->lastVsyncFrame & 1) ^ itv->yuv_info.lace_sync_field)) ||
			(frame != (itv->lastVsyncFrame & 1) && !itv->yuv_info.frame_interlaced)) {
		int next_dma_frame = last_dma_frame;

		/* only flip if the next frame has actually been filled */
		if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) {
			/* program the display base addresses for Y and UV
			   of both fields (registers 0x82c-0x838) */
			write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
			write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
			write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
			write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
			/* four YUV frame buffers, cycle through them */
			next_dma_frame = (next_dma_frame + 1) & 0x3;
			atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame);
		}
	}
	/* new field: update bookkeeping and wake anyone waiting on vsync */
	if (frame != (itv->lastVsyncFrame & 1)) {
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->lastVsyncFrame += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
		}
		wake_up(&itv->vsync_waitq);
		if (s)
			wake_up(&s->waitq);

		/* Send VBI to saa7127 */
		if (frame) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if ((itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update) && last_dma_frame != -1) {
			/* forced update: use the previous frame's info */
			if (!itv->yuv_info.new_frame_info[last_dma_frame].update)
				last_dma_frame = (last_dma_frame - 1) & 3;

			if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) {
				itv->yuv_info.update_frame = last_dma_frame;
				itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
				itv->yuv_info.yuv_forced_update = 0;
				/* defer the register update to the work handler */
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}
	}
}
751
752 #define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ)
753
/* Top-level interrupt handler: reads and acks the IRQ status, dispatches
   to the per-cause handlers above, starts any pending stream DMA/PIO
   transfers round-robin, and queues the deferred work. Returns IRQ_NONE
   when the interrupt was not ours. */
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	/* only the interrupts we have enabled are ours */
	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->lastVsyncFrame & 1) != (read_reg(0x28c0) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n",read_reg(0x28c0)>>16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->cap_w);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

	/* start the next pending DMA, picking streams round-robin so no
	   stream can starve the others */
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx++) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}
		/* no stream DMA pending: a user-space UDMA may be waiting */
		if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
			ivtv_udma_start(itv);
		}
	}

	/* likewise start the next pending PIO transfer */
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx++) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	/* hand the slow work (PIO copies, VBI, YUV) to the workqueue */
	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags))
		queue_work(itv->irq_work_queues, &itv->irq_work_queue);

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
894
/* DMA watchdog timer callback: fires HZ/10 after a DMA was started if no
   completion interrupt arrived (see ivtv_dma_enc_start/ivtv_dma_dec_start).
   Acks any error status, clears the in-progress flags and wakes waiters. */
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	/* the completion IRQ may have raced the timer; nothing to do then */
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	/* ack the status by writing back the low bits */
	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}