// SPDX-License-Identifier: GPL-2.0-or-later
/* interrupt handling
    Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004 Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>

 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"
#include <media/v4l2-event.h>

#define DMA_MAGIC_COOKIE 0x000001fe

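/*
 * The magic cookie is written over the first word of a transfer area just
 * before an encoder DMA is scheduled (see stream_enc_dma_append()) and is
 * looked for again in dma_post() to find where the transferred data really
 * starts; the word it overwrote is backed up first and restored once the
 * transfer completes.
 */
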
static void ivtv_dma_dec_start(struct ivtv_stream *s);

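/* Used by ivtv_irq_enc_start_cap() to map the stream number in data[0]
   of the DMA mailbox to the corresponding capture stream. */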
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};

static void ivtv_pcm_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
	struct ivtv_buffer *buf;

	/* Pass the PCM data to ivtv-alsa */

	while (1) {
		/*
		 * Users should not be using both the ALSA and V4L2 PCM audio
		 * capture interfaces at the same time. If the user is doing
		 * this, there may be a buffer in q_io to grab, use, and put
		 * back in rotation.
		 */
		buf = ivtv_dequeue(s, &s->q_io);
		if (buf == NULL)
			buf = ivtv_dequeue(s, &s->q_full);
		if (buf == NULL)
			break;

		if (buf->readpos < buf->bytesused)
			itv->pcm_announce_callback(itv->alsa,
				(u8 *)(buf->buf + buf->readpos),
				(size_t)(buf->bytesused - buf->readpos));

		ivtv_enqueue(s, buf, &s->q_free);
	}
}

static void ivtv_pio_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	int i = 0;

	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
			s->vdev.v4l2_dev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
		return;
	}
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
	list_for_each_entry(buf, &s->q_dma.list, list) {
		u32 size = s->sg_processing[i].size & 0x3ffff;

		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
		}
		else {
			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
		}
		i++;
		if (i == s->sg_processing_size)
			break;
	}
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}

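/*
 * Deferred interrupt work, run from a per-card kthread worker. The
 * handlers above are too heavy for hard interrupt context: the PIO
 * handler copies data with memcpy_fromio() and the PCM handler calls
 * into ivtv-alsa, so ivtv_irq_handler() only sets the
 * IVTV_F_I_WORK_HANDLER_* flags and queues this function.
 */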
void ivtv_irq_work_handler(struct kthread_work *work)
{
	struct ivtv *itv = container_of(work, struct ivtv, irq_work);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags))
		ivtv_pcm_work_handler(itv);
}

/* Determine the required DMA size, set up enough buffers in the predma queue
   and actually copy the data from the card to the buffers in case a PIO
   transfer is required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->sg_pending_size;
	int rc;

	/* sanity checks */
	if (s->vdev.v4l2_dev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		offset = data[1];
		size = data[2];
		s->pending_pts = 0;
		break;

	case IVTV_ENC_STREAM_TYPE_YUV:
		offset = data[1];
		size = data[2];
		UVoffset = data[3];
		UVsize = data[4];
		s->pending_pts = ((u64) data[5] << 32) | data[6];
		break;

	case IVTV_ENC_STREAM_TYPE_PCM:
		offset = data[1] + 12;
		size = data[2] - 12;
		s->pending_pts = read_dec(offset - 8) |
			((u64)(read_dec(offset - 12)) << 32);
		if (itv->has_cx23415)
			offset += IVTV_DECODER_OFFSET;
		break;

	case IVTV_ENC_STREAM_TYPE_VBI:
		size = itv->vbi.enc_size * itv->vbi.fpi;
		offset = read_enc(itv->vbi.enc_start - 4) + 12;
		if (offset == 12) {
			IVTV_DEBUG_INFO("VBI offset == 0\n");
			return -1;
		}
		s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
		break;

	case IVTV_DEC_STREAM_TYPE_VBI:
		size = read_dec(itv->vbi.dec_start + 4) + 8;
		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
		s->pending_pts = 0;
		offset += IVTV_DECODER_OFFSET;
		break;
	default:
		/* shouldn't happen */
		return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(DMA_MAGIC_COOKIE, offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(DMA_MAGIC_COOKIE, offset);
		}
		s->pending_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each_entry(buf, &s->q_predma.list, list) {
		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = min(size, s->buf_size);
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}

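/*
 * Post-process a completed transfer: restore the word that the magic
 * cookie overwrote, resync the buffers for CPU access, hand decoder VBI
 * data to the VBI parser, and move the buffers to q_full for userspace
 * (flagging the PCM work handler as well if ivtv-alsa wants the data).
 */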
static void dma_post(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf = NULL;
	struct list_head *p;
	u32 offset;
	__le32 *u32buf;
	int x = 0;

	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
			s->name, s->dma_offset);
	list_for_each(p, &s->q_dma.list) {
		buf = list_entry(p, struct ivtv_buffer, list);
		u32buf = (__le32 *)buf->buf;

		/* Sync Buffer */
		ivtv_buf_sync_for_cpu(s, buf);

		if (x == 0 && ivtv_use_dma(s)) {
			offset = s->dma_last_offset;
			if (le32_to_cpu(u32buf[offset / 4]) != DMA_MAGIC_COOKIE)
			{
				for (offset = 0; offset < 64; offset++)
					if (le32_to_cpu(u32buf[offset]) == DMA_MAGIC_COOKIE)
						break;
				offset *= 4;
				if (offset == 256) {
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
					offset = s->dma_last_offset;
				}
				if (s->dma_last_offset != offset)
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
				s->dma_last_offset = offset;
			}
			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
						s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
			}
			else {
				write_enc_sync(0, s->dma_offset);
			}
			if (offset) {
				buf->bytesused -= offset;
				memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
			}
			*u32buf = cpu_to_le32(s->dma_backup);
		}
		x++;
		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
			buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
	}
	if (buf)
		buf->bytesused += s->dma_last_offset;
	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		list_for_each_entry(buf, &s->q_dma.list, list) {
			/* Parse and Groom VBI Data */
			s->q_dma.bytesused -= buf->bytesused;
			ivtv_process_vbi_data(itv, buf, 0, s->type);
			s->q_dma.bytesused += buf->bytesused;
		}
		if (s->fh == NULL) {
			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
			return;
		}
	}

	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);

	if (s->type == IVTV_ENC_STREAM_TYPE_PCM &&
	    itv->pcm_announce_callback != NULL) {
		/*
		 * Set up the work handler to pass the data to ivtv-alsa.
		 *
		 * We just use q_full and let the work handler race with users
		 * making ivtv-fileops.c calls on the PCM device node.
		 *
		 * Users should not be using both the ALSA and V4L2 PCM audio
		 * capture interfaces at the same time. If the user does this,
		 * fragments of data will just go out each interface as they
		 * race for PCM data.
		 */
		set_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
	}

	if (s->fh)
		wake_up(&s->waitq);
}

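/* Build the scatter-gather list for a decoder transfer (inserting a
   blanking buffer first for YUV if needed) and start it, or mark the
   stream as DMA-pending if a transfer is already in flight. */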
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	struct ivtv_buffer *buf;
	u32 y_size = 720 * ((f->src_h + 31) & ~31);
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);

	/* Insert buffer block for YUV if needed */
	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
		if (yi->blanking_dmaptr) {
			s->sg_pending[idx].src = yi->blanking_dmaptr;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = 720 * 16;
		}
		offset += 720 * 16;
		idx++;
	}

	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
				(bytes_written + buf->bytesused) >= y_size) {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = y_size - bytes_written;
			offset = uv_offset;
			if (s->sg_pending[idx].size != buf->bytesused) {
				idx++;
				s->sg_pending[idx].src =
					buf->dma_handle + s->sg_pending[idx - 1].size;
				s->sg_pending[idx].dst = offset;
				s->sg_pending[idx].size =
					buf->bytesused - s->sg_pending[idx - 1].size;
				offset += s->sg_pending[idx].size;
			}
			y_done = 1;
		} else {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = buf->bytesused;
			offset += buf->bytesused;
		}
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->sg_pending_size = idx;

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock) {
		unsigned long flags = 0;

		spin_lock_irqsave(&itv->dma_reg_lock, flags);
		if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
			ivtv_dma_dec_start(s);
		else
			set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
	} else {
		if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
			ivtv_dma_dec_start(s);
		else
			set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}

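/*
 * Program one scatter-gather element and kick the hardware. The
 * dma_timer armed here acts as a watchdog: if no completion interrupt
 * arrives within 300 ms, ivtv_unfinished_dma() aborts the transfer.
 */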
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of an
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make sure
	   the VBI data is only ever transferred as part of the MPEG DMA when both are
	   in use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
			s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
	}

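	/*
	 * Snapshot the pending SG list into the processing list so that
	 * new capture requests can keep adding SG elements while this
	 * transfer is in flight.
	 */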
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
	}
}

static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
}

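/* Completion of a "DMA read", i.e. a transfer towards the card (judging
   by CX2341X_DEC_SCHED_DMA_FROM_HOST below): either a decoder stream
   transfer or a user DMA (UDMA) transfer. */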
static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");

	del_timer(&itv->dma_timer);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0)
		return;

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);

		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			}
			else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason we must kick the firmware, as in PIO mode.
		   I think this tells the firmware we are done and the size of
		   the xfer so it can calculate what we need next. I think we
		   could do this part ourselves, but we would have to fully
		   calculate the xfer info ourselves and not use interrupts. */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);

	del_timer(&itv->dma_timer);

	if (itv->cur_dma_stream < 0)
		return;

	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
			s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		}
		else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
	struct ivtv_stream *s;

	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
		itv->cur_pio_stream = -1;
		return;
	}
	s = &itv->streams[itv->cur_pio_stream];
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	itv->cur_pio_stream = -1;
	dma_post(s);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	u32 status;

	del_timer(&itv->dma_timer);

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	status = read_reg(IVTV_REG_DMASTATUS);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				status, itv->cur_dma_stream);
	/*
	 * We do *not* write back to the IVTV_REG_DMASTATUS register to
	 * clear the error status if either the encoder write (0x02) or
	 * decoder read (0x01) bus master DMA operation does not indicate
	 * completion. We can race with the DMA engine, which may have
	 * transitioned to completed status *after* we read the register.
	 * Setting an IVTV_REG_DMASTATUS flag back to "busy" status after the
	 * DMA engine has completed will cause the DMA engine to stop working.
	 */
	status &= 0x3;
	if (status == 0x3)
		write_reg(status, IVTV_REG_DMASTATUS);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
			/* retry */
			/*
			 * FIXME - handle cases of DMA error similar to
			 * encoder below, except conditioned on status & 0x1
			 */
			ivtv_dma_dec_start(s);
			return;
		} else {
			if ((status & 0x2) == 0) {
				/*
				 * CX2341x Bus Master DMA write is ongoing.
				 * Reset the timer and let it complete.
				 */
				itv->dma_timer.expires =
						jiffies + msecs_to_jiffies(600);
				add_timer(&itv->dma_timer);
				return;
			}

			if (itv->dma_retries < 3) {
				/*
				 * CX2341x Bus Master DMA write has ended.
				 * Retry the write, starting with the first
				 * xfer segment. Just retrying the current
				 * segment is not sufficient.
				 */
				s->sg_processed = 0;
				itv->dma_retries++;
				ivtv_dma_enc_start_xfer(s);
				return;
			}
			/* Too many retries, give up on this one */
		}

	}
	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		ivtv_udma_start(itv);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
			data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}

static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	if (!stream_enc_dma_append(s, data))
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
}

static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}

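/* The decoder firmware has room for more data: either flag the stream
   as starved or queue the next transfer to the card from q_full. */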
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data);
		itv->dma_data_req_size =
			1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
		itv->dma_data_req_offset = data[1];
		if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
			ivtv_yuv_frame_complete(itv);
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data);
		itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
		       itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
			ivtv_yuv_setup_stream_frame(itv);
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}

static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1;
	struct yuv_playback_info *yi = &itv->yuv_info;
	int last_dma_frame = atomic_read(&yi->next_dma_frame);
	struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	if (((frame ^ f->sync_field) == 0 &&
	     ((itv->last_vsync_field & 1) ^ f->sync_field)) ||
	    (frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
				atomic_set(&yi->next_dma_frame, next_dma_frame);
				yi->fields_lapsed = -1;
				yi->running = 1;
			}
		}
	}
	if (frame != (itv->last_vsync_field & 1)) {
		static const struct v4l2_event evtop = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_TOP,
		};
		static const struct v4l2_event evbottom = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_BOTTOM,
		};
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
			if (s)
				wake_up(&s->waitq);
		}
		if (s && s->vdev.v4l2_dev)
			v4l2_event_queue(&s->vdev, frame ? &evtop : &evbottom);
		wake_up(&itv->vsync_waitq);

		/* Send VBI to saa7127 */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
			test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if (yi->running && (yi->yuv_forced_update || f->update)) {
			if (!f->update) {
				last_dma_frame =
					(u8)(atomic_read(&yi->next_dma_frame) -
						 1) % IVTV_YUV_BUFFERS;
				f = &yi->new_frame_info[last_dma_frame];
			}

			if (f->src_w) {
				yi->update_frame = last_dma_frame;
				f->update = 0;
				yi->yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		yi->fields_lapsed++;
	}
}

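/* The IRQ sources after which it is worth checking whether a pending
   DMA or PIO transfer can be kicked off. */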
#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)

irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->last_vsync_field & 1) !=
			    (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
				       read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

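	/*
	 * Kick off the next pending transfer if the DMA engine is idle.
	 * irq_rr_idx rotates the starting point of the scan so that no
	 * single stream can monopolise the engine.
	 */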
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}

		if (i == IVTV_MAX_STREAMS &&
		    test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
			ivtv_udma_start(itv);
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		kthread_queue_work(&itv->irq_worker, &itv->irq_work);
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}

void ivtv_unfinished_dma(struct timer_list *t)
{
	struct ivtv *itv = from_timer(itv, t, dma_timer);

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}