// SPDX-License-Identifier: GPL-2.0-or-later
/*
   cx231xx_vbi.c - driver for Conexant Cx23100/101/102 USB video capture devices

   Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
*/
11 #include <linux/init.h>
12 #include <linux/list.h>
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/bitmap.h>
16 #include <linux/i2c.h>
18 #include <linux/mutex.h>
19 #include <linux/slab.h>
21 #include <media/v4l2-common.h>
22 #include <media/v4l2-ioctl.h>
23 #include <media/drv-intf/msp3400.h>
24 #include <media/tuner.h>
26 #include "cx231xx-vbi.h"
28 static inline void print_err_status(struct cx231xx
*dev
, int packet
, int status
)
30 char *errmsg
= "Unknown";
34 errmsg
= "unlinked synchronously";
37 errmsg
= "unlinked asynchronously";
40 errmsg
= "Buffer error (overrun)";
43 errmsg
= "Stalled (device not responding)";
46 errmsg
= "Babble (bad cable?)";
49 errmsg
= "Bit-stuff error (bad cable?)";
52 errmsg
= "CRC/Timeout (could be anything)";
55 errmsg
= "Device does not respond";
60 "URB status %d [%s].\n", status
, errmsg
);
63 "URB packet %d, status %d [%s].\n",
64 packet
, status
, errmsg
);
69 * Controls the isoc copy of each urb packet
71 static inline int cx231xx_isoc_vbi_copy(struct cx231xx
*dev
, struct urb
*urb
)
73 struct cx231xx_dmaqueue
*dma_q
= urb
->context
;
75 unsigned char *p_buffer
;
76 u32 bytes_parsed
= 0, buffer_size
= 0;
82 if (dev
->state
& DEV_DISCONNECTED
)
85 if (urb
->status
< 0) {
86 print_err_status(dev
, -1, urb
->status
);
87 if (urb
->status
== -ENOENT
)
91 /* get buffer pointer and length */
92 p_buffer
= urb
->transfer_buffer
;
93 buffer_size
= urb
->actual_length
;
95 if (buffer_size
> 0) {
98 if (dma_q
->is_partial_line
) {
99 /* Handle the case where we were working on a partial
101 sav_eav
= dma_q
->last_sav
;
103 /* Check for a SAV/EAV overlapping the
106 sav_eav
= cx231xx_find_boundary_SAV_EAV(p_buffer
,
112 /* Get the first line if we have some portion of an SAV/EAV from
113 the last buffer or a partial line */
115 bytes_parsed
+= cx231xx_get_vbi_line(dev
, dma_q
,
116 sav_eav
, /* SAV/EAV */
117 p_buffer
+ bytes_parsed
, /* p_buffer */
118 buffer_size
- bytes_parsed
); /* buffer size */
121 /* Now parse data that is completely in this buffer */
122 dma_q
->is_partial_line
= 0;
124 while (bytes_parsed
< buffer_size
) {
127 sav_eav
= cx231xx_find_next_SAV_EAV(
128 p_buffer
+ bytes_parsed
, /* p_buffer */
129 buffer_size
- bytes_parsed
, /* buffer size */
130 &bytes_used
); /* bytes used to get SAV/EAV */
132 bytes_parsed
+= bytes_used
;
135 if (sav_eav
&& (bytes_parsed
< buffer_size
)) {
136 bytes_parsed
+= cx231xx_get_vbi_line(dev
,
137 dma_q
, sav_eav
, /* SAV/EAV */
138 p_buffer
+bytes_parsed
, /* p_buffer */
139 buffer_size
-bytes_parsed
);/*buf size*/
143 /* Save the last four bytes of the buffer so we can
144 check the buffer boundary condition next time */
145 memcpy(dma_q
->partial_buf
, p_buffer
+ buffer_size
- 4, 4);
152 /* ------------------------------------------------------------------
154 ------------------------------------------------------------------*/
157 vbi_buffer_setup(struct videobuf_queue
*vq
, unsigned int *count
,
160 struct cx231xx_fh
*fh
= vq
->priv_data
;
161 struct cx231xx
*dev
= fh
->dev
;
164 height
= ((dev
->norm
& V4L2_STD_625_50
) ?
165 PAL_VBI_LINES
: NTSC_VBI_LINES
);
167 *size
= (dev
->width
* height
* 2 * 2);
169 *count
= CX231XX_DEF_VBI_BUF
;
171 if (*count
< CX231XX_MIN_BUF
)
172 *count
= CX231XX_MIN_BUF
;
177 /* This is called *without* dev->slock held; please keep it that way */
178 static void free_buffer(struct videobuf_queue
*vq
, struct cx231xx_buffer
*buf
)
180 struct cx231xx_fh
*fh
= vq
->priv_data
;
181 struct cx231xx
*dev
= fh
->dev
;
182 unsigned long flags
= 0;
183 BUG_ON(in_interrupt());
185 /* We used to wait for the buffer to finish here, but this didn't work
186 because, as we were keeping the state as VIDEOBUF_QUEUED,
187 videobuf_queue_cancel marked it as finished for us.
188 (Also, it could wedge forever if the hardware was misconfigured.)
190 This should be safe; by the time we get here, the buffer isn't
191 queued anymore. If we ever start marking the buffers as
192 VIDEOBUF_ACTIVE, it won't be, though.
194 spin_lock_irqsave(&dev
->vbi_mode
.slock
, flags
);
195 if (dev
->vbi_mode
.bulk_ctl
.buf
== buf
)
196 dev
->vbi_mode
.bulk_ctl
.buf
= NULL
;
197 spin_unlock_irqrestore(&dev
->vbi_mode
.slock
, flags
);
199 videobuf_vmalloc_free(&buf
->vb
);
200 buf
->vb
.state
= VIDEOBUF_NEEDS_INIT
;
204 vbi_buffer_prepare(struct videobuf_queue
*vq
, struct videobuf_buffer
*vb
,
205 enum v4l2_field field
)
207 struct cx231xx_fh
*fh
= vq
->priv_data
;
208 struct cx231xx_buffer
*buf
=
209 container_of(vb
, struct cx231xx_buffer
, vb
);
210 struct cx231xx
*dev
= fh
->dev
;
211 int rc
= 0, urb_init
= 0;
214 height
= ((dev
->norm
& V4L2_STD_625_50
) ?
215 PAL_VBI_LINES
: NTSC_VBI_LINES
);
216 buf
->vb
.size
= ((dev
->width
<< 1) * height
* 2);
218 if (0 != buf
->vb
.baddr
&& buf
->vb
.bsize
< buf
->vb
.size
)
221 buf
->vb
.width
= dev
->width
;
222 buf
->vb
.height
= height
;
223 buf
->vb
.field
= field
;
224 buf
->vb
.field
= V4L2_FIELD_SEQ_TB
;
226 if (VIDEOBUF_NEEDS_INIT
== buf
->vb
.state
) {
227 rc
= videobuf_iolock(vq
, &buf
->vb
, NULL
);
232 if (!dev
->vbi_mode
.bulk_ctl
.num_bufs
)
236 rc
= cx231xx_init_vbi_isoc(dev
, CX231XX_NUM_VBI_PACKETS
,
237 CX231XX_NUM_VBI_BUFS
,
238 dev
->vbi_mode
.alt_max_pkt_size
[0],
239 cx231xx_isoc_vbi_copy
);
244 buf
->vb
.state
= VIDEOBUF_PREPARED
;
248 free_buffer(vq
, buf
);
253 vbi_buffer_queue(struct videobuf_queue
*vq
, struct videobuf_buffer
*vb
)
255 struct cx231xx_buffer
*buf
=
256 container_of(vb
, struct cx231xx_buffer
, vb
);
257 struct cx231xx_fh
*fh
= vq
->priv_data
;
258 struct cx231xx
*dev
= fh
->dev
;
259 struct cx231xx_dmaqueue
*vidq
= &dev
->vbi_mode
.vidq
;
261 buf
->vb
.state
= VIDEOBUF_QUEUED
;
262 list_add_tail(&buf
->vb
.queue
, &vidq
->active
);
266 static void vbi_buffer_release(struct videobuf_queue
*vq
,
267 struct videobuf_buffer
*vb
)
269 struct cx231xx_buffer
*buf
=
270 container_of(vb
, struct cx231xx_buffer
, vb
);
273 free_buffer(vq
, buf
);
276 const struct videobuf_queue_ops cx231xx_vbi_qops
= {
277 .buf_setup
= vbi_buffer_setup
,
278 .buf_prepare
= vbi_buffer_prepare
,
279 .buf_queue
= vbi_buffer_queue
,
280 .buf_release
= vbi_buffer_release
,
283 /* ------------------------------------------------------------------
285 ------------------------------------------------------------------*/
288 * IRQ callback, called by URB callback
290 static void cx231xx_irq_vbi_callback(struct urb
*urb
)
292 struct cx231xx_dmaqueue
*dma_q
= urb
->context
;
293 struct cx231xx_video_mode
*vmode
=
294 container_of(dma_q
, struct cx231xx_video_mode
, vidq
);
295 struct cx231xx
*dev
= container_of(vmode
, struct cx231xx
, vbi_mode
);
298 switch (urb
->status
) {
299 case 0: /* success */
300 case -ETIMEDOUT
: /* NAK */
302 case -ECONNRESET
: /* kill */
308 "urb completion error %d.\n", urb
->status
);
312 /* Copy data from URB */
313 spin_lock_irqsave(&dev
->vbi_mode
.slock
, flags
);
314 dev
->vbi_mode
.bulk_ctl
.bulk_copy(dev
, urb
);
315 spin_unlock_irqrestore(&dev
->vbi_mode
.slock
, flags
);
320 urb
->status
= usb_submit_urb(urb
, GFP_ATOMIC
);
322 dev_err(dev
->dev
, "urb resubmit failed (error=%i)\n",
328 * Stop and Deallocate URBs
330 void cx231xx_uninit_vbi_isoc(struct cx231xx
*dev
)
335 dev_dbg(dev
->dev
, "called cx231xx_uninit_vbi_isoc\n");
337 dev
->vbi_mode
.bulk_ctl
.nfields
= -1;
338 for (i
= 0; i
< dev
->vbi_mode
.bulk_ctl
.num_bufs
; i
++) {
339 urb
= dev
->vbi_mode
.bulk_ctl
.urb
[i
];
341 if (!irqs_disabled())
346 if (dev
->vbi_mode
.bulk_ctl
.transfer_buffer
[i
]) {
348 kfree(dev
->vbi_mode
.bulk_ctl
.
350 dev
->vbi_mode
.bulk_ctl
.transfer_buffer
[i
] =
354 dev
->vbi_mode
.bulk_ctl
.urb
[i
] = NULL
;
356 dev
->vbi_mode
.bulk_ctl
.transfer_buffer
[i
] = NULL
;
359 kfree(dev
->vbi_mode
.bulk_ctl
.urb
);
360 kfree(dev
->vbi_mode
.bulk_ctl
.transfer_buffer
);
362 dev
->vbi_mode
.bulk_ctl
.urb
= NULL
;
363 dev
->vbi_mode
.bulk_ctl
.transfer_buffer
= NULL
;
364 dev
->vbi_mode
.bulk_ctl
.num_bufs
= 0;
366 cx231xx_capture_start(dev
, 0, Vbi
);
368 EXPORT_SYMBOL_GPL(cx231xx_uninit_vbi_isoc
);
371 * Allocate URBs and start IRQ
373 int cx231xx_init_vbi_isoc(struct cx231xx
*dev
, int max_packets
,
374 int num_bufs
, int max_pkt_size
,
375 int (*bulk_copy
) (struct cx231xx
*dev
,
378 struct cx231xx_dmaqueue
*dma_q
= &dev
->vbi_mode
.vidq
;
384 dev_dbg(dev
->dev
, "called cx231xx_vbi_isoc\n");
386 /* De-allocates all pending stuff */
387 cx231xx_uninit_vbi_isoc(dev
);
389 /* clear if any halt */
390 usb_clear_halt(dev
->udev
,
391 usb_rcvbulkpipe(dev
->udev
,
392 dev
->vbi_mode
.end_point_addr
));
394 dev
->vbi_mode
.bulk_ctl
.bulk_copy
= bulk_copy
;
395 dev
->vbi_mode
.bulk_ctl
.num_bufs
= num_bufs
;
397 dma_q
->is_partial_line
= 0;
399 dma_q
->current_field
= -1;
400 dma_q
->bytes_left_in_line
= dev
->width
<< 1;
401 dma_q
->lines_per_field
= ((dev
->norm
& V4L2_STD_625_50
) ?
402 PAL_VBI_LINES
: NTSC_VBI_LINES
);
403 dma_q
->lines_completed
= 0;
404 for (i
= 0; i
< 8; i
++)
405 dma_q
->partial_buf
[i
] = 0;
407 dev
->vbi_mode
.bulk_ctl
.urb
= kcalloc(num_bufs
, sizeof(void *),
409 if (!dev
->vbi_mode
.bulk_ctl
.urb
) {
411 "cannot alloc memory for usb buffers\n");
415 dev
->vbi_mode
.bulk_ctl
.transfer_buffer
=
416 kcalloc(num_bufs
, sizeof(void *), GFP_KERNEL
);
417 if (!dev
->vbi_mode
.bulk_ctl
.transfer_buffer
) {
419 "cannot allocate memory for usbtransfer\n");
420 kfree(dev
->vbi_mode
.bulk_ctl
.urb
);
424 dev
->vbi_mode
.bulk_ctl
.max_pkt_size
= max_pkt_size
;
425 dev
->vbi_mode
.bulk_ctl
.buf
= NULL
;
427 sb_size
= max_packets
* dev
->vbi_mode
.bulk_ctl
.max_pkt_size
;
429 /* allocate urbs and transfer buffers */
430 for (i
= 0; i
< dev
->vbi_mode
.bulk_ctl
.num_bufs
; i
++) {
432 urb
= usb_alloc_urb(0, GFP_KERNEL
);
434 cx231xx_uninit_vbi_isoc(dev
);
437 dev
->vbi_mode
.bulk_ctl
.urb
[i
] = urb
;
438 urb
->transfer_flags
= 0;
440 dev
->vbi_mode
.bulk_ctl
.transfer_buffer
[i
] =
441 kzalloc(sb_size
, GFP_KERNEL
);
442 if (!dev
->vbi_mode
.bulk_ctl
.transfer_buffer
[i
]) {
444 "unable to allocate %i bytes for transfer buffer %i%s\n",
446 in_interrupt() ? " while in int" : "");
447 cx231xx_uninit_vbi_isoc(dev
);
451 pipe
= usb_rcvbulkpipe(dev
->udev
, dev
->vbi_mode
.end_point_addr
);
452 usb_fill_bulk_urb(urb
, dev
->udev
, pipe
,
453 dev
->vbi_mode
.bulk_ctl
.transfer_buffer
[i
],
454 sb_size
, cx231xx_irq_vbi_callback
, dma_q
);
457 init_waitqueue_head(&dma_q
->wq
);
459 /* submit urbs and enables IRQ */
460 for (i
= 0; i
< dev
->vbi_mode
.bulk_ctl
.num_bufs
; i
++) {
461 rc
= usb_submit_urb(dev
->vbi_mode
.bulk_ctl
.urb
[i
], GFP_ATOMIC
);
464 "submit of urb %i failed (error=%i)\n", i
, rc
);
465 cx231xx_uninit_vbi_isoc(dev
);
470 cx231xx_capture_start(dev
, 1, Vbi
);
474 EXPORT_SYMBOL_GPL(cx231xx_init_vbi_isoc
);
476 u32
cx231xx_get_vbi_line(struct cx231xx
*dev
, struct cx231xx_dmaqueue
*dma_q
,
477 u8 sav_eav
, u8
*p_buffer
, u32 buffer_size
)
479 u32 bytes_copied
= 0;
480 int current_field
= -1;
495 if (current_field
< 0)
498 dma_q
->last_sav
= sav_eav
;
501 cx231xx_copy_vbi_line(dev
, dma_q
, p_buffer
, buffer_size
,
508 * Announces that a buffer were filled and request the next
510 static inline void vbi_buffer_filled(struct cx231xx
*dev
,
511 struct cx231xx_dmaqueue
*dma_q
,
512 struct cx231xx_buffer
*buf
)
514 /* Advice that buffer was filled */
515 /* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.i); */
517 buf
->vb
.state
= VIDEOBUF_DONE
;
518 buf
->vb
.field_count
++;
519 buf
->vb
.ts
= ktime_get_ns();
521 dev
->vbi_mode
.bulk_ctl
.buf
= NULL
;
523 list_del(&buf
->vb
.queue
);
524 wake_up(&buf
->vb
.done
);
527 u32
cx231xx_copy_vbi_line(struct cx231xx
*dev
, struct cx231xx_dmaqueue
*dma_q
,
528 u8
*p_line
, u32 length
, int field_number
)
531 struct cx231xx_buffer
*buf
;
532 u32 _line_size
= dev
->width
* 2;
534 if (dma_q
->current_field
== -1) {
535 /* Just starting up */
536 cx231xx_reset_vbi_buffer(dev
, dma_q
);
539 if (dma_q
->current_field
!= field_number
)
540 dma_q
->lines_completed
= 0;
542 /* get the buffer pointer */
543 buf
= dev
->vbi_mode
.bulk_ctl
.buf
;
545 /* Remember the field number for next time */
546 dma_q
->current_field
= field_number
;
548 bytes_to_copy
= dma_q
->bytes_left_in_line
;
549 if (bytes_to_copy
> length
)
550 bytes_to_copy
= length
;
552 if (dma_q
->lines_completed
>= dma_q
->lines_per_field
) {
553 dma_q
->bytes_left_in_line
-= bytes_to_copy
;
554 dma_q
->is_partial_line
=
555 (dma_q
->bytes_left_in_line
== 0) ? 0 : 1;
559 dma_q
->is_partial_line
= 1;
561 /* If we don't have a buffer, just return the number of bytes we would
562 have copied if we had a buffer. */
564 dma_q
->bytes_left_in_line
-= bytes_to_copy
;
565 dma_q
->is_partial_line
=
566 (dma_q
->bytes_left_in_line
== 0) ? 0 : 1;
567 return bytes_to_copy
;
570 /* copy the data to video buffer */
571 cx231xx_do_vbi_copy(dev
, dma_q
, p_line
, bytes_to_copy
);
573 dma_q
->pos
+= bytes_to_copy
;
574 dma_q
->bytes_left_in_line
-= bytes_to_copy
;
576 if (dma_q
->bytes_left_in_line
== 0) {
578 dma_q
->bytes_left_in_line
= _line_size
;
579 dma_q
->lines_completed
++;
580 dma_q
->is_partial_line
= 0;
582 if (cx231xx_is_vbi_buffer_done(dev
, dma_q
) && buf
) {
584 vbi_buffer_filled(dev
, dma_q
, buf
);
587 dma_q
->lines_completed
= 0;
588 cx231xx_reset_vbi_buffer(dev
, dma_q
);
592 return bytes_to_copy
;
596 * video-buf generic routine to get the next available buffer
598 static inline void get_next_vbi_buf(struct cx231xx_dmaqueue
*dma_q
,
599 struct cx231xx_buffer
**buf
)
601 struct cx231xx_video_mode
*vmode
=
602 container_of(dma_q
, struct cx231xx_video_mode
, vidq
);
603 struct cx231xx
*dev
= container_of(vmode
, struct cx231xx
, vbi_mode
);
606 if (list_empty(&dma_q
->active
)) {
607 dev_err(dev
->dev
, "No active queue to serve\n");
608 dev
->vbi_mode
.bulk_ctl
.buf
= NULL
;
613 /* Get the next buffer */
614 *buf
= list_entry(dma_q
->active
.next
, struct cx231xx_buffer
, vb
.queue
);
616 /* Cleans up buffer - Useful for testing for frame/URB loss */
617 outp
= videobuf_to_vmalloc(&(*buf
)->vb
);
618 memset(outp
, 0, (*buf
)->vb
.size
);
620 dev
->vbi_mode
.bulk_ctl
.buf
= *buf
;
625 void cx231xx_reset_vbi_buffer(struct cx231xx
*dev
,
626 struct cx231xx_dmaqueue
*dma_q
)
628 struct cx231xx_buffer
*buf
;
630 buf
= dev
->vbi_mode
.bulk_ctl
.buf
;
633 /* first try to get the buffer */
634 get_next_vbi_buf(dma_q
, &buf
);
637 dma_q
->current_field
= -1;
640 dma_q
->bytes_left_in_line
= dev
->width
<< 1;
641 dma_q
->lines_completed
= 0;
644 int cx231xx_do_vbi_copy(struct cx231xx
*dev
, struct cx231xx_dmaqueue
*dma_q
,
645 u8
*p_buffer
, u32 bytes_to_copy
)
647 u8
*p_out_buffer
= NULL
;
648 u32 current_line_bytes_copied
= 0;
649 struct cx231xx_buffer
*buf
;
650 u32 _line_size
= dev
->width
<< 1;
654 buf
= dev
->vbi_mode
.bulk_ctl
.buf
;
659 p_out_buffer
= videobuf_to_vmalloc(&buf
->vb
);
661 if (dma_q
->bytes_left_in_line
!= _line_size
) {
662 current_line_bytes_copied
=
663 _line_size
- dma_q
->bytes_left_in_line
;
666 offset
= (dma_q
->lines_completed
* _line_size
) +
667 current_line_bytes_copied
;
669 if (dma_q
->current_field
== 2) {
670 /* Populate the second half of the frame */
671 offset
+= (dev
->width
* 2 * dma_q
->lines_per_field
);
674 /* prepare destination address */
675 startwrite
= p_out_buffer
+ offset
;
677 lencopy
= dma_q
->bytes_left_in_line
> bytes_to_copy
?
678 bytes_to_copy
: dma_q
->bytes_left_in_line
;
680 memcpy(startwrite
, p_buffer
, lencopy
);
685 u8
cx231xx_is_vbi_buffer_done(struct cx231xx
*dev
,
686 struct cx231xx_dmaqueue
*dma_q
)
690 height
= ((dev
->norm
& V4L2_STD_625_50
) ?
691 PAL_VBI_LINES
: NTSC_VBI_LINES
);
692 if (dma_q
->lines_completed
== height
&& dma_q
->current_field
== 2)