/*
 * uvc_queue.c -- USB Video Class driver - Buffers management
 *
 * Copyright (C) 2005-2010
 *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "uvcvideo.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * Video queues are initialized by uvc_queue_init(). The function performs
 * basic initialization of the uvc_video_queue struct and never fails.
 *
 * Video buffers are managed by videobuf2. The driver uses a mutex to protect
 * the videobuf2 queue operations by serializing calls to videobuf2 and a
 * spinlock to protect the IRQ queue that holds the buffers to be processed by
 * the driver.
 */

static inline struct uvc_streaming *
uvc_queue_to_stream(struct uvc_video_queue *queue)
{
	return container_of(queue, struct uvc_streaming, queue);
}

/*
 * Return all queued buffers to videobuf2 in the requested state.
 *
 * This function must be called with the queue spinlock held.
 */
static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
				     enum uvc_buffer_state state)
{
	enum vb2_buffer_state vb2_state = state == UVC_BUF_STATE_ERROR
					? VB2_BUF_STATE_ERROR
					: VB2_BUF_STATE_QUEUED;

	while (!list_empty(&queue->irqqueue)) {
		struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
							  struct uvc_buffer,
							  queue);
		list_del(&buf->queue);
		buf->state = state;
		vb2_buffer_done(&buf->buf.vb2_buf, vb2_state);
	}
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

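/*
 * vb2 queue_setup operation, called when buffers are requested. The driver
 * reports a single plane per buffer and requires each buffer to be large
 * enough to hold a complete frame (dwMaxVideoFrameSize from the negotiated
 * streaming parameters).
 */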
static int uvc_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	unsigned size = stream->ctrl.dwMaxVideoFrameSize;

	/* Make sure the image size is large enough. */
	if (*nplanes)
		return sizes[0] < size ? -EINVAL : 0;
	*nplanes = 1;
	sizes[0] = size;
	return 0;
}

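/*
 * vb2 buf_prepare operation, called before a buffer is queued. It validates
 * the payload size for output buffers, rejects buffers once the device has
 * been disconnected, and caches the plane address, size and payload in the
 * uvc_buffer structure for later use by the driver.
 */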
static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);

	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
		return -EINVAL;
	}

	if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
		return -ENODEV;

	buf->state = UVC_BUF_STATE_QUEUED;
	buf->error = 0;
	buf->mem = vb2_plane_vaddr(vb, 0);
	buf->length = vb2_plane_size(vb, 0);
	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		buf->bytesused = 0;
	else
		buf->bytesused = vb2_get_plane_payload(vb, 0);

	return 0;
}

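/*
 * vb2 buf_queue operation. Buffers are added to the IRQ queue under the
 * irqlock spinlock so that they can be picked up for filling. If the device
 * has been disconnected the buffer is returned to videobuf2 immediately in
 * the error state instead.
 */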
static void uvc_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
		list_add_tail(&buf->queue, &queue->irqqueue);
	} else {
		/* If the device is disconnected return the buffer to userspace
		 * directly. The next QBUF call will fail with -ENODEV.
		 */
		buf->state = UVC_BUF_STATE_ERROR;
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&queue->irqlock, flags);
}

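/*
 * vb2 buf_finish operation, called before the buffer is handed back to
 * userspace. Buffers that completed successfully are passed to
 * uvc_video_clock_update() so that their timestamps can be adjusted.
 */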
static void uvc_buffer_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);

	if (vb->state == VB2_BUF_STATE_DONE)
		uvc_video_clock_update(stream, vbuf, buf);
}

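/*
 * vb2 start_streaming operation. It enables the video stream at the USB
 * level; on failure all buffers already owned by the driver are returned to
 * videobuf2 in the queued state, as required by the vb2 API.
 */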
static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	unsigned long flags;
	int ret;

	queue->buf_used = 0;

	ret = uvc_video_enable(stream, 1);
	if (ret == 0)
		return 0;

	spin_lock_irqsave(&queue->irqlock, flags);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	return ret;
}

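/*
 * vb2 stop_streaming operation. It disables the video stream and returns any
 * buffer still owned by the driver to videobuf2 in the error state.
 */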
static void uvc_stop_streaming(struct vb2_queue *vq)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	unsigned long flags;

	uvc_video_enable(stream, 0);

	spin_lock_irqsave(&queue->irqlock, flags);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
	spin_unlock_irqrestore(&queue->irqlock, flags);
}

static const struct vb2_ops uvc_queue_qops = {
	.queue_setup = uvc_queue_setup,
	.buf_prepare = uvc_buffer_prepare,
	.buf_queue = uvc_buffer_queue,
	.buf_finish = uvc_buffer_finish,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = uvc_start_streaming,
	.stop_streaming = uvc_stop_streaming,
};

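/*
 * Initialize the buffer queue. The vb2 queue is configured for vmalloc'ed
 * buffers with MMAP, USERPTR and DMABUF support, monotonic start-of-exposure
 * timestamps and the queue mutex as the vb2 lock. When drop_corrupted is set
 * the UVC_QUEUE_DROP_CORRUPTED flag is stored so that corrupted buffers are
 * requeued by the driver instead of being handed to userspace.
 */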
int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
		    int drop_corrupted)
{
	int ret;

	queue->queue.type = type;
	queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	queue->queue.drv_priv = queue;
	queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
	queue->queue.ops = &uvc_queue_qops;
	queue->queue.mem_ops = &vb2_vmalloc_memops;
	queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
		| V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
	queue->queue.lock = &queue->mutex;
	ret = vb2_queue_init(&queue->queue);
	if (ret)
		return ret;

	mutex_init(&queue->mutex);
	spin_lock_init(&queue->irqlock);
	INIT_LIST_HEAD(&queue->irqqueue);
	queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;

	return 0;
}

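/*
 * Release the buffer queue. All vb2 resources associated with the queue are
 * freed under the protection of the queue mutex.
 */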
void uvc_queue_release(struct uvc_video_queue *queue)
{
	mutex_lock(&queue->mutex);
	vb2_queue_release(&queue->queue);
	mutex_unlock(&queue->mutex);
}

/* -----------------------------------------------------------------------------
 * V4L2 queue operations
 */

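/*
 * The helpers below back the V4L2 buffer ioctls. They only serialize access
 * to the vb2 queue with the queue mutex and delegate the actual work to the
 * corresponding vb2_*() functions.
 */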
int uvc_request_buffers(struct uvc_video_queue *queue,
			struct v4l2_requestbuffers *rb)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_reqbufs(&queue->queue, rb);
	mutex_unlock(&queue->mutex);

	return ret ? ret : rb->count;
}

int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_querybuf(&queue->queue, buf);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_create_buffers(struct uvc_video_queue *queue,
		       struct v4l2_create_buffers *cb)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_create_bufs(&queue->queue, cb);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_qbuf(&queue->queue, buf);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_export_buffer(struct uvc_video_queue *queue,
		      struct v4l2_exportbuffer *exp)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_expbuf(&queue->queue, exp);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
		       int nonblocking)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_streamon(&queue->queue, type);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_streamoff(&queue->queue, type);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
	return vb2_mmap(&queue->queue, vma);
}

#ifndef CONFIG_MMU
unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
		unsigned long pgoff)
{
	return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
}
#endif

unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
			    poll_table *wait)
{
	unsigned int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_poll(&queue->queue, file, wait);
	mutex_unlock(&queue->mutex);

	return ret;
}

/* -----------------------------------------------------------------------------
 *
 */

/*
 * Check if buffers have been allocated.
 */
int uvc_queue_allocated(struct uvc_video_queue *queue)
{
	int allocated;

	mutex_lock(&queue->mutex);
	allocated = vb2_is_busy(&queue->queue);
	mutex_unlock(&queue->mutex);

	return allocated;
}

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvc_queue_buffer will
 * fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
	/* This must be protected by the irqlock spinlock to avoid race
	 * conditions between uvc_buffer_queue and the disconnection event that
	 * could result in an interruptible wait in uvc_dequeue_buffer. Do not
	 * blindly replace this logic by checking for the UVC_QUEUE_DISCONNECTED
	 * state outside the queue code.
	 */
	if (disconnect)
		queue->flags |= UVC_QUEUE_DISCONNECTED;
	spin_unlock_irqrestore(&queue->irqlock, flags);
}

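/*
 * Complete the current buffer and return the next buffer to fill, or NULL if
 * the IRQ queue is empty. When UVC_QUEUE_DROP_CORRUPTED is set, a buffer
 * flagged as erroneous is reset and reused instead of being completed.
 */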
struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
		struct uvc_buffer *buf)
{
	struct uvc_buffer *nextbuf;
	unsigned long flags;

	if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
		buf->error = 0;
		buf->state = UVC_BUF_STATE_QUEUED;
		buf->bytesused = 0;
		vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
		return buf;
	}

	spin_lock_irqsave(&queue->irqlock, flags);
	list_del(&buf->queue);
	if (!list_empty(&queue->irqqueue))
		nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
					   queue);
	else
		nextbuf = NULL;
	spin_unlock_irqrestore(&queue->irqlock, flags);

	buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);

	return nextbuf;
}