/*
 * uvc_queue.c -- USB Video Class driver - Buffers management
 *
 * Copyright (C) 2005-2010
 *     Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#include "uvcvideo.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * Video queues are initialized by uvc_queue_init(). The function performs
 * basic initialization of the uvc_video_queue struct and never fails.
 *
 * Video buffer allocation and freeing are performed by uvc_alloc_buffers()
 * and uvc_free_buffers() respectively. Both acquire the video queue lock,
 * while the internal helper __uvc_free_buffers() must be called with the
 * lock held (so that allocation can free previously allocated buffers).
 * Trying to free buffers that are mapped to user space will return -EBUSY.
 *
 * Video buffers are managed using two queues. However, unlike most USB video
 * drivers that use an in queue and an out queue, we use a main queue to hold
 * all queued buffers (both 'empty' and 'done' buffers), and an irq queue to
 * hold empty buffers. This design (copied from video-buf) minimizes the
 * locking needed in interrupt context, as only one queue is shared between
 * interrupt and user contexts.
 *
 * Use cases
 * ---------
 *
 * Unless stated otherwise, all operations that modify the irq buffers queue
 * are protected by the irq spinlock.
 *
 * 1. The user queues the buffers, starts streaming and dequeues a buffer.
 *
 * The buffers are added to the main and irq queues. Both operations are
 * protected by the queue lock, and the latter is protected by the irq
 * spinlock as well.
 *
 * The completion handler fetches a buffer from the irq queue and fills it
 * with video data. If no buffer is available (irq queue empty), the handler
 * returns immediately.
 *
 * When the buffer is full, the completion handler removes it from the irq
 * queue, marks it as done (UVC_BUF_STATE_DONE) and wakes its wait queue.
 * At that point, any process waiting on the buffer will be woken up. If a
 * process tries to dequeue a buffer after it has been marked done, the
 * dequeuing will succeed immediately.
 *
 * 2. Buffers are queued, user is waiting on a buffer and the device gets
 * disconnected.
 *
 * When the device is disconnected, the kernel calls the completion handler
 * with an appropriate status code. The handler marks all buffers in the
 * irq queue as being erroneous (UVC_BUF_STATE_ERROR) and wakes them up so
 * that any process waiting on a buffer gets woken up.
 *
 * Waking up only the first buffer on the irq list is not enough, as the
 * process waiting on the buffer might restart the dequeue operation
 * immediately.
 *
 */

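/*
 * Illustrative sketch (not compiled): how the two-queue design maps onto
 * struct uvc_buffer. A freshly queued buffer is linked on both lists at
 * once through two independent list_head members; the completion handler
 * only ever touches the irq queue, while uvc_dequeue_buffer() only walks
 * the main queue. The 'example_*' name below is hypothetical.
 */
#if 0
static void example_two_queue_invariant(struct uvc_video_queue *queue,
		struct uvc_buffer *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	/* buf->stream links the buffer on the main queue... */
	list_add_tail(&buf->stream, &queue->mainqueue);
	/* ...and buf->queue links the same buffer on the irq queue. */
	list_add_tail(&buf->queue, &queue->irqqueue);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	/* Interrupt context later unlinks it from the irq queue only; the
	 * buffer stays on the main queue until user space dequeues it. */
}
#endif
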
void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
		int drop_corrupted)
{
	mutex_init(&queue->mutex);
	spin_lock_init(&queue->irqlock);
	INIT_LIST_HEAD(&queue->mainqueue);
	INIT_LIST_HEAD(&queue->irqqueue);
	queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;
	queue->type = type;
}

/*
 * Free the video buffers.
 *
 * This function must be called with the queue lock held.
 */
static int __uvc_free_buffers(struct uvc_video_queue *queue)
{
	unsigned int i;

	for (i = 0; i < queue->count; ++i) {
		if (queue->buffer[i].vma_use_count != 0)
			return -EBUSY;
	}

	if (queue->count) {
		uvc_queue_cancel(queue, 0);
		INIT_LIST_HEAD(&queue->mainqueue);
		vfree(queue->mem);
		queue->count = 0;
	}

	return 0;
}

int uvc_free_buffers(struct uvc_video_queue *queue)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = __uvc_free_buffers(queue);
	mutex_unlock(&queue->mutex);

	return ret;
}

/*
 * Allocate the video buffers.
 *
 * Pages are reserved to make sure they will not be swapped, as they will be
 * filled in the URB completion handler.
 *
 * Buffers will be individually mapped, so they must all be page aligned.
 */
int uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
		unsigned int buflength)
{
	unsigned int bufsize = PAGE_ALIGN(buflength);
	unsigned int i;
	void *mem = NULL;
	int ret;

	if (nbuffers > UVC_MAX_VIDEO_BUFFERS)
		nbuffers = UVC_MAX_VIDEO_BUFFERS;

	mutex_lock(&queue->mutex);

	if ((ret = __uvc_free_buffers(queue)) < 0)
		goto done;

	/* Bail out if no buffers should be allocated. */
	if (nbuffers == 0)
		goto done;

	/* Decrement the number of buffers until allocation succeeds. */
	for (; nbuffers > 0; --nbuffers) {
		mem = vmalloc_32(nbuffers * bufsize);
		if (mem != NULL)
			break;
	}

	if (mem == NULL) {
		ret = -ENOMEM;
		goto done;
	}

	for (i = 0; i < nbuffers; ++i) {
		memset(&queue->buffer[i], 0, sizeof queue->buffer[i]);
		queue->buffer[i].buf.index = i;
		queue->buffer[i].buf.m.offset = i * bufsize;
		queue->buffer[i].buf.length = buflength;
		queue->buffer[i].buf.type = queue->type;
		queue->buffer[i].buf.field = V4L2_FIELD_NONE;
		queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP;
		queue->buffer[i].buf.flags = 0;

		/* Use the local pointer, as queue->mem is only updated
		 * after this loop. */
		queue->buffer[i].mem = mem + i * bufsize;
		queue->buffer[i].length = buflength;
		init_waitqueue_head(&queue->buffer[i].wait);
	}

	queue->mem = mem;
	queue->count = nbuffers;
	queue->buf_size = bufsize;
	ret = nbuffers;

done:
	mutex_unlock(&queue->mutex);
	return ret;
}

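/*
 * Illustrative sketch (not compiled): requesting buffers. The return value
 * is the number of buffers actually allocated, which may be lower than
 * requested when memory is tight (the allocation loop above shrinks the
 * request until vmalloc_32() succeeds), so the caller must use the returned
 * count. 'example_request_buffers' and the frame size are hypothetical.
 */
#if 0
static int example_request_buffers(struct uvc_video_queue *queue)
{
	int ret;

	/* Ask for 8 buffers of 614400 bytes (a 640x480 YUYV frame). */
	ret = uvc_alloc_buffers(queue, 8, 614400);
	if (ret < 0)
		return ret;

	/* ret is the granted count; it may be anywhere in [1..8]. */
	return ret;
}
#endif
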
/*
 * Check if buffers have been allocated.
 */
int uvc_queue_allocated(struct uvc_video_queue *queue)
{
	int allocated;

	mutex_lock(&queue->mutex);
	allocated = queue->count != 0;
	mutex_unlock(&queue->mutex);

	return allocated;
}

static void __uvc_query_buffer(struct uvc_buffer *buf,
		struct v4l2_buffer *v4l2_buf)
{
	memcpy(v4l2_buf, &buf->buf, sizeof *v4l2_buf);

	if (buf->vma_use_count)
		v4l2_buf->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (buf->state) {
	case UVC_BUF_STATE_ERROR:
	case UVC_BUF_STATE_DONE:
		v4l2_buf->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case UVC_BUF_STATE_QUEUED:
	case UVC_BUF_STATE_ACTIVE:
	case UVC_BUF_STATE_READY:
		v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case UVC_BUF_STATE_IDLE:
	default:
		break;
	}
}

int uvc_query_buffer(struct uvc_video_queue *queue,
		struct v4l2_buffer *v4l2_buf)
{
	int ret = 0;

	mutex_lock(&queue->mutex);
	if (v4l2_buf->index >= queue->count) {
		ret = -EINVAL;
		goto done;
	}

	__uvc_query_buffer(&queue->buffer[v4l2_buf->index], v4l2_buf);

done:
	mutex_unlock(&queue->mutex);
	return ret;
}

/*
 * Queue a video buffer. Attempting to queue a buffer that has already been
 * queued will return -EINVAL.
 */
int uvc_queue_buffer(struct uvc_video_queue *queue,
	struct v4l2_buffer *v4l2_buf)
{
	struct uvc_buffer *buf;
	unsigned long flags;
	int ret = 0;

	uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index);

	if (v4l2_buf->type != queue->type ||
	    v4l2_buf->memory != V4L2_MEMORY_MMAP) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
			"and/or memory (%u).\n", v4l2_buf->type,
			v4l2_buf->memory);
		return -EINVAL;
	}

	mutex_lock(&queue->mutex);
	if (v4l2_buf->index >= queue->count) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Out of range index.\n");
		ret = -EINVAL;
		goto done;
	}

	buf = &queue->buffer[v4l2_buf->index];
	if (buf->state != UVC_BUF_STATE_IDLE) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state "
			"(%u).\n", buf->state);
		ret = -EINVAL;
		goto done;
	}

	if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    v4l2_buf->bytesused > buf->buf.length) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
		ret = -EINVAL;
		goto done;
	}

	spin_lock_irqsave(&queue->irqlock, flags);
	if (queue->flags & UVC_QUEUE_DISCONNECTED) {
		spin_unlock_irqrestore(&queue->irqlock, flags);
		ret = -ENODEV;
		goto done;
	}
	buf->state = UVC_BUF_STATE_QUEUED;
	if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		buf->bytesused = 0;
	else
		buf->bytesused = v4l2_buf->bytesused;

	list_add_tail(&buf->stream, &queue->mainqueue);
	list_add_tail(&buf->queue, &queue->irqqueue);
	spin_unlock_irqrestore(&queue->irqlock, flags);

done:
	mutex_unlock(&queue->mutex);
	return ret;
}

static int uvc_queue_waiton(struct uvc_buffer *buf, int nonblocking)
{
	if (nonblocking) {
		return (buf->state != UVC_BUF_STATE_QUEUED &&
			buf->state != UVC_BUF_STATE_ACTIVE &&
			buf->state != UVC_BUF_STATE_READY)
			? 0 : -EAGAIN;
	}

	return wait_event_interruptible(buf->wait,
		buf->state != UVC_BUF_STATE_QUEUED &&
		buf->state != UVC_BUF_STATE_ACTIVE &&
		buf->state != UVC_BUF_STATE_READY);
}

/*
 * Dequeue a video buffer. If nonblocking is false, block until a buffer is
 * available.
 */
int uvc_dequeue_buffer(struct uvc_video_queue *queue,
	struct v4l2_buffer *v4l2_buf, int nonblocking)
{
	struct uvc_buffer *buf;
	int ret = 0;

	if (v4l2_buf->type != queue->type ||
	    v4l2_buf->memory != V4L2_MEMORY_MMAP) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
			"and/or memory (%u).\n", v4l2_buf->type,
			v4l2_buf->memory);
		return -EINVAL;
	}

	mutex_lock(&queue->mutex);
	if (list_empty(&queue->mainqueue)) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Empty buffer queue.\n");
		ret = -EINVAL;
		goto done;
	}

	buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
	if ((ret = uvc_queue_waiton(buf, nonblocking)) < 0)
		goto done;

	uvc_trace(UVC_TRACE_CAPTURE, "Dequeuing buffer %u (%u, %u bytes).\n",
		buf->buf.index, buf->state, buf->buf.bytesused);

	switch (buf->state) {
	case UVC_BUF_STATE_ERROR:
		uvc_trace(UVC_TRACE_CAPTURE, "[W] Corrupted data "
			"(transmission error).\n");
		ret = -EIO;
		/* fall through */
	case UVC_BUF_STATE_DONE:
		buf->state = UVC_BUF_STATE_IDLE;
		break;

	case UVC_BUF_STATE_IDLE:
	case UVC_BUF_STATE_QUEUED:
	case UVC_BUF_STATE_ACTIVE:
	case UVC_BUF_STATE_READY:
	default:
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state %u "
			"(driver bug?).\n", buf->state);
		ret = -EINVAL;
		goto done;
	}

	list_del(&buf->stream);
	__uvc_query_buffer(buf, v4l2_buf);

done:
	mutex_unlock(&queue->mutex);
	return ret;
}

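/*
 * Illustrative sketch (not compiled): the QBUF/DQBUF cycle from use case 1
 * above, as the V4L2 ioctl handlers would drive it. Error handling is
 * trimmed, buffer 0 is assumed allocated and idle, and
 * 'example_capture_one' is hypothetical.
 */
#if 0
static int example_capture_one(struct uvc_video_queue *queue)
{
	struct v4l2_buffer v4l2_buf;
	int ret;

	memset(&v4l2_buf, 0, sizeof v4l2_buf);
	v4l2_buf.index = 0;
	v4l2_buf.type = queue->type;
	v4l2_buf.memory = V4L2_MEMORY_MMAP;

	/* Hand the buffer to the driver (VIDIOC_QBUF path). */
	ret = uvc_queue_buffer(queue, &v4l2_buf);
	if (ret < 0)
		return ret;

	/* Block until the completion handler marks the buffer done
	 * (VIDIOC_DQBUF path); -EIO signals a transmission error. */
	return uvc_dequeue_buffer(queue, &v4l2_buf, 0);
}
#endif
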
/*
 * VMA operations.
 */
static void uvc_vm_open(struct vm_area_struct *vma)
{
	struct uvc_buffer *buffer = vma->vm_private_data;
	buffer->vma_use_count++;
}

static void uvc_vm_close(struct vm_area_struct *vma)
{
	struct uvc_buffer *buffer = vma->vm_private_data;
	buffer->vma_use_count--;
}

static const struct vm_operations_struct uvc_vm_ops = {
	.open		= uvc_vm_open,
	.close		= uvc_vm_close,
};

/*
 * Memory-map a video buffer.
 *
 * This function implements video buffers memory mapping and is intended to be
 * used by the device mmap handler.
 */
int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
	struct uvc_buffer *uninitialized_var(buffer);
	struct page *page;
	unsigned long addr, start, size;
	unsigned int i;
	int ret = 0;

	start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;

	mutex_lock(&queue->mutex);

	for (i = 0; i < queue->count; ++i) {
		buffer = &queue->buffer[i];
		if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
			break;
	}

	if (i == queue->count || PAGE_ALIGN(size) != queue->buf_size) {
		ret = -EINVAL;
		goto done;
	}

	/*
	 * VM_IO marks the area as being an mmapped region for I/O to a
	 * device. It also prevents the region from being core dumped.
	 */
	vma->vm_flags |= VM_IO;

	addr = (unsigned long)buffer->mem;
#ifdef CONFIG_MMU
	while (size > 0) {
		page = vmalloc_to_page((void *)addr);
		if ((ret = vm_insert_page(vma, start, page)) < 0)
			goto done;

		start += PAGE_SIZE;
		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
#endif

	vma->vm_ops = &uvc_vm_ops;
	vma->vm_private_data = buffer;
	uvc_vm_open(vma);

done:
	mutex_unlock(&queue->mutex);
	return ret;
}

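/*
 * Illustrative sketch (not compiled, user-space side): mapping a buffer.
 * The mmap offset must be the buf.m.offset reported by VIDIOC_QUERYBUF
 * (index * buf_size, see uvc_alloc_buffers() above). Mapping buf.length
 * bytes works because the kernel rounds the VMA up to a page boundary, and
 * uvc_queue_mmap() checks that PAGE_ALIGN(size) equals queue->buf_size.
 * 'fd' and 'buf' are hypothetical.
 */
#if 0
	void *mem;

	/* buf was filled in by ioctl(fd, VIDIOC_QUERYBUF, &buf). */
	mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, buf.m.offset);
	if (mem == MAP_FAILED)
		return -errno;
#endif
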
/*
 * Poll the video queue.
 *
 * This function implements video queue polling and is intended to be used by
 * the device poll handler.
 */
unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
		poll_table *wait)
{
	struct uvc_buffer *buf;
	unsigned int mask = 0;

	mutex_lock(&queue->mutex);
	if (list_empty(&queue->mainqueue)) {
		mask |= POLLERR;
		goto done;
	}
	buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);

	poll_wait(file, &buf->wait, wait);
	if (buf->state == UVC_BUF_STATE_DONE ||
	    buf->state == UVC_BUF_STATE_ERROR) {
		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			mask |= POLLIN | POLLRDNORM;
		else
			mask |= POLLOUT | POLLWRNORM;
	}

done:
	mutex_unlock(&queue->mutex);
	return mask;
}

#ifndef CONFIG_MMU
/*
 * Get unmapped area.
 *
 * No-MMU architectures need this function to make mmap() work correctly.
 */
unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
		unsigned long pgoff)
{
	struct uvc_buffer *buffer;
	unsigned int i;
	unsigned long ret;

	mutex_lock(&queue->mutex);
	for (i = 0; i < queue->count; ++i) {
		buffer = &queue->buffer[i];
		if ((buffer->buf.m.offset >> PAGE_SHIFT) == pgoff)
			break;
	}
	if (i == queue->count) {
		ret = -EINVAL;
		goto done;
	}
	ret = (unsigned long)buffer->mem;
done:
	mutex_unlock(&queue->mutex);
	return ret;
}
#endif

/*
 * Enable or disable the video buffers queue.
 *
 * The queue must be enabled before starting video acquisition and must be
 * disabled after stopping it. This ensures that the video buffers queue
 * state can be properly initialized before buffers are accessed from the
 * interrupt handler.
 *
 * Enabling the video queue returns -EBUSY if the queue is already enabled.
 *
 * Disabling the video queue cancels the queue and removes all buffers from
 * the main queue.
 *
 * This function can't be called from interrupt context. Use
 * uvc_queue_cancel() instead.
 */
int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
{
	unsigned int i;
	int ret = 0;

	mutex_lock(&queue->mutex);
	if (enable) {
		if (uvc_queue_streaming(queue)) {
			ret = -EBUSY;
			goto done;
		}
		queue->flags |= UVC_QUEUE_STREAMING;
		queue->buf_used = 0;
	} else {
		uvc_queue_cancel(queue, 0);
		INIT_LIST_HEAD(&queue->mainqueue);

		for (i = 0; i < queue->count; ++i) {
			queue->buffer[i].error = 0;
			queue->buffer[i].state = UVC_BUF_STATE_IDLE;
		}

		queue->flags &= ~UVC_QUEUE_STREAMING;
	}

done:
	mutex_unlock(&queue->mutex);
	return ret;
}

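/*
 * Illustrative sketch (not compiled): bracketing video acquisition with
 * queue enable/disable, as a streamon handler would. 'example_streamon'
 * and 'example_start_hw' are hypothetical.
 */
#if 0
static int example_streamon(struct uvc_video_queue *queue)
{
	int ret;

	/* Must happen before URBs are submitted, so the completion
	 * handler sees a fully initialized queue. */
	ret = uvc_queue_enable(queue, 1);
	if (ret < 0)
		return ret;	/* -EBUSY if already streaming */

	ret = example_start_hw();
	if (ret < 0)
		uvc_queue_enable(queue, 0);
	return ret;
}
#endif
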
/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvc_queue_buffer will
 * fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
	struct uvc_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	while (!list_empty(&queue->irqqueue)) {
		buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
				       queue);
		list_del(&buf->queue);
		buf->state = UVC_BUF_STATE_ERROR;
		wake_up(&buf->wait);
	}
	/* This must be protected by the irqlock spinlock to avoid race
	 * conditions between uvc_queue_buffer and the disconnection event that
	 * could result in an interruptible wait in uvc_dequeue_buffer. Do not
	 * blindly replace this logic by checking for the UVC_DEV_DISCONNECTED
	 * state outside the queue code.
	 */
	if (disconnect)
		queue->flags |= UVC_QUEUE_DISCONNECTED;
	spin_unlock_irqrestore(&queue->irqlock, flags);
}

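/*
 * Illustrative sketch (not compiled): disconnect handling, use case 2
 * above. Passing disconnect=1 makes later uvc_queue_buffer() calls fail
 * with -ENODEV, closing the race described in the comment above.
 * 'example_disconnect' is hypothetical.
 */
#if 0
static void example_disconnect(struct uvc_video_queue *queue)
{
	/* Safe to call from the URB completion handler: only the irq
	 * spinlock is taken. */
	uvc_queue_cancel(queue, 1);
}
#endif
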
struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
		struct uvc_buffer *buf)
{
	struct uvc_buffer *nextbuf;
	unsigned long flags;

	if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
		buf->error = 0;
		buf->state = UVC_BUF_STATE_QUEUED;
		buf->buf.bytesused = 0;
		return buf;
	}

	spin_lock_irqsave(&queue->irqlock, flags);
	list_del(&buf->queue);
	buf->error = 0;
	buf->state = UVC_BUF_STATE_DONE;
	buf->buf.bytesused = buf->bytesused;
	if (!list_empty(&queue->irqqueue))
		nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
					   queue);
	else
		nextbuf = NULL;
	spin_unlock_irqrestore(&queue->irqlock, flags);

	wake_up(&buf->wait);
	return nextbuf;
}
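
/*
 * Illustrative sketch (not compiled): how a URB completion handler would
 * consume buffers, in the spirit of uvc_video.c. Once a frame is complete,
 * uvc_queue_next_buffer() returns the next buffer to fill, or NULL when
 * the irq queue is empty. 'example_complete_frame' and the empty-payload
 * corruption check are hypothetical.
 */
#if 0
static struct uvc_buffer *example_complete_frame(struct uvc_video_queue *queue,
		struct uvc_buffer *buf)
{
	/* Flag corrupted frames; with UVC_QUEUE_DROP_CORRUPTED set,
	 * uvc_queue_next_buffer() will requeue instead of completing. */
	if (buf->bytesused == 0)
		buf->error = 1;

	return uvc_queue_next_buffer(queue, buf);
}
#endif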