/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");
static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...) \
	do { \
		if (debug) \
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg); \
	} while (0)
/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)
enum v4l2_m2m_entity_type {
	MEM2MEM_ENT_TYPE_SOURCE,
	MEM2MEM_ENT_TYPE_SINK,
	MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
	"source",
	"sink",
	"proc"
};
/**
 * struct v4l2_m2m_dev - per-device context
 * @source:		&struct media_entity pointer with the source entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @source_pad:		&struct media_pad with the source pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink:		&struct media_entity pointer with the sink entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink_pad:		&struct media_pad with the sink pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @proc:		&struct media_entity pointer with the M2M device itself.
 * @proc_pads:		&struct media_pad with the @proc pads.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @intf_devnode:	&struct media_intf devnode pointer with the interface
 *			that controls the M2M device.
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @job_work:		worker to run queued jobs.
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_entity	*source;
	struct media_pad	source_pad;
	struct media_entity	sink;
	struct media_pad	sink_pad;
	struct media_entity	proc;
	struct media_pad	proc_pads[2];
	struct media_intf_devnode *intf_devnode;
#endif

	struct list_head	job_queue;
	spinlock_t		job_spinlock;
	struct work_struct	job_work;

	const struct v4l2_m2m_ops *m2m_ops;
};
static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);
struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);
struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);
struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);
/*
 * Scheduling handlers
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
/*
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 *
 * Note that this function can run on a given v4l2_m2m_ctx context,
 * but call .device_run for another context.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
/*
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags_job, flags_out, flags_cap;

	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		dprintk("Aborted context\n");
		goto job_unlock;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		dprintk("On job queue already\n");
		goto job_unlock;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		dprintk("No input buffers available\n");
		goto out_unlock;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		dprintk("No output buffers available\n");
		goto cap_unlock;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		dprintk("Driver not ready\n");
		goto job_unlock;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
	return;

cap_unlock:
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
out_unlock:
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
job_unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}
/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
/*
 * v4l2_m2m_device_run_work() - run pending jobs for the context
 * @work: Work structure used for scheduling the execution of this function.
 */
static void v4l2_m2m_device_run_work(struct work_struct *work)
{
	struct v4l2_m2m_dev *m2m_dev =
		container_of(work, struct v4l2_m2m_dev, job_work);

	v4l2_m2m_try_run(m2m_dev);
}
/*
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		if (m2m_dev->m2m_ops->job_abort)
			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);

	/* We might be running in atomic context,
	 * but the job must be run in non-atomic context.
	 */
	schedule_work(&m2m_dev->job_work);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and is
	   no longer the owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
	    (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		dprintk("%s: requests cannot be used with capture buffers\n",
			__func__);
		return -EPERM;
	}
	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
	if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	__poll_t rc = 0;
	unsigned long flags;

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			rc = EPOLLPRI;
		if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)))
			return rc;
	}

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || src_q->error ||
	     list_empty(&src_q->queued_list)) &&
	    (!dst_q->streaming || dst_q->error ||
	     list_empty(&dst_q->queued_list))) {
		rc |= EPOLLERR;
		goto end;
	}

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued) {
			spin_unlock_irqrestore(&dst_q->done_lock, flags);
			return rc | EPOLLIN | EPOLLRDNORM;
		}
	}
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
	media_devnode_remove(m2m_dev->intf_devnode);

	media_entity_remove_links(m2m_dev->source);
	media_entity_remove_links(&m2m_dev->sink);
	media_entity_remove_links(&m2m_dev->proc);
	media_device_unregister_entity(m2m_dev->source);
	media_device_unregister_entity(&m2m_dev->sink);
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->source->name);
	kfree(m2m_dev->sink.name);
	kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);
static int v4l2_m2m_register_entity(struct media_device *mdev,
	struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
	struct video_device *vdev, int function)
{
	struct media_entity *entity;
	struct media_pad *pads;
	char *name;
	unsigned int len;
	int num_pads;
	int ret;

	switch (type) {
	case MEM2MEM_ENT_TYPE_SOURCE:
		entity = m2m_dev->source;
		pads = &m2m_dev->source_pad;
		pads[0].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_SINK:
		entity = &m2m_dev->sink;
		pads = &m2m_dev->sink_pad;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_PROC:
		entity = &m2m_dev->proc;
		pads = m2m_dev->proc_pads;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		pads[1].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 2;
		break;
	default:
		return -EINVAL;
	}

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (type != MEM2MEM_ENT_TYPE_PROC) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}
	len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
	name = kmalloc(len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
	entity->name = name;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret)
		return ret;

	ret = media_device_register_entity(mdev, entity);
	if (ret)
		return ret;

	return 0;
}
*m2m_dev
,
779 struct video_device
*vdev
, int function
)
781 struct media_device
*mdev
= vdev
->v4l2_dev
->mdev
;
782 struct media_link
*link
;
788 /* A memory-to-memory device consists in two
789 * DMA engine and one video processing entities.
790 * The DMA engine entities are linked to a V4L interface
793 /* Create the three entities with their pads */
794 m2m_dev
->source
= &vdev
->entity
;
795 ret
= v4l2_m2m_register_entity(mdev
, m2m_dev
,
796 MEM2MEM_ENT_TYPE_SOURCE
, vdev
, MEDIA_ENT_F_IO_V4L
);
799 ret
= v4l2_m2m_register_entity(mdev
, m2m_dev
,
800 MEM2MEM_ENT_TYPE_PROC
, vdev
, function
);
802 goto err_rel_entity0
;
803 ret
= v4l2_m2m_register_entity(mdev
, m2m_dev
,
804 MEM2MEM_ENT_TYPE_SINK
, vdev
, MEDIA_ENT_F_IO_V4L
);
806 goto err_rel_entity1
;
808 /* Connect the three entities */
809 ret
= media_create_pad_link(m2m_dev
->source
, 0, &m2m_dev
->proc
, 1,
810 MEDIA_LNK_FL_IMMUTABLE
| MEDIA_LNK_FL_ENABLED
);
812 goto err_rel_entity2
;
814 ret
= media_create_pad_link(&m2m_dev
->proc
, 0, &m2m_dev
->sink
, 0,
815 MEDIA_LNK_FL_IMMUTABLE
| MEDIA_LNK_FL_ENABLED
);
819 /* Create video interface */
820 m2m_dev
->intf_devnode
= media_devnode_create(mdev
,
821 MEDIA_INTF_T_V4L_VIDEO
, 0,
822 VIDEO_MAJOR
, vdev
->minor
);
823 if (!m2m_dev
->intf_devnode
) {
828 /* Connect the two DMA engines to the interface */
829 link
= media_create_intf_link(m2m_dev
->source
,
830 &m2m_dev
->intf_devnode
->intf
,
831 MEDIA_LNK_FL_IMMUTABLE
| MEDIA_LNK_FL_ENABLED
);
837 link
= media_create_intf_link(&m2m_dev
->sink
,
838 &m2m_dev
->intf_devnode
->intf
,
839 MEDIA_LNK_FL_IMMUTABLE
| MEDIA_LNK_FL_ENABLED
);
842 goto err_rm_intf_link
;
847 media_remove_intf_links(&m2m_dev
->intf_devnode
->intf
);
849 media_devnode_remove(m2m_dev
->intf_devnode
);
851 media_entity_remove_links(&m2m_dev
->sink
);
853 media_entity_remove_links(&m2m_dev
->proc
);
854 media_entity_remove_links(m2m_dev
->source
);
856 media_device_unregister_entity(&m2m_dev
->proc
);
857 kfree(m2m_dev
->proc
.name
);
859 media_device_unregister_entity(&m2m_dev
->sink
);
860 kfree(m2m_dev
->sink
.name
);
862 media_device_unregister_entity(m2m_dev
->source
);
863 kfree(m2m_dev
->source
->name
);
867 EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller
);
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);
	INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;
	/*
	 * Both queues should use the same mutex to lock the m2m context.
	 * This lock is used in some v4l2_m2m_* helpers.
	 */
	if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
		ret = -EINVAL;
		goto err;
	}
	m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb,
				bool copy_frame_flags)
{
	u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

	if (copy_frame_flags)
		mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
			V4L2_BUF_FLAG_BFRAME;

	cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;

	if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
		cap_vb->timecode = out_vb->timecode;
	cap_vb->field = out_vb->field;
	cap_vb->flags &= ~mask;
	cap_vb->flags |= out_vb->flags & mask;
	cap_vb->vb2_buf.copied_timestamp = 1;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);
void v4l2_m2m_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;
	struct v4l2_m2m_ctx *m2m_ctx = NULL;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		struct v4l2_m2m_ctx *m2m_ctx_obj;
		struct vb2_buffer *vb;

		if (!obj->ops->queue)
			continue;

		if (vb2_request_object_is_buffer(obj)) {
			/* Sanity checks */
			vb = container_of(obj, struct vb2_buffer, req_obj);
			WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
			m2m_ctx_obj = container_of(vb->vb2_queue,
						   struct v4l2_m2m_ctx,
						   out_q_ctx.q);
			WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
			m2m_ctx = m2m_ctx_obj;
		}

		/*
		 * The buffer we queue here can in theory be immediately
		 * unbound, hence the use of list_for_each_entry_safe()
		 * above and why we call the queue op last.
		 */
		obj->ops->queue(obj);
	}

	WARN_ON(!m2m_ctx);

	if (m2m_ctx)
		v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
/* Videobuf2 ioctl helpers */
int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
				struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
				struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queues.
 */
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	__poll_t ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);