// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)                                            \
        do {                                                            \
                if (debug)                                              \
                        printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
        } while (0)

/* Instance is already queued on the job_queue */
#define TRANS_QUEUED            (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING           (1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT             (1 << 2)

/*
 * Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the
 * same offsets but for different queues.
 */
#define DST_QUEUE_OFF_BASE      (1 << 30)

enum v4l2_m2m_entity_type {
        MEM2MEM_ENT_TYPE_SOURCE,
        MEM2MEM_ENT_TYPE_SINK,
        MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
        "source",
        "sink",
        "proc"
};

/**
 * struct v4l2_m2m_dev - per-device context
 * @source:		&struct media_entity pointer with the source entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @source_pad:		&struct media_pad with the source pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink:		&struct media_entity pointer with the sink entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink_pad:		&struct media_pad with the sink pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @proc:		&struct media_entity pointer with the M2M device itself.
 * @proc_pads:		&struct media_pad with the @proc pads.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @intf_devnode:	&struct media_intf devnode pointer with the interface
 *			that controls the M2M device.
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @job_work:		worker to run queued jobs.
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
        struct v4l2_m2m_ctx     *curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
        struct media_entity     *source;
        struct media_pad        source_pad;
        struct media_entity     sink;
        struct media_pad        sink_pad;
        struct media_entity     proc;
        struct media_pad        proc_pads[2];
        struct media_intf_devnode *intf_devnode;
#endif

        struct list_head        job_queue;
        spinlock_t              job_spinlock;
        struct work_struct      job_work;

        const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
                                                enum v4l2_buf_type type)
{
        if (V4L2_TYPE_IS_OUTPUT(type))
                return &m2m_ctx->out_q_ctx;
        else
                return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
                                  enum v4l2_buf_type type)
{
        struct v4l2_m2m_queue_ctx *q_ctx;

        q_ctx = get_queue_ctx(m2m_ctx, type);
        if (!q_ctx)
                return NULL;

        return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);

struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }
        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
                                struct vb2_v4l2_buffer *vbuf)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
        struct v4l2_m2m_buffer *b, *tmp;
        struct vb2_v4l2_buffer *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
                if (b->vb.vb2_buf.index == idx) {
                        list_del(&b->list);
                        q_ctx->num_rdy--;
                        ret = &b->vb;
                        break;
                }
        }
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx)
                ret = m2m_dev->curr_ctx->priv;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 *
 * Note that this function can run on a given v4l2_m2m_ctx context,
 * but call .device_run for another context.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Another instance is running, won't run now\n");
                return;
        }

        if (list_empty(&m2m_dev->job_queue)) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("No job pending\n");
                return;
        }

        m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
                                             struct v4l2_m2m_ctx, queue);
        m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
        m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/*
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
                                 struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags_job, flags_out, flags_cap;

        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

        if (!m2m_ctx->out_q_ctx.q.streaming ||
            !m2m_ctx->cap_q_ctx.q.streaming) {
                dprintk("Streaming needs to be on for both queues\n");
                return;
        }

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

        /* If the context is aborted then don't schedule it */
        if (m2m_ctx->job_flags & TRANS_ABORT) {
                dprintk("Aborted context\n");
                goto job_unlock;
        }

        if (m2m_ctx->job_flags & TRANS_QUEUED) {
                dprintk("On job queue already\n");
                goto job_unlock;
        }

        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue) &&
            !m2m_ctx->out_q_ctx.buffered) {
                dprintk("No input buffers available\n");
                goto out_unlock;
        }
        spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue) &&
            !m2m_ctx->cap_q_ctx.buffered) {
                dprintk("No output buffers available\n");
                goto cap_unlock;
        }
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

        if (m2m_dev->m2m_ops->job_ready &&
            (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
                dprintk("Driver not ready\n");
                goto job_unlock;
        }

        list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
        m2m_ctx->job_flags |= TRANS_QUEUED;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
        return;

cap_unlock:
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
out_unlock:
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
job_unlock:
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}

/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

        __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

/**
 * v4l2_m2m_device_run_work() - run pending jobs for the context
 * @work: Work structure used for scheduling the execution of this function.
 */
static void v4l2_m2m_device_run_work(struct work_struct *work)
{
        struct v4l2_m2m_dev *m2m_dev =
                container_of(work, struct v4l2_m2m_dev, job_work);

        v4l2_m2m_try_run(m2m_dev);
}

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context:
 * 1] If the context is currently running, then the job will be aborted
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

        m2m_ctx->job_flags |= TRANS_ABORT;
        if (m2m_ctx->job_flags & TRANS_RUNNING) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                if (m2m_dev->m2m_ops->job_abort)
                        m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
                dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
                wait_event(m2m_ctx->finished,
                           !(m2m_ctx->job_flags & TRANS_RUNNING));
        } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
                list_del(&m2m_ctx->queue);
                m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("m2m_ctx: %p had been on queue and was removed\n",
                        m2m_ctx);
        } else {
                /* Do nothing, was not on queue/running */
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
        }
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
                         struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Called by an instance not currently running\n");
                return;
        }

        list_del(&m2m_dev->curr_ctx->queue);
        m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
        wake_up(&m2m_dev->curr_ctx->finished);
        m2m_dev->curr_ctx = NULL;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        /*
         * This instance might have more buffers ready, but since we do not
         * allow more than one job on the job_queue per instance, each has
         * to be scheduled separately after the previous one finishes.
         */
        __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);

        /*
         * We might be running in atomic context,
         * but the job must be run in non-atomic context.
         */
        schedule_work(&m2m_dev->job_work);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
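
/*
 * Example: a minimal sketch of how a driver's completion interrupt
 * typically hands buffers back and releases the job; my_dev and my_ctx
 * are hypothetical driver structures (my_ctx embedding a struct v4l2_fh
 * named fh), not part of this framework:
 *
 *	struct vb2_v4l2_buffer *src, *dst;
 *
 *	src = v4l2_m2m_src_buf_remove(my_ctx->fh.m2m_ctx);
 *	dst = v4l2_m2m_dst_buf_remove(my_ctx->fh.m2m_ctx);
 *	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *	v4l2_m2m_job_finish(my_dev->m2m_dev, my_ctx->fh.m2m_ctx);
 */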

int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                     struct v4l2_requestbuffers *reqbufs)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
        ret = vb2_reqbufs(vq, reqbufs);
        /*
         * If count == 0, then the owner has released all buffers and is
         * no longer the owner of the queue. Otherwise we have an owner.
         */
        if (ret == 0)
                vq->owner = reqbufs->count ? file->private_data : NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret = 0;
        unsigned int i;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_querybuf(vq, buf);

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
                if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
                        for (i = 0; i < buf->length; ++i)
                                buf->m.planes[i].m.mem_offset
                                        += DST_QUEUE_OFF_BASE;
                } else {
                        buf->m.offset += DST_QUEUE_OFF_BASE;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf)
{
        struct video_device *vdev = video_devdata(file);
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
            (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
                /* dprintk() already prefixes the function name */
                dprintk("requests cannot be used with capture buffers\n");
                return -EPERM;
        }
        ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
        if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_buffer *buf)
{
        struct video_device *vdev = video_devdata(file);
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_create_buffers *create)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
        return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                    struct v4l2_exportbuffer *eb)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
        return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      enum v4l2_buf_type type)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, type);
        ret = vb2_streamon(vq, type);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_dev *m2m_dev;
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags_job, flags;
        int ret;

        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        q_ctx = get_queue_ctx(m2m_ctx, type);
        ret = vb2_streamoff(&q_ctx->q, type);
        if (ret)
                return ret;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        /* We should not be scheduled anymore, since we're dropping a queue. */
        if (m2m_ctx->job_flags & TRANS_QUEUED)
                list_del(&m2m_ctx->queue);
        m2m_ctx->job_flags = 0;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        /*
         * Drop queue, since streamoff returns device to the same state as
         * after calling reqbufs.
         */
        INIT_LIST_HEAD(&q_ctx->rdy_queue);
        q_ctx->num_rdy = 0;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        if (m2m_dev->curr_ctx == m2m_ctx) {
                m2m_dev->curr_ctx = NULL;
                wake_up(&m2m_ctx->finished);
        }
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       struct poll_table_struct *wait)
{
        struct video_device *vfd = video_devdata(file);
        __poll_t req_events = poll_requested_events(wait);
        struct vb2_queue *src_q, *dst_q;
        struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
        __poll_t rc = 0;
        unsigned long flags;

        src_q = v4l2_m2m_get_src_vq(m2m_ctx);
        dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

        poll_wait(file, &src_q->done_wq, wait);
        poll_wait(file, &dst_q->done_wq, wait);

        if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
                struct v4l2_fh *fh = file->private_data;

                poll_wait(file, &fh->wait, wait);
                if (v4l2_event_pending(fh))
                        rc = EPOLLPRI;
                if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)))
                        return rc;
        }

        /*
         * There has to be at least one buffer queued on each queued_list,
         * which means either in driver already or waiting for driver to
         * claim it and start processing.
         */
        if ((!src_q->streaming || src_q->error ||
             list_empty(&src_q->queued_list)) &&
            (!dst_q->streaming || dst_q->error ||
             list_empty(&dst_q->queued_list))) {
                rc |= EPOLLERR;
                goto end;
        }

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (list_empty(&dst_q->done_list)) {
                /*
                 * If the last buffer was dequeued from the capture queue,
                 * return immediately. DQBUF will return -EPIPE.
                 */
                if (dst_q->last_buffer_dequeued) {
                        spin_unlock_irqrestore(&dst_q->done_lock, flags);
                        return rc | EPOLLIN | EPOLLRDNORM;
                }
        }
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (!list_empty(&src_q->done_list))
                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
                                          done_entry);
        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE ||
                       src_vb->state == VB2_BUF_STATE_ERROR))
                rc |= EPOLLOUT | EPOLLWRNORM;
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (!list_empty(&dst_q->done_list))
                dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
                                          done_entry);
        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE ||
                       dst_vb->state == VB2_BUF_STATE_ERROR))
                rc |= EPOLLIN | EPOLLRDNORM;
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
        return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        struct vb2_queue *vq;

        if (offset < DST_QUEUE_OFF_BASE) {
                vq = v4l2_m2m_get_src_vq(m2m_ctx);
        } else {
                vq = v4l2_m2m_get_dst_vq(m2m_ctx);
                vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
        }

        return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
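
/*
 * Example: userspace simply mmap()s the offset reported by
 * VIDIOC_QUERYBUF; for CAPTURE buffers that offset already includes
 * DST_QUEUE_OFF_BASE, which is stripped again above before the call is
 * handed to vb2 (a sketch with a hypothetical fd and a struct v4l2_buffer
 * named buf filled in by QUERYBUF):
 *
 *	void *p = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, buf.m.offset);
 */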

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
        media_remove_intf_links(&m2m_dev->intf_devnode->intf);
        media_devnode_remove(m2m_dev->intf_devnode);

        media_entity_remove_links(m2m_dev->source);
        media_entity_remove_links(&m2m_dev->sink);
        media_entity_remove_links(&m2m_dev->proc);
        media_device_unregister_entity(m2m_dev->source);
        media_device_unregister_entity(&m2m_dev->sink);
        media_device_unregister_entity(&m2m_dev->proc);
        kfree(m2m_dev->source->name);
        kfree(m2m_dev->sink.name);
        kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);

static int v4l2_m2m_register_entity(struct media_device *mdev,
        struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
        struct video_device *vdev, int function)
{
        struct media_entity *entity;
        struct media_pad *pads;
        char *name;
        unsigned int len;
        int num_pads;
        int ret;

        switch (type) {
        case MEM2MEM_ENT_TYPE_SOURCE:
                entity = m2m_dev->source;
                pads = &m2m_dev->source_pad;
                pads[0].flags = MEDIA_PAD_FL_SOURCE;
                num_pads = 1;
                break;
        case MEM2MEM_ENT_TYPE_SINK:
                entity = &m2m_dev->sink;
                pads = &m2m_dev->sink_pad;
                pads[0].flags = MEDIA_PAD_FL_SINK;
                num_pads = 1;
                break;
        case MEM2MEM_ENT_TYPE_PROC:
                entity = &m2m_dev->proc;
                pads = m2m_dev->proc_pads;
                pads[0].flags = MEDIA_PAD_FL_SINK;
                pads[1].flags = MEDIA_PAD_FL_SOURCE;
                num_pads = 2;
                break;
        default:
                return -EINVAL;
        }

        entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
        if (type != MEM2MEM_ENT_TYPE_PROC) {
                entity->info.dev.major = VIDEO_MAJOR;
                entity->info.dev.minor = vdev->minor;
        }
        len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
        name = kmalloc(len, GFP_KERNEL);
        if (!name)
                return -ENOMEM;
        snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
        entity->name = name;
        entity->function = function;

        ret = media_entity_pads_init(entity, num_pads, pads);
        if (ret)
                return ret;
        ret = media_device_register_entity(mdev, entity);
        if (ret)
                return ret;

        return 0;
}

int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
                struct video_device *vdev, int function)
{
        struct media_device *mdev = vdev->v4l2_dev->mdev;
        struct media_link *link;
        int ret;

        if (!mdev)
                return 0;

        /*
         * A memory-to-memory device consists of two DMA engine entities
         * and one video processing entity.
         * The DMA engine entities are linked to a V4L interface.
         */

        /* Create the three entities with their pads */
        m2m_dev->source = &vdev->entity;
        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
                        MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
        if (ret)
                return ret;
        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
                        MEM2MEM_ENT_TYPE_PROC, vdev, function);
        if (ret)
                goto err_rel_entity0;
        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
                        MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
        if (ret)
                goto err_rel_entity1;

        /* Connect the three entities */
        ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (ret)
                goto err_rel_entity2;

        ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (ret)
                goto err_rm_links0;

        /* Create video interface */
        m2m_dev->intf_devnode = media_devnode_create(mdev,
                        MEDIA_INTF_T_V4L_VIDEO, 0,
                        VIDEO_MAJOR, vdev->minor);
        if (!m2m_dev->intf_devnode) {
                ret = -ENOMEM;
                goto err_rm_links1;
        }

        /* Connect the two DMA engines to the interface */
        link = media_create_intf_link(m2m_dev->source,
                        &m2m_dev->intf_devnode->intf,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (!link) {
                ret = -ENOMEM;
                goto err_rm_devnode;
        }

        link = media_create_intf_link(&m2m_dev->sink,
                        &m2m_dev->intf_devnode->intf,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (!link) {
                ret = -ENOMEM;
                goto err_rm_intf_link;
        }
        return 0;

err_rm_intf_link:
        media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
        media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
        media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
        media_entity_remove_links(&m2m_dev->proc);
        media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
        media_device_unregister_entity(&m2m_dev->proc);
        kfree(m2m_dev->proc.name);
err_rel_entity1:
        media_device_unregister_entity(&m2m_dev->sink);
        kfree(m2m_dev->sink.name);
err_rel_entity0:
        media_device_unregister_entity(m2m_dev->source);
        kfree(m2m_dev->source->name);
        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
        struct v4l2_m2m_dev *m2m_dev;

        if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
                return ERR_PTR(-EINVAL);

        m2m_dev = kzalloc(sizeof(*m2m_dev), GFP_KERNEL);
        if (!m2m_dev)
                return ERR_PTR(-ENOMEM);

        m2m_dev->curr_ctx = NULL;
        m2m_dev->m2m_ops = m2m_ops;
        INIT_LIST_HEAD(&m2m_dev->job_queue);
        spin_lock_init(&m2m_dev->job_spinlock);
        INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);

        return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
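
/*
 * Example: a minimal sketch of the probe-time setup; my_device_run and
 * my_job_abort are hypothetical driver callbacks (device_run is the only
 * mandatory one, as checked above):
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_abort	= my_job_abort,
 *	};
 *
 *	my_dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(my_dev->m2m_dev))
 *		return PTR_ERR(my_dev->m2m_dev);
 */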

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
        kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);
908a0d7c
MS
893struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
894 void *drv_priv,
895 int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
7f98639d
PO
896{
897 struct v4l2_m2m_ctx *m2m_ctx;
898 struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
908a0d7c 899 int ret;
7f98639d
PO
900
901 m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
902 if (!m2m_ctx)
903 return ERR_PTR(-ENOMEM);
904
908a0d7c 905 m2m_ctx->priv = drv_priv;
7f98639d 906 m2m_ctx->m2m_dev = m2m_dev;
908a0d7c 907 init_waitqueue_head(&m2m_ctx->finished);
7f98639d 908
908a0d7c
MS
909 out_q_ctx = &m2m_ctx->out_q_ctx;
910 cap_q_ctx = &m2m_ctx->cap_q_ctx;
7f98639d
PO
911
912 INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
913 INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
908a0d7c
MS
914 spin_lock_init(&out_q_ctx->rdy_spinlock);
915 spin_lock_init(&cap_q_ctx->rdy_spinlock);
7f98639d
PO
916
917 INIT_LIST_HEAD(&m2m_ctx->queue);
918
908a0d7c
MS
919 ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
920
921 if (ret)
922 goto err;
8e6e8f93 923 /*
913f3ec2
EG
924 * Both queues should use same the mutex to lock the m2m context.
925 * This lock is used in some v4l2_m2m_* helpers.
8e6e8f93 926 */
913f3ec2
EG
927 if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
928 ret = -EINVAL;
929 goto err;
930 }
931 m2m_ctx->q_lock = out_q_ctx->q.lock;
7f98639d
PO
932
933 return m2m_ctx;
908a0d7c
MS
934err:
935 kfree(m2m_ctx);
936 return ERR_PTR(ret);
7f98639d
PO
937}
938EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
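
/*
 * Example: a minimal sketch of a driver open() creating the per-file
 * context; my_queue_init is a hypothetical callback that must set up
 * both vb2 queues with the same lock, as checked above:
 *
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(my_dev->m2m_dev, ctx,
 *					    my_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);
 */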

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
        vb2_queue_release(&m2m_ctx->out_q_ctx.q);

        kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
                        struct vb2_v4l2_buffer *vbuf)
{
        struct v4l2_m2m_buffer *b = container_of(vbuf,
                                struct v4l2_m2m_buffer, vb);
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags;

        q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
        if (!q_ctx)
                return;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_add_tail(&b->list, &q_ctx->rdy_queue);
        q_ctx->num_rdy++;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
                                struct vb2_v4l2_buffer *cap_vb,
                                bool copy_frame_flags)
{
        u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

        if (copy_frame_flags)
                mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
                        V4L2_BUF_FLAG_BFRAME;

        cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;

        if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
                cap_vb->timecode = out_vb->timecode;
        cap_vb->field = out_vb->field;
        cap_vb->flags &= ~mask;
        cap_vb->flags |= out_vb->flags & mask;
        cap_vb->vb2_buf.copied_timestamp = 1;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);
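
/*
 * Example: a typical device_run() copies the source metadata to the
 * destination buffer before kicking the hardware (a sketch; my_hw_start
 * is a hypothetical driver function):
 *
 *	struct vb2_v4l2_buffer *src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *	struct vb2_v4l2_buffer *dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *
 *	v4l2_m2m_buf_copy_metadata(src, dst, true);
 *	my_hw_start(ctx, src, dst);
 */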

void v4l2_m2m_request_queue(struct media_request *req)
{
        struct media_request_object *obj, *obj_safe;
        struct v4l2_m2m_ctx *m2m_ctx = NULL;

        /*
         * Queue all objects. Note that buffer objects are at the end of the
         * objects list, after all other object types. Once buffer objects
         * are queued, the driver might delete them immediately (if the driver
         * processes the buffer at once), so we have to use
         * list_for_each_entry_safe() to handle the case where the object we
         * queue is deleted.
         */
        list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
                struct v4l2_m2m_ctx *m2m_ctx_obj;
                struct vb2_buffer *vb;

                if (!obj->ops->queue)
                        continue;

                if (vb2_request_object_is_buffer(obj)) {
                        /* Sanity checks */
                        vb = container_of(obj, struct vb2_buffer, req_obj);
                        WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
                        m2m_ctx_obj = container_of(vb->vb2_queue,
                                                   struct v4l2_m2m_ctx,
                                                   out_q_ctx.q);
                        WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
                        m2m_ctx = m2m_ctx_obj;
                }

                /*
                 * The buffer we queue here can in theory be immediately
                 * unbound, hence the use of list_for_each_entry_safe()
                 * above and why we call the queue op last.
                 */
                obj->ops->queue(obj);
        }

        WARN_ON(!m2m_ctx);

        if (m2m_ctx)
                v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
                           struct v4l2_requestbuffers *rb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
                               struct v4l2_create_buffers *create)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
                            struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
                        struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
                         struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
                               struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
                          struct v4l2_exportbuffer *eb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
                            enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
                             enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
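
/*
 * Example: drivers that store the m2m context in their v4l2_fh can plug
 * these helpers straight into their ioctl table (a sketch; other ops
 * elided):
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		...
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 */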

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
        struct v4l2_fh *fh = file->private_data;
        struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
        __poll_t ret;

        if (m2m_ctx->q_lock)
                mutex_lock(m2m_ctx->q_lock);

        ret = v4l2_m2m_poll(file, m2m_ctx, wait);

        if (m2m_ctx->q_lock)
                mutex_unlock(m2m_ctx->q_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);