/*
 * Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Samsung EXYNOS5 SoC series G-Scaler driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>

#include <media/v4l2-ioctl.h>

#include "gsc-core.h"

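/*
 * Ask the currently running m2m context to stop and wait, with a timeout,
 * until the interrupt handler clears the GSC_CTX_STOP_REQ flag.
 */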
static int gsc_m2m_ctx_stop_req(struct gsc_ctx *ctx)
{
	struct gsc_ctx *curr_ctx;
	struct gsc_dev *gsc = ctx->gsc_dev;
	int ret;

	curr_ctx = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
	if (!gsc_m2m_pending(gsc) || (curr_ctx != ctx))
		return 0;

	gsc_ctx_state_lock_set(GSC_CTX_STOP_REQ, ctx);
	ret = wait_event_timeout(gsc->irq_queue,
			!gsc_ctx_state_is_set(GSC_CTX_STOP_REQ, ctx),
			GSC_SHUTDOWN_TIMEOUT);

	return ret == 0 ? -ETIMEDOUT : ret;
}

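/* Power the G-Scaler block up via runtime PM before processing starts. */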
static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct gsc_ctx *ctx = q->drv_priv;
	int ret;

	ret = pm_runtime_get_sync(&ctx->gsc_dev->pdev->dev);
	return ret > 0 ? 0 : ret;
}

static int gsc_m2m_stop_streaming(struct vb2_queue *q)
{
	struct gsc_ctx *ctx = q->drv_priv;
	int ret;

	ret = gsc_m2m_ctx_stop_req(ctx);
	if (ret == -ETIMEDOUT)
		gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);

	pm_runtime_put(&ctx->gsc_dev->pdev->dev);

	return 0;
}

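/*
 * Complete the current mem2mem transaction: return the active source and
 * destination buffers to vb2 in the given state and let the m2m framework
 * schedule the next queued job.
 */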
void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
{
	struct vb2_buffer *src_vb, *dst_vb;

	if (!ctx || !ctx->m2m_ctx)
		return;

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	if (src_vb && dst_vb) {
		v4l2_m2m_buf_done(src_vb, vb_state);
		v4l2_m2m_buf_done(dst_vb, vb_state);

		v4l2_m2m_job_finish(ctx->gsc_dev->m2m.m2m_dev,
				    ctx->m2m_ctx);
	}
}

static void gsc_m2m_job_abort(void *priv)
{
	struct gsc_ctx *ctx = priv;
	int ret;

	ret = gsc_m2m_ctx_stop_req(ctx);
	if (ret == -ETIMEDOUT)
		gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
}

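/*
 * Fetch the next source and destination buffers, resolve their DMA
 * addresses and propagate the timestamp from the source to the
 * destination buffer.
 */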
static int gsc_get_bufs(struct gsc_ctx *ctx)
{
	struct gsc_frame *s_frame, *d_frame;
	struct vb2_buffer *src_vb, *dst_vb;
	int ret;

	s_frame = &ctx->s_frame;
	d_frame = &ctx->d_frame;

	src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	ret = gsc_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr);
	if (ret)
		return ret;

	dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	ret = gsc_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr);
	if (ret)
		return ret;

	dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;

	return 0;
}

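/*
 * m2m device_run callback: program the source/destination addresses for
 * the next job, reconfigure the scaler whenever the context or its
 * parameters have changed, and start the hardware.
 */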
static void gsc_m2m_device_run(void *priv)
{
	struct gsc_ctx *ctx = priv;
	struct gsc_dev *gsc;
	unsigned long flags;
	int ret;
	bool is_set = false;

	if (WARN(!ctx, "null hardware context\n"))
		return;

	gsc = ctx->gsc_dev;
	spin_lock_irqsave(&gsc->slock, flags);

	set_bit(ST_M2M_PEND, &gsc->state);

	/* Reconfigure hardware if the context has changed. */
	if (gsc->m2m.ctx != ctx) {
		pr_debug("gsc->m2m.ctx = 0x%p, current_ctx = 0x%p",
			 gsc->m2m.ctx, ctx);
		ctx->state |= GSC_PARAMS;
		gsc->m2m.ctx = ctx;
	}

	is_set = (ctx->state & GSC_CTX_STOP_REQ) ? 1 : 0;
	ctx->state &= ~GSC_CTX_STOP_REQ;
	if (is_set) {
		wake_up(&gsc->irq_queue);
		goto put_device;
	}

	ret = gsc_get_bufs(ctx);
	if (ret) {
		pr_err("Wrong address");
		goto put_device;
	}

	gsc_set_prefbuf(gsc, &ctx->s_frame);
	gsc_hw_set_input_addr(gsc, &ctx->s_frame.addr, GSC_M2M_BUF_NUM);
	gsc_hw_set_output_addr(gsc, &ctx->d_frame.addr, GSC_M2M_BUF_NUM);

	if (ctx->state & GSC_PARAMS) {
		gsc_hw_set_input_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_output_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_frm_done_irq_mask(gsc, false);
		gsc_hw_set_gsc_irq_enable(gsc, true);

		if (gsc_set_scaler_info(ctx)) {
			pr_err("Scaler setup error");
			goto put_device;
		}

		gsc_hw_set_input_path(ctx);
		gsc_hw_set_in_size(ctx);
		gsc_hw_set_in_image_format(ctx);

		gsc_hw_set_output_path(ctx);
		gsc_hw_set_out_size(ctx);
		gsc_hw_set_out_image_format(ctx);

		gsc_hw_set_prescaler(ctx);
		gsc_hw_set_mainscaler(ctx);
		gsc_hw_set_rotation(ctx);
		gsc_hw_set_global_alpha(ctx);
	}

	/* update shadow registers */
	gsc_hw_set_sfr_update(ctx);

	ctx->state &= ~GSC_PARAMS;
	gsc_hw_enable_control(gsc, true);

	spin_unlock_irqrestore(&gsc->slock, flags);
	return;

put_device:
	ctx->state &= ~GSC_PARAMS;
	spin_unlock_irqrestore(&gsc->slock, flags);
}

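/*
 * vb2 queue_setup callback: report the number of planes and the payload
 * size of each plane for the currently configured format, and select the
 * DMA allocation context.
 */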
static int gsc_m2m_queue_setup(struct vb2_queue *vq,
			const struct v4l2_format *fmt,
			unsigned int *num_buffers, unsigned int *num_planes,
			unsigned int sizes[], void *allocators[])
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vq);
	struct gsc_frame *frame;
	int i;

	frame = ctx_get_frame(ctx, vq->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	if (!frame->fmt)
		return -EINVAL;

	*num_planes = frame->fmt->num_planes;
	for (i = 0; i < frame->fmt->num_planes; i++) {
		sizes[i] = frame->payload[i];
		allocators[i] = ctx->gsc_dev->alloc_ctx;
	}
	return 0;
}

static int gsc_m2m_buf_prepare(struct vb2_buffer *vb)
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct gsc_frame *frame;
	int i;

	frame = ctx_get_frame(ctx, vb->vb2_queue->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
		for (i = 0; i < frame->fmt->num_planes; i++)
			vb2_set_plane_payload(vb, i, frame->payload[i]);
	}

	return 0;
}

static void gsc_m2m_buf_queue(struct vb2_buffer *vb)
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	pr_debug("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);

	if (ctx->m2m_ctx)
		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}

static struct vb2_ops gsc_m2m_qops = {
	.queue_setup	 = gsc_m2m_queue_setup,
	.buf_prepare	 = gsc_m2m_buf_prepare,
	.buf_queue	 = gsc_m2m_buf_queue,
	.wait_prepare	 = gsc_unlock,
	.wait_finish	 = gsc_lock,
	.stop_streaming	 = gsc_m2m_stop_streaming,
	.start_streaming = gsc_m2m_start_streaming,
};

static int gsc_m2m_querycap(struct file *file, void *fh,
			   struct v4l2_capability *cap)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct gsc_dev *gsc = ctx->gsc_dev;

	strlcpy(cap->driver, gsc->pdev->name, sizeof(cap->driver));
	strlcpy(cap->card, gsc->pdev->name, sizeof(cap->card));
	strlcpy(cap->bus_info, "platform", sizeof(cap->bus_info));
	cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE |
		V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;

	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
	return 0;
}

static int gsc_m2m_enum_fmt_mplane(struct file *file, void *priv,
				   struct v4l2_fmtdesc *f)
{
	return gsc_enum_fmt_mplane(f);
}

static int gsc_m2m_g_fmt_mplane(struct file *file, void *fh,
				struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	return gsc_g_fmt_mplane(ctx, f);
}

static int gsc_m2m_try_fmt_mplane(struct file *file, void *fh,
				  struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	return gsc_try_fmt_mplane(ctx, f);
}

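/*
 * Apply a new pixel format to the source (OUTPUT) or destination (CAPTURE)
 * queue. Rejected while the queue is streaming; on success the per-plane
 * payload sizes and frame geometry are updated and the context is marked
 * for hardware reconfiguration.
 */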
static int gsc_m2m_s_fmt_mplane(struct file *file, void *fh,
				struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct vb2_queue *vq;
	struct gsc_frame *frame;
	struct v4l2_pix_format_mplane *pix;
	int i, ret = 0;

	ret = gsc_m2m_try_fmt_mplane(file, fh, f);
	if (ret)
		return ret;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);

	if (vb2_is_streaming(vq)) {
		pr_err("queue (%d) busy", f->type);
		return -EBUSY;
	}

	if (V4L2_TYPE_IS_OUTPUT(f->type))
		frame = &ctx->s_frame;
	else
		frame = &ctx->d_frame;

	pix = &f->fmt.pix_mp;
	frame->fmt = find_fmt(&pix->pixelformat, NULL, 0);
	frame->colorspace = pix->colorspace;
	if (!frame->fmt)
		return -EINVAL;

	for (i = 0; i < frame->fmt->num_planes; i++)
		frame->payload[i] = pix->plane_fmt[i].sizeimage;

	gsc_set_frame_size(frame, pix->width, pix->height);

	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		gsc_ctx_state_lock_set(GSC_PARAMS | GSC_DST_FMT, ctx);
	else
		gsc_ctx_state_lock_set(GSC_PARAMS | GSC_SRC_FMT, ctx);

	pr_debug("f_w: %d, f_h: %d", frame->f_width, frame->f_height);

	return 0;
}

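/*
 * Validate the requested buffer count against the per-variant limit and
 * forward the request to the m2m framework. A count of zero releases the
 * buffers and clears the corresponding format state flag.
 */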
static int gsc_m2m_reqbufs(struct file *file, void *fh,
			struct v4l2_requestbuffers *reqbufs)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct gsc_dev *gsc = ctx->gsc_dev;
	struct gsc_frame *frame;
	u32 max_cnt;

	max_cnt = (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
		gsc->variant->in_buf_cnt : gsc->variant->out_buf_cnt;
	if (reqbufs->count > max_cnt) {
		return -EINVAL;
	} else if (reqbufs->count == 0) {
		if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
			gsc_ctx_state_lock_clear(GSC_SRC_FMT, ctx);
		else
			gsc_ctx_state_lock_clear(GSC_DST_FMT, ctx);
	}

	frame = ctx_get_frame(ctx, reqbufs->type);

	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}

static int gsc_m2m_querybuf(struct file *file, void *fh,
			    struct v4l2_buffer *buf)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}

static int gsc_m2m_qbuf(struct file *file, void *fh,
			struct v4l2_buffer *buf)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}

static int gsc_m2m_dqbuf(struct file *file, void *fh,
			 struct v4l2_buffer *buf)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}

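/* Refuse to start streaming on a queue whose format has not been set yet. */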
static int gsc_m2m_streamon(struct file *file, void *fh,
			    enum v4l2_buf_type type)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	/* The source and destination color formats need to be set */
	if (V4L2_TYPE_IS_OUTPUT(type)) {
		if (!gsc_ctx_state_is_set(GSC_SRC_FMT, ctx))
			return -EINVAL;
	} else if (!gsc_ctx_state_is_set(GSC_DST_FMT, ctx)) {
		return -EINVAL;
	}

	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}

static int gsc_m2m_streamoff(struct file *file, void *fh,
			     enum v4l2_buf_type type)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}

/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
static int is_rectangle_enclosed(struct v4l2_rect *a, struct v4l2_rect *b)
{
	if (a->left < b->left || a->top < b->top)
		return 0;

	if (a->left + a->width > b->left + b->width)
		return 0;

	if (a->top + a->height > b->top + b->height)
		return 0;

	return 1;
}

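/*
 * G_SELECTION: report either the full frame bounds or the currently
 * configured crop/compose rectangle for the given queue.
 */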
static int gsc_m2m_g_selection(struct file *file, void *fh,
			       struct v4l2_selection *s)
{
	struct gsc_frame *frame;
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
		return -EINVAL;

	frame = ctx_get_frame(ctx, s->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = frame->f_width;
		s->r.height = frame->f_height;
		return 0;

	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_CROP:
		s->r.left = frame->crop.left;
		s->r.top = frame->crop.top;
		s->r.width = frame->crop.width;
		s->r.height = frame->crop.height;
		return 0;
	}

	return -EINVAL;
}

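/*
 * S_SELECTION: adjust the requested rectangle to hardware constraints,
 * honour the LE/GE adjustment flags, verify the resulting scaling ratio
 * and store the new crop/compose rectangle.
 */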
static int gsc_m2m_s_selection(struct file *file, void *fh,
			       struct v4l2_selection *s)
{
	struct gsc_frame *frame;
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct v4l2_crop cr;
	struct gsc_variant *variant = ctx->gsc_dev->variant;
	int ret;

	cr.type = s->type;
	cr.c = s->r;

	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
		return -EINVAL;

	ret = gsc_try_crop(ctx, &cr);
	if (ret)
		return ret;

	if (s->flags & V4L2_SEL_FLAG_LE &&
	    !is_rectangle_enclosed(&cr.c, &s->r))
		return -ERANGE;

	if (s->flags & V4L2_SEL_FLAG_GE &&
	    !is_rectangle_enclosed(&s->r, &cr.c))
		return -ERANGE;

	s->r = cr.c;

	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE:
		frame = &ctx->s_frame;
		break;

	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		frame = &ctx->d_frame;
		break;

	default:
		return -EINVAL;
	}

	/* Check to see if scaling ratio is within supported range */
	if (gsc_ctx_state_is_set(GSC_DST_FMT | GSC_SRC_FMT, ctx)) {
		if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
			ret = gsc_check_scaler_ratio(variant, cr.c.width,
				cr.c.height, ctx->d_frame.crop.width,
				ctx->d_frame.crop.height,
				ctx->gsc_ctrls.rotate->val, ctx->out_path);
		} else {
			ret = gsc_check_scaler_ratio(variant,
				ctx->s_frame.crop.width,
				ctx->s_frame.crop.height, cr.c.width,
				cr.c.height, ctx->gsc_ctrls.rotate->val,
				ctx->out_path);
		}

		if (ret) {
			pr_err("Out of scaler range");
			return -EINVAL;
		}
	}

	frame->crop = cr.c;

	gsc_ctx_state_lock_set(GSC_PARAMS, ctx);
	return 0;
}

static const struct v4l2_ioctl_ops gsc_m2m_ioctl_ops = {
	.vidioc_querycap		= gsc_m2m_querycap,
	.vidioc_enum_fmt_vid_cap_mplane	= gsc_m2m_enum_fmt_mplane,
	.vidioc_enum_fmt_vid_out_mplane	= gsc_m2m_enum_fmt_mplane,
	.vidioc_g_fmt_vid_cap_mplane	= gsc_m2m_g_fmt_mplane,
	.vidioc_g_fmt_vid_out_mplane	= gsc_m2m_g_fmt_mplane,
	.vidioc_try_fmt_vid_cap_mplane	= gsc_m2m_try_fmt_mplane,
	.vidioc_try_fmt_vid_out_mplane	= gsc_m2m_try_fmt_mplane,
	.vidioc_s_fmt_vid_cap_mplane	= gsc_m2m_s_fmt_mplane,
	.vidioc_s_fmt_vid_out_mplane	= gsc_m2m_s_fmt_mplane,
	.vidioc_reqbufs			= gsc_m2m_reqbufs,
	.vidioc_querybuf		= gsc_m2m_querybuf,
	.vidioc_qbuf			= gsc_m2m_qbuf,
	.vidioc_dqbuf			= gsc_m2m_dqbuf,
	.vidioc_streamon		= gsc_m2m_streamon,
	.vidioc_streamoff		= gsc_m2m_streamoff,
	.vidioc_g_selection		= gsc_m2m_g_selection,
	.vidioc_s_selection		= gsc_m2m_s_selection
};

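/*
 * Initialize the source (OUTPUT) and destination (CAPTURE) vb2 queues for
 * a newly opened mem2mem context; both use the DMA-contig memory ops.
 */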
static int queue_init(void *priv, struct vb2_queue *src_vq,
		      struct vb2_queue *dst_vq)
{
	struct gsc_ctx *ctx = priv;
	int ret;

	memset(src_vq, 0, sizeof(*src_vq));
	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
	src_vq->drv_priv = ctx;
	src_vq->ops = &gsc_m2m_qops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	memset(dst_vq, 0, sizeof(*dst_vq));
	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
	dst_vq->drv_priv = ctx;
	dst_vq->ops = &gsc_m2m_qops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);

	return vb2_queue_init(dst_vq);
}

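/*
 * open() handler: allocate a per-file-handle context with its own control
 * handler, set default formats and DMA paths, and create the m2m context.
 */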
static int gsc_m2m_open(struct file *file)
{
	struct gsc_dev *gsc = video_drvdata(file);
	struct gsc_ctx *ctx = NULL;
	int ret;

	pr_debug("pid: %d, state: 0x%lx", task_pid_nr(current), gsc->state);

	if (mutex_lock_interruptible(&gsc->lock))
		return -ERESTARTSYS;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		ret = -ENOMEM;
		goto unlock;
	}

	v4l2_fh_init(&ctx->fh, gsc->m2m.vfd);
	ret = gsc_ctrls_create(ctx);
	if (ret)
		goto error_fh;

	/* Use separate control handler per file handle */
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	ctx->gsc_dev = gsc;
	/* Default color format */
	ctx->s_frame.fmt = get_format(0);
	ctx->d_frame.fmt = get_format(0);
	/* Setup the device context for mem2mem mode. */
	ctx->state = GSC_CTX_M2M;
	ctx->flags = 0;
	ctx->in_path = GSC_DMA;
	ctx->out_path = GSC_DMA;

	ctx->m2m_ctx = v4l2_m2m_ctx_init(gsc->m2m.m2m_dev, ctx, queue_init);
	if (IS_ERR(ctx->m2m_ctx)) {
		pr_err("Failed to initialize m2m context");
		ret = PTR_ERR(ctx->m2m_ctx);
		goto error_ctrls;
	}

	if (gsc->m2m.refcnt++ == 0)
		set_bit(ST_M2M_OPEN, &gsc->state);

	pr_debug("gsc m2m driver is opened, ctx(0x%p)", ctx);

	mutex_unlock(&gsc->lock);
	return 0;

error_ctrls:
	gsc_ctrls_delete(ctx);
error_fh:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
unlock:
	mutex_unlock(&gsc->lock);
	return ret;
}

static int gsc_m2m_release(struct file *file)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;

	pr_debug("pid: %d, state: 0x%lx, refcnt= %d",
		 task_pid_nr(current), gsc->state, gsc->m2m.refcnt);

	mutex_lock(&gsc->lock);

	v4l2_m2m_ctx_release(ctx->m2m_ctx);
	gsc_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);

	if (--gsc->m2m.refcnt <= 0)
		clear_bit(ST_M2M_OPEN, &gsc->state);
	kfree(ctx);

	mutex_unlock(&gsc->lock);
	return 0;
}

static unsigned int gsc_m2m_poll(struct file *file,
				 struct poll_table_struct *wait)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;
	int ret;

	if (mutex_lock_interruptible(&gsc->lock))
		return -ERESTARTSYS;

	ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
	mutex_unlock(&gsc->lock);

	return ret;
}

static int gsc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;
	int ret;

	if (mutex_lock_interruptible(&gsc->lock))
		return -ERESTARTSYS;

	ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
	mutex_unlock(&gsc->lock);

	return ret;
}

static const struct v4l2_file_operations gsc_m2m_fops = {
	.owner		= THIS_MODULE,
	.open		= gsc_m2m_open,
	.release	= gsc_m2m_release,
	.poll		= gsc_m2m_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= gsc_m2m_mmap,
};

static struct v4l2_m2m_ops gsc_m2m_ops = {
	.device_run	= gsc_m2m_device_run,
	.job_abort	= gsc_m2m_job_abort,
};

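/*
 * Register the mem2mem video device: set up the video_device callbacks,
 * create the v4l2-m2m device and expose it as a /dev/videoN node.
 */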
int gsc_register_m2m_device(struct gsc_dev *gsc)
{
	struct platform_device *pdev;
	int ret;

	if (!gsc)
		return -ENODEV;

	pdev = gsc->pdev;

	gsc->vdev.fops		= &gsc_m2m_fops;
	gsc->vdev.ioctl_ops	= &gsc_m2m_ioctl_ops;
	gsc->vdev.release	= video_device_release_empty;
	gsc->vdev.lock		= &gsc->lock;
	gsc->vdev.vfl_dir	= VFL_DIR_M2M;
	snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
		 GSC_MODULE_NAME, gsc->id);

	video_set_drvdata(&gsc->vdev, gsc);

	gsc->m2m.vfd = &gsc->vdev;
	gsc->m2m.m2m_dev = v4l2_m2m_init(&gsc_m2m_ops);
	if (IS_ERR(gsc->m2m.m2m_dev)) {
		dev_err(&pdev->dev, "failed to initialize v4l2-m2m device\n");
		ret = PTR_ERR(gsc->m2m.m2m_dev);
		goto err_m2m_r1;
	}

	ret = video_register_device(&gsc->vdev, VFL_TYPE_GRABBER, -1);
	if (ret) {
		dev_err(&pdev->dev,
			"%s(): failed to register video device\n", __func__);
		goto err_m2m_r2;
	}

	pr_debug("gsc m2m driver registered as /dev/video%d", gsc->vdev.num);
	return 0;

err_m2m_r2:
	v4l2_m2m_release(gsc->m2m.m2m_dev);
err_m2m_r1:
	video_device_release(gsc->m2m.vfd);

	return ret;
}

void gsc_unregister_m2m_device(struct gsc_dev *gsc)
{
	if (gsc)
		v4l2_m2m_release(gsc->m2m.m2m_dev);
}