]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - drivers/media/platform/vsp1/vsp1_video.c
[media] v4l: vsp1: Pass display list explicitly to configure functions
[mirror_ubuntu-zesty-kernel.git] / drivers / media / platform / vsp1 / vsp1_video.c
1 /*
2 * vsp1_video.c -- R-Car VSP1 Video Node
3 *
4 * Copyright (C) 2013-2015 Renesas Electronics Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14 #include <linux/list.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/slab.h>
18 #include <linux/v4l2-mediabus.h>
19 #include <linux/videodev2.h>
20 #include <linux/wait.h>
21
22 #include <media/media-entity.h>
23 #include <media/v4l2-dev.h>
24 #include <media/v4l2-fh.h>
25 #include <media/v4l2-ioctl.h>
26 #include <media/v4l2-subdev.h>
27 #include <media/videobuf2-v4l2.h>
28 #include <media/videobuf2-dma-contig.h>
29
30 #include "vsp1.h"
31 #include "vsp1_bru.h"
32 #include "vsp1_dl.h"
33 #include "vsp1_entity.h"
34 #include "vsp1_pipe.h"
35 #include "vsp1_rwpf.h"
36 #include "vsp1_uds.h"
37 #include "vsp1_video.h"
38
39 #define VSP1_VIDEO_DEF_FORMAT V4L2_PIX_FMT_YUYV
40 #define VSP1_VIDEO_DEF_WIDTH 1024
41 #define VSP1_VIDEO_DEF_HEIGHT 768
42
43 #define VSP1_VIDEO_MIN_WIDTH 2U
44 #define VSP1_VIDEO_MAX_WIDTH 8190U
45 #define VSP1_VIDEO_MIN_HEIGHT 2U
46 #define VSP1_VIDEO_MAX_HEIGHT 8190U
47
48 /* -----------------------------------------------------------------------------
49 * Helper functions
50 */
51
52 static struct v4l2_subdev *
53 vsp1_video_remote_subdev(struct media_pad *local, u32 *pad)
54 {
55 struct media_pad *remote;
56
57 remote = media_entity_remote_pad(local);
58 if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
59 return NULL;
60
61 if (pad)
62 *pad = remote->index;
63
64 return media_entity_to_v4l2_subdev(remote->entity);
65 }
66
/*
 * vsp1_video_verify_format - Check that the video node format matches the
 * connected subdev format
 * @video: the video node
 *
 * Query the active format on the remote subdev pad and compare it with the
 * format stored in the node's rwpf. Return 0 when the media bus code, width
 * and height all match, a negative error code otherwise.
 */
static int vsp1_video_verify_format(struct vsp1_video *video)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	int ret;

	/* The helper fills fmt.pad with the remote pad index for get_fmt. */
	subdev = vsp1_video_remote_subdev(&video->pad, &fmt.pad);
	if (subdev == NULL)
		return -EINVAL;

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret < 0)
		/* Map "op not implemented" to a plain invalid-pipeline error. */
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	if (video->rwpf->fmtinfo->mbus != fmt.format.code ||
	    video->rwpf->format.height != fmt.format.height ||
	    video->rwpf->format.width != fmt.format.width)
		return -EINVAL;

	return 0;
}
89
/*
 * __vsp1_video_try_format - Adjust a pixel format to hardware constraints
 * @video: the video node
 * @pix: the multiplanar pixel format to adjust in place
 * @fmtinfo: if not NULL, filled with the matching format info descriptor
 *
 * Replace unsupported pixel formats by the default, clamp width/height to the
 * hardware limits, and compute per-plane stride and image size. Always
 * returns 0.
 */
static int __vsp1_video_try_format(struct vsp1_video *video,
				   struct v4l2_pix_format_mplane *pix,
				   const struct vsp1_format_info **fmtinfo)
{
	/* Pairs of { deprecated RGB fourcc, XRGB replacement }. */
	static const u32 xrgb_formats[][2] = {
		{ V4L2_PIX_FMT_RGB444, V4L2_PIX_FMT_XRGB444 },
		{ V4L2_PIX_FMT_RGB555, V4L2_PIX_FMT_XRGB555 },
		{ V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_XBGR32 },
		{ V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_XRGB32 },
	};

	const struct vsp1_format_info *info;
	unsigned int width = pix->width;
	unsigned int height = pix->height;
	unsigned int i;

	/* Backward compatibility: replace deprecated RGB formats by their XRGB
	 * equivalent. This selects the format older userspace applications want
	 * while still exposing the new format.
	 */
	for (i = 0; i < ARRAY_SIZE(xrgb_formats); ++i) {
		if (xrgb_formats[i][0] == pix->pixelformat) {
			pix->pixelformat = xrgb_formats[i][1];
			break;
		}
	}

	/* Retrieve format information and select the default format if the
	 * requested format isn't supported.
	 */
	info = vsp1_get_format_info(pix->pixelformat);
	if (info == NULL)
		info = vsp1_get_format_info(VSP1_VIDEO_DEF_FORMAT);

	pix->pixelformat = info->fourcc;
	pix->colorspace = V4L2_COLORSPACE_SRGB;
	pix->field = V4L2_FIELD_NONE;
	memset(pix->reserved, 0, sizeof(pix->reserved));

	/* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
	width = round_down(width, info->hsub);
	height = round_down(height, info->vsub);

	/* Clamp the width and height. */
	pix->width = clamp(width, VSP1_VIDEO_MIN_WIDTH, VSP1_VIDEO_MAX_WIDTH);
	pix->height = clamp(height, VSP1_VIDEO_MIN_HEIGHT,
			    VSP1_VIDEO_MAX_HEIGHT);

	/* Compute and clamp the stride and image size. While not documented in
	 * the datasheet, strides not aligned to a multiple of 128 bytes result
	 * in image corruption.
	 */
	for (i = 0; i < min(info->planes, 2U); ++i) {
		/* Plane 0 is never subsampled; chroma planes use hsub/vsub. */
		unsigned int hsub = i > 0 ? info->hsub : 1;
		unsigned int vsub = i > 0 ? info->vsub : 1;
		unsigned int align = 128;
		unsigned int bpl;

		bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
			      pix->width / hsub * info->bpp[i] / 8,
			      round_down(65535U, align));

		pix->plane_fmt[i].bytesperline = round_up(bpl, align);
		pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
					    * pix->height / vsub;
	}

	if (info->planes == 3) {
		/* The second and third planes must have the same stride. */
		pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
		pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
	}

	pix->num_planes = info->planes;

	if (fmtinfo)
		*fmtinfo = info;

	return 0;
}
170
171 /* -----------------------------------------------------------------------------
172 * Pipeline Management
173 */
174
/*
 * vsp1_video_pipeline_validate_branch - Validate a branch from an input RPF to
 * the output WPF
 * @pipe: the pipeline
 * @input: the RPF at the head of the branch
 * @output: the WPF the branch must end at
 *
 * Follow active source links downstream from @input, recording BRU and UDS
 * usage along the way. Return 0 when the branch is loop-free and terminates
 * at @output, -EPIPE otherwise.
 */
static int vsp1_video_pipeline_validate_branch(struct vsp1_pipeline *pipe,
					       struct vsp1_rwpf *input,
					       struct vsp1_rwpf *output)
{
	struct media_entity_enum ent_enum;
	struct vsp1_entity *entity;
	struct media_pad *pad;
	bool bru_found = false;
	int ret;

	ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev);
	if (ret < 0)
		return ret;

	pad = media_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);

	while (1) {
		/* A dangling link means the branch is broken. */
		if (pad == NULL) {
			ret = -EPIPE;
			goto out;
		}

		/* We've reached a video node, that shouldn't have happened. */
		if (!is_media_entity_v4l2_subdev(pad->entity)) {
			ret = -EPIPE;
			goto out;
		}

		entity = to_vsp1_entity(
			media_entity_to_v4l2_subdev(pad->entity));

		/* A BRU is present in the pipeline, store the BRU input pad
		 * number in the input RPF for use when configuring the RPF.
		 */
		if (entity->type == VSP1_ENTITY_BRU) {
			struct vsp1_bru *bru = to_bru(&entity->subdev);

			bru->inputs[pad->index].rpf = input;
			input->bru_input = pad->index;

			bru_found = true;
		}

		/* We've reached the WPF, we're done. */
		if (entity->type == VSP1_ENTITY_WPF)
			break;

		/* Ensure the branch has no loop. */
		if (media_entity_enum_test_and_set(&ent_enum,
						   &entity->subdev.entity)) {
			ret = -EPIPE;
			goto out;
		}

		/* UDS can't be chained. */
		if (entity->type == VSP1_ENTITY_UDS) {
			if (pipe->uds) {
				ret = -EPIPE;
				goto out;
			}

			pipe->uds = entity;
			pipe->uds_input = bru_found ? pipe->bru
					: &input->entity;
		}

		/* Follow the source link. The link setup operations ensure
		 * that the output fan-out can't be more than one, there is thus
		 * no need to verify here that only a single source link is
		 * activated.
		 */
		pad = &entity->pads[entity->source_pad];
		pad = media_entity_remote_pad(pad);
	}

	/* The last entity must be the output WPF. */
	if (entity != &output->entity)
		ret = -EPIPE;

out:
	media_entity_enum_cleanup(&ent_enum);

	return ret;
}
259
/*
 * vsp1_video_pipeline_validate - Discover and validate the pipeline around a
 * video node
 * @pipe: the pipeline to populate
 * @video: the video node the walk starts from
 *
 * Walk the media graph connected to @video to collect the pipeline's entities
 * (inputs, output, LIF, BRU), then validate every input-to-output branch.
 * On failure the pipeline is reset and a negative error code is returned.
 */
static int vsp1_video_pipeline_validate(struct vsp1_pipeline *pipe,
					struct vsp1_video *video)
{
	struct media_entity_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->graph_obj.mdev;
	unsigned int i;
	int ret;

	/* The graph must not change while it is being walked. */
	mutex_lock(&mdev->graph_mutex);

	/* Walk the graph to locate the entities and video nodes. */
	ret = media_entity_graph_walk_init(&graph, mdev);
	if (ret) {
		mutex_unlock(&mdev->graph_mutex);
		return ret;
	}

	media_entity_graph_walk_start(&graph, entity);

	while ((entity = media_entity_graph_walk_next(&graph))) {
		struct v4l2_subdev *subdev;
		struct vsp1_rwpf *rwpf;
		struct vsp1_entity *e;

		/* Video device nodes aren't pipeline entities; skip them. */
		if (!is_media_entity_v4l2_subdev(entity))
			continue;

		subdev = media_entity_to_v4l2_subdev(entity);
		e = to_vsp1_entity(subdev);
		list_add_tail(&e->list_pipe, &pipe->entities);

		if (e->type == VSP1_ENTITY_RPF) {
			rwpf = to_rwpf(subdev);
			pipe->inputs[rwpf->entity.index] = rwpf;
			/* Inputs get indices 1..n; index 0 is the output. */
			rwpf->video->pipe_index = ++pipe->num_inputs;
		} else if (e->type == VSP1_ENTITY_WPF) {
			rwpf = to_rwpf(subdev);
			pipe->output = rwpf;
			rwpf->video->pipe_index = 0;
		} else if (e->type == VSP1_ENTITY_LIF) {
			pipe->lif = e;
		} else if (e->type == VSP1_ENTITY_BRU) {
			pipe->bru = e;
		}
	}

	mutex_unlock(&mdev->graph_mutex);

	media_entity_graph_walk_cleanup(&graph);

	/* We need one output and at least one input. */
	if (pipe->num_inputs == 0 || !pipe->output) {
		ret = -EPIPE;
		goto error;
	}

	/* Follow links downstream for each input and make sure the graph
	 * contains no loop and that all branches end at the output WPF.
	 */
	for (i = 0; i < video->vsp1->info->rpf_count; ++i) {
		if (!pipe->inputs[i])
			continue;

		ret = vsp1_video_pipeline_validate_branch(pipe, pipe->inputs[i],
							  pipe->output);
		if (ret < 0)
			goto error;
	}

	return 0;

error:
	vsp1_pipeline_reset(pipe);
	return ret;
}
336
337 static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
338 struct vsp1_video *video)
339 {
340 int ret;
341
342 mutex_lock(&pipe->lock);
343
344 /* If we're the first user validate and initialize the pipeline. */
345 if (pipe->use_count == 0) {
346 ret = vsp1_video_pipeline_validate(pipe, video);
347 if (ret < 0)
348 goto done;
349 }
350
351 pipe->use_count++;
352 ret = 0;
353
354 done:
355 mutex_unlock(&pipe->lock);
356 return ret;
357 }
358
359 static void vsp1_video_pipeline_cleanup(struct vsp1_pipeline *pipe)
360 {
361 mutex_lock(&pipe->lock);
362
363 /* If we're the last user clean up the pipeline. */
364 if (--pipe->use_count == 0)
365 vsp1_pipeline_reset(pipe);
366
367 mutex_unlock(&pipe->lock);
368 }
369
370 /*
371 * vsp1_video_complete_buffer - Complete the current buffer
372 * @video: the video node
373 *
374 * This function completes the current buffer by filling its sequence number,
375 * time stamp and payload size, and hands it back to the videobuf core.
376 *
377 * When operating in DU output mode (deep pipeline to the DU through the LIF),
378 * the VSP1 needs to constantly supply frames to the display. In that case, if
379 * no other buffer is queued, reuse the one that has just been processed instead
380 * of handing it back to the videobuf core.
381 *
382 * Return the next queued buffer or NULL if the queue is empty.
383 */
static struct vsp1_vb2_buffer *
vsp1_video_complete_buffer(struct vsp1_video *video)
{
	struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
	struct vsp1_vb2_buffer *next = NULL;
	struct vsp1_vb2_buffer *done;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&video->irqlock, flags);

	/* Nothing queued, nothing to complete. */
	if (list_empty(&video->irqqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return NULL;
	}

	done = list_first_entry(&video->irqqueue,
				struct vsp1_vb2_buffer, queue);

	/* In DU output mode reuse the buffer if the list is singular. */
	if (pipe->lif && list_is_singular(&video->irqqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return done;
	}

	list_del(&done->queue);

	if (!list_empty(&video->irqqueue))
		next = list_first_entry(&video->irqqueue,
					struct vsp1_vb2_buffer, queue);

	spin_unlock_irqrestore(&video->irqlock, flags);

	/* Fill in the metadata and hand the buffer back to videobuf2. */
	done->buf.sequence = video->sequence++;
	done->buf.vb2_buf.timestamp = ktime_get_ns();
	for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
		vb2_set_plane_payload(&done->buf.vb2_buf, i,
				      vb2_plane_size(&done->buf.vb2_buf, i));
	vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);

	return next;
}
426
/*
 * vsp1_video_frame_end - Complete the current buffer on one video node
 * @pipe: the pipeline
 * @rwpf: the [RW]PF whose video node just finished a frame
 *
 * Complete the buffer being processed and, if another buffer is queued, make
 * it the next DMA target and mark the node as ready in the pipeline.
 */
static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
				 struct vsp1_rwpf *rwpf)
{
	struct vsp1_video *video = rwpf->video;
	struct vsp1_vb2_buffer *buf;
	unsigned long flags;

	buf = vsp1_video_complete_buffer(video);
	if (buf == NULL)
		return;

	spin_lock_irqsave(&pipe->irqlock, flags);

	/* Point the rwpf at the next buffer's memory for the coming frame. */
	video->rwpf->mem = buf->mem;
	pipe->buffers_ready |= 1 << video->pipe_index;

	spin_unlock_irqrestore(&pipe->irqlock, flags);
}
445
/*
 * vsp1_video_pipeline_run - Program buffer addresses and start the pipeline
 * @pipe: the pipeline
 *
 * Fill the display list with the memory addresses of all input (and, except
 * in DU mode, output) buffers, commit it and kick the hardware. All callers
 * in this file hold pipe->irqlock when calling this function.
 */
static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	unsigned int i;

	/* Reuse the display list prepared at setup time if still available. */
	if (!pipe->dl)
		pipe->dl = vsp1_dl_list_get(pipe->output->dlm);

	for (i = 0; i < vsp1->info->rpf_count; ++i) {
		struct vsp1_rwpf *rwpf = pipe->inputs[i];

		if (rwpf)
			vsp1_rwpf_set_memory(rwpf, pipe->dl);
	}

	/* In DU mode (LIF present) the WPF doesn't write to memory. */
	if (!pipe->lif)
		vsp1_rwpf_set_memory(pipe->output, pipe->dl);

	vsp1_dl_list_commit(pipe->dl);
	pipe->dl = NULL;

	vsp1_pipeline_run(pipe);
}
469
/*
 * vsp1_video_pipeline_frame_end - Frame completion handler for the pipeline
 * @pipe: the pipeline
 *
 * Complete buffers on all video nodes, then either wake up a pending stop
 * request or restart the pipeline if all nodes have a buffer ready.
 */
static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	enum vsp1_pipeline_state state;
	unsigned long flags;
	unsigned int i;

	/* Complete buffers on all video nodes. */
	for (i = 0; i < vsp1->info->rpf_count; ++i) {
		if (!pipe->inputs[i])
			continue;

		vsp1_video_frame_end(pipe, pipe->inputs[i]);
	}

	vsp1_video_frame_end(pipe, pipe->output);

	spin_lock_irqsave(&pipe->irqlock, flags);

	state = pipe->state;
	pipe->state = VSP1_PIPELINE_STOPPED;

	/* If a stop has been requested, mark the pipeline as stopped and
	 * return. Otherwise restart the pipeline if ready.
	 */
	if (state == VSP1_PIPELINE_STOPPING)
		wake_up(&pipe->wq);
	else if (vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);

	spin_unlock_irqrestore(&pipe->irqlock, flags);
}
502
503 /* -----------------------------------------------------------------------------
504 * videobuf2 Queue Operations
505 */
506
507 static int
508 vsp1_video_queue_setup(struct vb2_queue *vq,
509 unsigned int *nbuffers, unsigned int *nplanes,
510 unsigned int sizes[], void *alloc_ctxs[])
511 {
512 struct vsp1_video *video = vb2_get_drv_priv(vq);
513 const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
514 unsigned int i;
515
516 if (*nplanes) {
517 if (*nplanes != format->num_planes)
518 return -EINVAL;
519
520 for (i = 0; i < *nplanes; i++) {
521 if (sizes[i] < format->plane_fmt[i].sizeimage)
522 return -EINVAL;
523 alloc_ctxs[i] = video->alloc_ctx;
524 }
525 return 0;
526 }
527
528 *nplanes = format->num_planes;
529
530 for (i = 0; i < format->num_planes; ++i) {
531 sizes[i] = format->plane_fmt[i].sizeimage;
532 alloc_ctxs[i] = video->alloc_ctx;
533 }
534
535 return 0;
536 }
537
/*
 * vsp1_video_buffer_prepare - videobuf2 buf_prepare operation
 *
 * Validate the buffer's plane count and sizes against the active format and
 * cache the DMA addresses of all planes in the driver buffer structure.
 */
static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
	const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
	unsigned int i;

	if (vb->num_planes < format->num_planes)
		return -EINVAL;

	for (i = 0; i < vb->num_planes; ++i) {
		buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);

		if (vb2_plane_size(vb, i) < format->plane_fmt[i].sizeimage)
			return -EINVAL;
	}

	/* Zero the addresses of the unused planes (mem.addr has 3 slots). */
	for ( ; i < 3; ++i)
		buf->mem.addr[i] = 0;

	return 0;
}
561
/*
 * vsp1_video_buffer_queue - videobuf2 buf_queue operation
 *
 * Add the buffer to the IRQ queue. If it is the first buffer for this node,
 * make it the current DMA target and start the pipeline when all nodes are
 * ready and streaming.
 */
static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
	struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
	unsigned long flags;
	bool empty;

	spin_lock_irqsave(&video->irqlock, flags);
	empty = list_empty(&video->irqqueue);
	list_add_tail(&buf->queue, &video->irqqueue);
	spin_unlock_irqrestore(&video->irqlock, flags);

	/* A non-empty queue means the current target is already set up. */
	if (!empty)
		return;

	spin_lock_irqsave(&pipe->irqlock, flags);

	video->rwpf->mem = buf->mem;
	pipe->buffers_ready |= 1 << video->pipe_index;

	if (vb2_is_streaming(&video->queue) &&
	    vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);

	spin_unlock_irqrestore(&pipe->irqlock, flags);
}
590
/*
 * vsp1_video_setup_pipeline - One-time hardware setup for the pipeline
 * @pipe: the pipeline
 *
 * Acquire a display list, configure UDS alpha scaling, and program routing
 * and static configuration for every entity into the display list. Return 0
 * on success or -ENOMEM if no display list is available.
 */
static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
{
	struct vsp1_entity *entity;

	/* Prepare the display list. */
	pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
	if (!pipe->dl)
		return -ENOMEM;

	if (pipe->uds) {
		struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);

		/* If a BRU is present in the pipeline before the UDS, the alpha
		 * component doesn't need to be scaled as the BRU output alpha
		 * value is fixed to 255. Otherwise we need to scale the alpha
		 * component only when available at the input RPF.
		 */
		if (pipe->uds_input->type == VSP1_ENTITY_BRU) {
			uds->scale_alpha = false;
		} else {
			struct vsp1_rwpf *rpf =
				to_rwpf(&pipe->uds_input->subdev);

			uds->scale_alpha = rpf->fmtinfo->alpha;
		}
	}

	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		vsp1_entity_route_setup(entity, pipe->dl);

		if (entity->ops->configure)
			entity->ops->configure(entity, pipe->dl);
	}

	return 0;
}
627
/*
 * vsp1_video_start_streaming - videobuf2 start_streaming operation
 *
 * Count this node as streaming and, once the last node of the pipeline
 * starts, perform the one-time hardware setup. Start the pipeline
 * immediately if all nodes already have buffers queued.
 */
static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
	unsigned long flags;
	int ret;

	mutex_lock(&pipe->lock);
	/* stream_count == num_inputs means the num_inputs other nodes (the
	 * inputs plus the output, minus this one) are already streaming, so
	 * this node is the last to start: set up the hardware now.
	 */
	if (pipe->stream_count == pipe->num_inputs) {
		ret = vsp1_video_setup_pipeline(pipe);
		if (ret < 0) {
			mutex_unlock(&pipe->lock);
			return ret;
		}
	}

	pipe->stream_count++;
	mutex_unlock(&pipe->lock);

	spin_lock_irqsave(&pipe->irqlock, flags);
	if (vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);
	spin_unlock_irqrestore(&pipe->irqlock, flags);

	return 0;
}
654
/*
 * vsp1_video_stop_streaming - videobuf2 stop_streaming operation
 *
 * The last node to stop streaming halts the hardware and releases the
 * pending display list. All queued buffers are then returned to videobuf2
 * in the error state.
 */
static void vsp1_video_stop_streaming(struct vb2_queue *vq)
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
	struct vsp1_vb2_buffer *buffer;
	unsigned long flags;
	int ret;

	mutex_lock(&pipe->lock);
	if (--pipe->stream_count == 0) {
		/* Stop the pipeline. */
		ret = vsp1_pipeline_stop(pipe);
		if (ret == -ETIMEDOUT)
			dev_err(video->vsp1->dev, "pipeline stop timeout\n");

		vsp1_dl_list_put(pipe->dl);
		pipe->dl = NULL;
	}
	mutex_unlock(&pipe->lock);

	vsp1_video_pipeline_cleanup(pipe);
	media_entity_pipeline_stop(&video->video.entity);

	/* Remove all buffers from the IRQ queue. */
	spin_lock_irqsave(&video->irqlock, flags);
	list_for_each_entry(buffer, &video->irqqueue, queue)
		vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
	INIT_LIST_HEAD(&video->irqqueue);
	spin_unlock_irqrestore(&video->irqlock, flags);
}
685
/* videobuf2 queue operations for both capture and output video nodes. */
static struct vb2_ops vsp1_video_queue_qops = {
	.queue_setup = vsp1_video_queue_setup,
	.buf_prepare = vsp1_video_buffer_prepare,
	.buf_queue = vsp1_video_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = vsp1_video_start_streaming,
	.stop_streaming = vsp1_video_stop_streaming,
};
695
696 /* -----------------------------------------------------------------------------
697 * V4L2 ioctls
698 */
699
700 static int
701 vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
702 {
703 struct v4l2_fh *vfh = file->private_data;
704 struct vsp1_video *video = to_vsp1_video(vfh->vdev);
705
706 cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
707 | V4L2_CAP_VIDEO_CAPTURE_MPLANE
708 | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
709
710 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
711 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE
712 | V4L2_CAP_STREAMING;
713 else
714 cap->device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE
715 | V4L2_CAP_STREAMING;
716
717 strlcpy(cap->driver, "vsp1", sizeof(cap->driver));
718 strlcpy(cap->card, video->video.name, sizeof(cap->card));
719 snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
720 dev_name(video->vsp1->dev));
721
722 return 0;
723 }
724
/*
 * vsp1_video_get_format - VIDIOC_G_FMT handler
 *
 * Copy the active format of the node's rwpf to the caller, under the video
 * lock to get a consistent snapshot.
 */
static int
vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	if (format->type != video->queue.type)
		return -EINVAL;

	mutex_lock(&video->lock);
	format->fmt.pix_mp = video->rwpf->format;
	mutex_unlock(&video->lock);

	return 0;
}
740
/*
 * vsp1_video_try_format - VIDIOC_TRY_FMT handler
 *
 * Adjust the requested format to hardware constraints without applying it.
 */
static int
vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	if (format->type != video->queue.type)
		return -EINVAL;

	return __vsp1_video_try_format(video, &format->fmt.pix_mp, NULL);
}
752
/*
 * vsp1_video_set_format - VIDIOC_S_FMT handler
 *
 * Adjust the requested format to hardware constraints and store it in the
 * node's rwpf, unless buffers are already allocated on the queue (-EBUSY).
 */
static int
vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);
	const struct vsp1_format_info *info;
	int ret;

	if (format->type != video->queue.type)
		return -EINVAL;

	ret = __vsp1_video_try_format(video, &format->fmt.pix_mp, &info);
	if (ret < 0)
		return ret;

	mutex_lock(&video->lock);

	/* The format can't change while the queue owns buffers. */
	if (vb2_is_busy(&video->queue)) {
		ret = -EBUSY;
		goto done;
	}

	video->rwpf->format = format->fmt.pix_mp;
	video->rwpf->fmtinfo = info;

done:
	mutex_unlock(&video->lock);
	return ret;
}
782
/*
 * vsp1_video_streamon - VIDIOC_STREAMON handler
 *
 * Start the media pipeline, verify the format against the connected subdev,
 * acquire a pipeline use count and start the videobuf2 queue. All steps are
 * unwound in reverse order on failure.
 */
static int
vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);
	struct vsp1_pipeline *pipe;
	int ret;

	if (video->queue.owner && video->queue.owner != file->private_data)
		return -EBUSY;

	video->sequence = 0;

	/* Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 *
	 * Use the VSP1 pipeline object embedded in the first video object that
	 * starts streaming.
	 *
	 * FIXME: This is racy, the ioctl is only protected by the video node
	 * lock.
	 */
	pipe = video->video.entity.pipe
	     ? to_vsp1_pipeline(&video->video.entity) : &video->pipe;

	ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
	if (ret < 0)
		return ret;

	/* Verify that the configured format matches the output of the connected
	 * subdev.
	 */
	ret = vsp1_video_verify_format(video);
	if (ret < 0)
		goto err_stop;

	ret = vsp1_video_pipeline_init(pipe, video);
	if (ret < 0)
		goto err_stop;

	/* Start the queue. */
	ret = vb2_streamon(&video->queue, type);
	if (ret < 0)
		goto err_cleanup;

	return 0;

err_cleanup:
	vsp1_video_pipeline_cleanup(pipe);
err_stop:
	media_entity_pipeline_stop(&video->video.entity);
	return ret;
}
836
/* V4L2 ioctl operations shared by the capture and output video nodes;
 * buffer handling is delegated to the videobuf2 helpers.
 */
static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = {
	.vidioc_querycap		= vsp1_video_querycap,
	.vidioc_g_fmt_vid_cap_mplane	= vsp1_video_get_format,
	.vidioc_s_fmt_vid_cap_mplane	= vsp1_video_set_format,
	.vidioc_try_fmt_vid_cap_mplane	= vsp1_video_try_format,
	.vidioc_g_fmt_vid_out_mplane	= vsp1_video_get_format,
	.vidioc_s_fmt_vid_out_mplane	= vsp1_video_set_format,
	.vidioc_try_fmt_vid_out_mplane	= vsp1_video_try_format,
	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
	.vidioc_streamon		= vsp1_video_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,
};
854
855 /* -----------------------------------------------------------------------------
856 * V4L2 File Operations
857 */
858
859 static int vsp1_video_open(struct file *file)
860 {
861 struct vsp1_video *video = video_drvdata(file);
862 struct v4l2_fh *vfh;
863 int ret = 0;
864
865 vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
866 if (vfh == NULL)
867 return -ENOMEM;
868
869 v4l2_fh_init(vfh, &video->video);
870 v4l2_fh_add(vfh);
871
872 file->private_data = vfh;
873
874 ret = vsp1_device_get(video->vsp1);
875 if (ret < 0) {
876 v4l2_fh_del(vfh);
877 kfree(vfh);
878 }
879
880 return ret;
881 }
882
/*
 * vsp1_video_release - V4L2 file release operation
 *
 * Release the queue if this file handle owns it, drop the device power
 * reference and free the file handle.
 */
static int vsp1_video_release(struct file *file)
{
	struct vsp1_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;

	mutex_lock(&video->lock);
	/* Only the queue owner may tear the queue down. */
	if (video->queue.owner == vfh) {
		vb2_queue_release(&video->queue);
		video->queue.owner = NULL;
	}
	mutex_unlock(&video->lock);

	vsp1_device_put(video->vsp1);

	v4l2_fh_release(file);

	file->private_data = NULL;

	return 0;
}
903
/* V4L2 file operations; poll and mmap are delegated to videobuf2. */
static struct v4l2_file_operations vsp1_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = vsp1_video_open,
	.release = vsp1_video_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};
912
913 /* -----------------------------------------------------------------------------
914 * Initialization and Cleanup
915 */
916
917 struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
918 struct vsp1_rwpf *rwpf)
919 {
920 struct vsp1_video *video;
921 const char *direction;
922 int ret;
923
924 video = devm_kzalloc(vsp1->dev, sizeof(*video), GFP_KERNEL);
925 if (!video)
926 return ERR_PTR(-ENOMEM);
927
928 rwpf->video = video;
929
930 video->vsp1 = vsp1;
931 video->rwpf = rwpf;
932
933 if (rwpf->entity.type == VSP1_ENTITY_RPF) {
934 direction = "input";
935 video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
936 video->pad.flags = MEDIA_PAD_FL_SOURCE;
937 video->video.vfl_dir = VFL_DIR_TX;
938 } else {
939 direction = "output";
940 video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
941 video->pad.flags = MEDIA_PAD_FL_SINK;
942 video->video.vfl_dir = VFL_DIR_RX;
943 }
944
945 mutex_init(&video->lock);
946 spin_lock_init(&video->irqlock);
947 INIT_LIST_HEAD(&video->irqqueue);
948
949 vsp1_pipeline_init(&video->pipe);
950 video->pipe.frame_end = vsp1_video_pipeline_frame_end;
951
952 /* Initialize the media entity... */
953 ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
954 if (ret < 0)
955 return ERR_PTR(ret);
956
957 /* ... and the format ... */
958 rwpf->format.pixelformat = VSP1_VIDEO_DEF_FORMAT;
959 rwpf->format.width = VSP1_VIDEO_DEF_WIDTH;
960 rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT;
961 __vsp1_video_try_format(video, &rwpf->format, &rwpf->fmtinfo);
962
963 /* ... and the video node... */
964 video->video.v4l2_dev = &video->vsp1->v4l2_dev;
965 video->video.fops = &vsp1_video_fops;
966 snprintf(video->video.name, sizeof(video->video.name), "%s %s",
967 rwpf->entity.subdev.name, direction);
968 video->video.vfl_type = VFL_TYPE_GRABBER;
969 video->video.release = video_device_release_empty;
970 video->video.ioctl_ops = &vsp1_video_ioctl_ops;
971
972 video_set_drvdata(&video->video, video);
973
974 /* ... and the buffers queue... */
975 video->alloc_ctx = vb2_dma_contig_init_ctx(video->vsp1->dev);
976 if (IS_ERR(video->alloc_ctx)) {
977 ret = PTR_ERR(video->alloc_ctx);
978 goto error;
979 }
980
981 video->queue.type = video->type;
982 video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
983 video->queue.lock = &video->lock;
984 video->queue.drv_priv = video;
985 video->queue.buf_struct_size = sizeof(struct vsp1_vb2_buffer);
986 video->queue.ops = &vsp1_video_queue_qops;
987 video->queue.mem_ops = &vb2_dma_contig_memops;
988 video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
989 ret = vb2_queue_init(&video->queue);
990 if (ret < 0) {
991 dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n");
992 goto error;
993 }
994
995 /* ... and register the video device. */
996 video->video.queue = &video->queue;
997 ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
998 if (ret < 0) {
999 dev_err(video->vsp1->dev, "failed to register video device\n");
1000 goto error;
1001 }
1002
1003 return video;
1004
1005 error:
1006 vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
1007 vsp1_video_cleanup(video);
1008 return ERR_PTR(ret);
1009 }
1010
/*
 * vsp1_video_cleanup - Unregister and release a video node's resources
 * @video: the video node
 *
 * Counterpart to vsp1_video_create(). Safe on a partially initialized node:
 * the device is only unregistered if registration succeeded.
 */
void vsp1_video_cleanup(struct vsp1_video *video)
{
	if (video_is_registered(&video->video))
		video_unregister_device(&video->video);

	vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
	media_entity_cleanup(&video->video.entity);
}