// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_video.c -- R-Car VSP1 Video Node
 *
 * Copyright (C) 2013-2015 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
#include <linux/wait.h>

#include <media/media-entity.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "vsp1.h"
#include "vsp1_brx.h"
#include "vsp1_dl.h"
#include "vsp1_entity.h"
#include "vsp1_hgo.h"
#include "vsp1_hgt.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_uds.h"
#include "vsp1_video.h"
37 #define VSP1_VIDEO_DEF_FORMAT V4L2_PIX_FMT_YUYV
38 #define VSP1_VIDEO_DEF_WIDTH 1024
39 #define VSP1_VIDEO_DEF_HEIGHT 768
41 #define VSP1_VIDEO_MAX_WIDTH 8190U
42 #define VSP1_VIDEO_MAX_HEIGHT 8190U
44 /* -----------------------------------------------------------------------------
48 static struct v4l2_subdev
*
49 vsp1_video_remote_subdev(struct media_pad
*local
, u32
*pad
)
51 struct media_pad
*remote
;
53 remote
= media_entity_remote_pad(local
);
54 if (!remote
|| !is_media_entity_v4l2_subdev(remote
->entity
))
60 return media_entity_to_v4l2_subdev(remote
->entity
);
63 static int vsp1_video_verify_format(struct vsp1_video
*video
)
65 struct v4l2_subdev_format fmt
;
66 struct v4l2_subdev
*subdev
;
69 subdev
= vsp1_video_remote_subdev(&video
->pad
, &fmt
.pad
);
73 fmt
.which
= V4L2_SUBDEV_FORMAT_ACTIVE
;
74 ret
= v4l2_subdev_call(subdev
, pad
, get_fmt
, NULL
, &fmt
);
76 return ret
== -ENOIOCTLCMD
? -EINVAL
: ret
;
78 if (video
->rwpf
->fmtinfo
->mbus
!= fmt
.format
.code
||
79 video
->rwpf
->format
.height
!= fmt
.format
.height
||
80 video
->rwpf
->format
.width
!= fmt
.format
.width
)
86 static int __vsp1_video_try_format(struct vsp1_video
*video
,
87 struct v4l2_pix_format_mplane
*pix
,
88 const struct vsp1_format_info
**fmtinfo
)
90 static const u32 xrgb_formats
[][2] = {
91 { V4L2_PIX_FMT_RGB444
, V4L2_PIX_FMT_XRGB444
},
92 { V4L2_PIX_FMT_RGB555
, V4L2_PIX_FMT_XRGB555
},
93 { V4L2_PIX_FMT_BGR32
, V4L2_PIX_FMT_XBGR32
},
94 { V4L2_PIX_FMT_RGB32
, V4L2_PIX_FMT_XRGB32
},
97 const struct vsp1_format_info
*info
;
98 unsigned int width
= pix
->width
;
99 unsigned int height
= pix
->height
;
103 * Backward compatibility: replace deprecated RGB formats by their XRGB
104 * equivalent. This selects the format older userspace applications want
105 * while still exposing the new format.
107 for (i
= 0; i
< ARRAY_SIZE(xrgb_formats
); ++i
) {
108 if (xrgb_formats
[i
][0] == pix
->pixelformat
) {
109 pix
->pixelformat
= xrgb_formats
[i
][1];
115 * Retrieve format information and select the default format if the
116 * requested format isn't supported.
118 info
= vsp1_get_format_info(video
->vsp1
, pix
->pixelformat
);
120 info
= vsp1_get_format_info(video
->vsp1
, VSP1_VIDEO_DEF_FORMAT
);
122 pix
->pixelformat
= info
->fourcc
;
123 pix
->colorspace
= V4L2_COLORSPACE_SRGB
;
124 pix
->field
= V4L2_FIELD_NONE
;
126 if (info
->fourcc
== V4L2_PIX_FMT_HSV24
||
127 info
->fourcc
== V4L2_PIX_FMT_HSV32
)
128 pix
->hsv_enc
= V4L2_HSV_ENC_256
;
130 memset(pix
->reserved
, 0, sizeof(pix
->reserved
));
132 /* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
133 width
= round_down(width
, info
->hsub
);
134 height
= round_down(height
, info
->vsub
);
136 /* Clamp the width and height. */
137 pix
->width
= clamp(width
, info
->hsub
, VSP1_VIDEO_MAX_WIDTH
);
138 pix
->height
= clamp(height
, info
->vsub
, VSP1_VIDEO_MAX_HEIGHT
);
141 * Compute and clamp the stride and image size. While not documented in
142 * the datasheet, strides not aligned to a multiple of 128 bytes result
143 * in image corruption.
145 for (i
= 0; i
< min(info
->planes
, 2U); ++i
) {
146 unsigned int hsub
= i
> 0 ? info
->hsub
: 1;
147 unsigned int vsub
= i
> 0 ? info
->vsub
: 1;
148 unsigned int align
= 128;
151 bpl
= clamp_t(unsigned int, pix
->plane_fmt
[i
].bytesperline
,
152 pix
->width
/ hsub
* info
->bpp
[i
] / 8,
153 round_down(65535U, align
));
155 pix
->plane_fmt
[i
].bytesperline
= round_up(bpl
, align
);
156 pix
->plane_fmt
[i
].sizeimage
= pix
->plane_fmt
[i
].bytesperline
157 * pix
->height
/ vsub
;
160 if (info
->planes
== 3) {
161 /* The second and third planes must have the same stride. */
162 pix
->plane_fmt
[2].bytesperline
= pix
->plane_fmt
[1].bytesperline
;
163 pix
->plane_fmt
[2].sizeimage
= pix
->plane_fmt
[1].sizeimage
;
166 pix
->num_planes
= info
->planes
;
174 /* -----------------------------------------------------------------------------
175 * VSP1 Partition Algorithm support
179 * vsp1_video_calculate_partition - Calculate the active partition output window
181 * @pipe: the pipeline
182 * @partition: partition that will hold the calculated values
183 * @div_size: pre-determined maximum partition division size
184 * @index: partition index
186 static void vsp1_video_calculate_partition(struct vsp1_pipeline
*pipe
,
187 struct vsp1_partition
*partition
,
188 unsigned int div_size
,
191 const struct v4l2_mbus_framefmt
*format
;
192 struct vsp1_partition_window window
;
193 unsigned int modulus
;
196 * Partitions are computed on the size before rotation, use the format
199 format
= vsp1_entity_get_pad_format(&pipe
->output
->entity
,
200 pipe
->output
->entity
.config
,
203 /* A single partition simply processes the output size in full. */
204 if (pipe
->partitions
<= 1) {
206 window
.width
= format
->width
;
208 vsp1_pipeline_propagate_partition(pipe
, partition
, index
,
213 /* Initialise the partition with sane starting conditions. */
214 window
.left
= index
* div_size
;
215 window
.width
= div_size
;
217 modulus
= format
->width
% div_size
;
220 * We need to prevent the last partition from being smaller than the
221 * *minimum* width of the hardware capabilities.
223 * If the modulus is less than half of the partition size,
224 * the penultimate partition is reduced to half, which is added
225 * to the final partition: |1234|1234|1234|12|341|
226 * to prevent this: |1234|1234|1234|1234|1|.
230 * pipe->partitions is 1 based, whilst index is a 0 based index.
231 * Normalise this locally.
233 unsigned int partitions
= pipe
->partitions
- 1;
235 if (modulus
< div_size
/ 2) {
236 if (index
== partitions
- 1) {
237 /* Halve the penultimate partition. */
238 window
.width
= div_size
/ 2;
239 } else if (index
== partitions
) {
240 /* Increase the final partition. */
241 window
.width
= (div_size
/ 2) + modulus
;
242 window
.left
-= div_size
/ 2;
244 } else if (index
== partitions
) {
245 window
.width
= modulus
;
249 vsp1_pipeline_propagate_partition(pipe
, partition
, index
, &window
);
252 static int vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline
*pipe
)
254 struct vsp1_device
*vsp1
= pipe
->output
->entity
.vsp1
;
255 const struct v4l2_mbus_framefmt
*format
;
256 struct vsp1_entity
*entity
;
257 unsigned int div_size
;
261 * Partitions are computed on the size before rotation, use the format
264 format
= vsp1_entity_get_pad_format(&pipe
->output
->entity
,
265 pipe
->output
->entity
.config
,
267 div_size
= format
->width
;
270 * Only Gen3 hardware requires image partitioning, Gen2 will operate
271 * with a single partition that covers the whole output.
273 if (vsp1
->info
->gen
== 3) {
274 list_for_each_entry(entity
, &pipe
->entities
, list_pipe
) {
275 unsigned int entity_max
;
277 if (!entity
->ops
->max_width
)
280 entity_max
= entity
->ops
->max_width(entity
, pipe
);
282 div_size
= min(div_size
, entity_max
);
286 pipe
->partitions
= DIV_ROUND_UP(format
->width
, div_size
);
287 pipe
->part_table
= kcalloc(pipe
->partitions
, sizeof(*pipe
->part_table
),
289 if (!pipe
->part_table
)
292 for (i
= 0; i
< pipe
->partitions
; ++i
)
293 vsp1_video_calculate_partition(pipe
, &pipe
->part_table
[i
],
299 /* -----------------------------------------------------------------------------
300 * Pipeline Management
304 * vsp1_video_complete_buffer - Complete the current buffer
305 * @video: the video node
307 * This function completes the current buffer by filling its sequence number,
308 * time stamp and payload size, and hands it back to the videobuf core.
310 * When operating in DU output mode (deep pipeline to the DU through the LIF),
311 * the VSP1 needs to constantly supply frames to the display. In that case, if
312 * no other buffer is queued, reuse the one that has just been processed instead
313 * of handing it back to the videobuf core.
315 * Return the next queued buffer or NULL if the queue is empty.
317 static struct vsp1_vb2_buffer
*
318 vsp1_video_complete_buffer(struct vsp1_video
*video
)
320 struct vsp1_pipeline
*pipe
= video
->rwpf
->entity
.pipe
;
321 struct vsp1_vb2_buffer
*next
= NULL
;
322 struct vsp1_vb2_buffer
*done
;
326 spin_lock_irqsave(&video
->irqlock
, flags
);
328 if (list_empty(&video
->irqqueue
)) {
329 spin_unlock_irqrestore(&video
->irqlock
, flags
);
333 done
= list_first_entry(&video
->irqqueue
,
334 struct vsp1_vb2_buffer
, queue
);
336 /* In DU output mode reuse the buffer if the list is singular. */
337 if (pipe
->lif
&& list_is_singular(&video
->irqqueue
)) {
338 spin_unlock_irqrestore(&video
->irqlock
, flags
);
342 list_del(&done
->queue
);
344 if (!list_empty(&video
->irqqueue
))
345 next
= list_first_entry(&video
->irqqueue
,
346 struct vsp1_vb2_buffer
, queue
);
348 spin_unlock_irqrestore(&video
->irqlock
, flags
);
350 done
->buf
.sequence
= pipe
->sequence
;
351 done
->buf
.vb2_buf
.timestamp
= ktime_get_ns();
352 for (i
= 0; i
< done
->buf
.vb2_buf
.num_planes
; ++i
)
353 vb2_set_plane_payload(&done
->buf
.vb2_buf
, i
,
354 vb2_plane_size(&done
->buf
.vb2_buf
, i
));
355 vb2_buffer_done(&done
->buf
.vb2_buf
, VB2_BUF_STATE_DONE
);
360 static void vsp1_video_frame_end(struct vsp1_pipeline
*pipe
,
361 struct vsp1_rwpf
*rwpf
)
363 struct vsp1_video
*video
= rwpf
->video
;
364 struct vsp1_vb2_buffer
*buf
;
366 buf
= vsp1_video_complete_buffer(video
);
370 video
->rwpf
->mem
= buf
->mem
;
371 pipe
->buffers_ready
|= 1 << video
->pipe_index
;
374 static void vsp1_video_pipeline_run_partition(struct vsp1_pipeline
*pipe
,
375 struct vsp1_dl_list
*dl
,
376 unsigned int partition
)
378 struct vsp1_dl_body
*dlb
= vsp1_dl_list_get_body0(dl
);
379 struct vsp1_entity
*entity
;
381 pipe
->partition
= &pipe
->part_table
[partition
];
383 list_for_each_entry(entity
, &pipe
->entities
, list_pipe
)
384 vsp1_entity_configure_partition(entity
, pipe
, dl
, dlb
);
387 static void vsp1_video_pipeline_run(struct vsp1_pipeline
*pipe
)
389 struct vsp1_device
*vsp1
= pipe
->output
->entity
.vsp1
;
390 struct vsp1_entity
*entity
;
391 struct vsp1_dl_body
*dlb
;
392 struct vsp1_dl_list
*dl
;
393 unsigned int partition
;
395 dl
= vsp1_dl_list_get(pipe
->output
->dlm
);
398 * If the VSP hardware isn't configured yet (which occurs either when
399 * processing the first frame or after a system suspend/resume), add the
400 * cached stream configuration to the display list to perform a full
403 if (!pipe
->configured
)
404 vsp1_dl_list_add_body(dl
, pipe
->stream_config
);
406 dlb
= vsp1_dl_list_get_body0(dl
);
408 list_for_each_entry(entity
, &pipe
->entities
, list_pipe
)
409 vsp1_entity_configure_frame(entity
, pipe
, dl
, dlb
);
411 /* Run the first partition. */
412 vsp1_video_pipeline_run_partition(pipe
, dl
, 0);
414 /* Process consecutive partitions as necessary. */
415 for (partition
= 1; partition
< pipe
->partitions
; ++partition
) {
416 struct vsp1_dl_list
*dl_next
;
418 dl_next
= vsp1_dl_list_get(pipe
->output
->dlm
);
421 * An incomplete chain will still function, but output only
422 * the partitions that had a dl available. The frame end
423 * interrupt will be marked on the last dl in the chain.
426 dev_err(vsp1
->dev
, "Failed to obtain a dl list. Frame will be incomplete\n");
430 vsp1_video_pipeline_run_partition(pipe
, dl_next
, partition
);
431 vsp1_dl_list_add_chain(dl
, dl_next
);
434 /* Complete, and commit the head display list. */
435 vsp1_dl_list_commit(dl
, false);
436 pipe
->configured
= true;
438 vsp1_pipeline_run(pipe
);
441 static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline
*pipe
,
442 unsigned int completion
)
444 struct vsp1_device
*vsp1
= pipe
->output
->entity
.vsp1
;
445 enum vsp1_pipeline_state state
;
449 /* M2M Pipelines should never call here with an incomplete frame. */
450 WARN_ON_ONCE(!(completion
& VSP1_DL_FRAME_END_COMPLETED
));
452 spin_lock_irqsave(&pipe
->irqlock
, flags
);
454 /* Complete buffers on all video nodes. */
455 for (i
= 0; i
< vsp1
->info
->rpf_count
; ++i
) {
456 if (!pipe
->inputs
[i
])
459 vsp1_video_frame_end(pipe
, pipe
->inputs
[i
]);
462 vsp1_video_frame_end(pipe
, pipe
->output
);
465 pipe
->state
= VSP1_PIPELINE_STOPPED
;
468 * If a stop has been requested, mark the pipeline as stopped and
469 * return. Otherwise restart the pipeline if ready.
471 if (state
== VSP1_PIPELINE_STOPPING
)
473 else if (vsp1_pipeline_ready(pipe
))
474 vsp1_video_pipeline_run(pipe
);
476 spin_unlock_irqrestore(&pipe
->irqlock
, flags
);
479 static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline
*pipe
,
480 struct vsp1_rwpf
*input
,
481 struct vsp1_rwpf
*output
)
483 struct media_entity_enum ent_enum
;
484 struct vsp1_entity
*entity
;
485 struct media_pad
*pad
;
486 struct vsp1_brx
*brx
= NULL
;
489 ret
= media_entity_enum_init(&ent_enum
, &input
->entity
.vsp1
->media_dev
);
494 * The main data path doesn't include the HGO or HGT, use
495 * vsp1_entity_remote_pad() to traverse the graph.
498 pad
= vsp1_entity_remote_pad(&input
->entity
.pads
[RWPF_PAD_SOURCE
]);
506 /* We've reached a video node, that shouldn't have happened. */
507 if (!is_media_entity_v4l2_subdev(pad
->entity
)) {
512 entity
= to_vsp1_entity(
513 media_entity_to_v4l2_subdev(pad
->entity
));
516 * A BRU or BRS is present in the pipeline, store its input pad
517 * number in the input RPF for use when configuring the RPF.
519 if (entity
->type
== VSP1_ENTITY_BRU
||
520 entity
->type
== VSP1_ENTITY_BRS
) {
521 /* BRU and BRS can't be chained. */
527 brx
= to_brx(&entity
->subdev
);
528 brx
->inputs
[pad
->index
].rpf
= input
;
529 input
->brx_input
= pad
->index
;
532 /* We've reached the WPF, we're done. */
533 if (entity
->type
== VSP1_ENTITY_WPF
)
536 /* Ensure the branch has no loop. */
537 if (media_entity_enum_test_and_set(&ent_enum
,
538 &entity
->subdev
.entity
)) {
543 /* UDS can't be chained. */
544 if (entity
->type
== VSP1_ENTITY_UDS
) {
551 pipe
->uds_input
= brx
? &brx
->entity
: &input
->entity
;
554 /* Follow the source link, ignoring any HGO or HGT. */
555 pad
= &entity
->pads
[entity
->source_pad
];
556 pad
= vsp1_entity_remote_pad(pad
);
559 /* The last entity must be the output WPF. */
560 if (entity
!= &output
->entity
)
564 media_entity_enum_cleanup(&ent_enum
);
569 static int vsp1_video_pipeline_build(struct vsp1_pipeline
*pipe
,
570 struct vsp1_video
*video
)
572 struct media_graph graph
;
573 struct media_entity
*entity
= &video
->video
.entity
;
574 struct media_device
*mdev
= entity
->graph_obj
.mdev
;
578 /* Walk the graph to locate the entities and video nodes. */
579 ret
= media_graph_walk_init(&graph
, mdev
);
583 media_graph_walk_start(&graph
, entity
);
585 while ((entity
= media_graph_walk_next(&graph
))) {
586 struct v4l2_subdev
*subdev
;
587 struct vsp1_rwpf
*rwpf
;
588 struct vsp1_entity
*e
;
590 if (!is_media_entity_v4l2_subdev(entity
))
593 subdev
= media_entity_to_v4l2_subdev(entity
);
594 e
= to_vsp1_entity(subdev
);
595 list_add_tail(&e
->list_pipe
, &pipe
->entities
);
599 case VSP1_ENTITY_RPF
:
600 rwpf
= to_rwpf(subdev
);
601 pipe
->inputs
[rwpf
->entity
.index
] = rwpf
;
602 rwpf
->video
->pipe_index
= ++pipe
->num_inputs
;
605 case VSP1_ENTITY_WPF
:
606 rwpf
= to_rwpf(subdev
);
608 rwpf
->video
->pipe_index
= 0;
611 case VSP1_ENTITY_LIF
:
615 case VSP1_ENTITY_BRU
:
616 case VSP1_ENTITY_BRS
:
620 case VSP1_ENTITY_HGO
:
624 case VSP1_ENTITY_HGT
:
633 media_graph_walk_cleanup(&graph
);
635 /* We need one output and at least one input. */
636 if (pipe
->num_inputs
== 0 || !pipe
->output
)
640 * Follow links downstream for each input and make sure the graph
641 * contains no loop and that all branches end at the output WPF.
643 for (i
= 0; i
< video
->vsp1
->info
->rpf_count
; ++i
) {
644 if (!pipe
->inputs
[i
])
647 ret
= vsp1_video_pipeline_build_branch(pipe
, pipe
->inputs
[i
],
656 static int vsp1_video_pipeline_init(struct vsp1_pipeline
*pipe
,
657 struct vsp1_video
*video
)
659 vsp1_pipeline_init(pipe
);
661 pipe
->frame_end
= vsp1_video_pipeline_frame_end
;
663 return vsp1_video_pipeline_build(pipe
, video
);
666 static struct vsp1_pipeline
*vsp1_video_pipeline_get(struct vsp1_video
*video
)
668 struct vsp1_pipeline
*pipe
;
672 * Get a pipeline object for the video node. If a pipeline has already
673 * been allocated just increment its reference count and return it.
674 * Otherwise allocate a new pipeline and initialize it, it will be freed
675 * when the last reference is released.
677 if (!video
->rwpf
->entity
.pipe
) {
678 pipe
= kzalloc(sizeof(*pipe
), GFP_KERNEL
);
680 return ERR_PTR(-ENOMEM
);
682 ret
= vsp1_video_pipeline_init(pipe
, video
);
684 vsp1_pipeline_reset(pipe
);
689 pipe
= video
->rwpf
->entity
.pipe
;
690 kref_get(&pipe
->kref
);
696 static void vsp1_video_pipeline_release(struct kref
*kref
)
698 struct vsp1_pipeline
*pipe
= container_of(kref
, typeof(*pipe
), kref
);
700 vsp1_pipeline_reset(pipe
);
704 static void vsp1_video_pipeline_put(struct vsp1_pipeline
*pipe
)
706 struct media_device
*mdev
= &pipe
->output
->entity
.vsp1
->media_dev
;
708 mutex_lock(&mdev
->graph_mutex
);
709 kref_put(&pipe
->kref
, vsp1_video_pipeline_release
);
710 mutex_unlock(&mdev
->graph_mutex
);
713 /* -----------------------------------------------------------------------------
714 * videobuf2 Queue Operations
718 vsp1_video_queue_setup(struct vb2_queue
*vq
,
719 unsigned int *nbuffers
, unsigned int *nplanes
,
720 unsigned int sizes
[], struct device
*alloc_devs
[])
722 struct vsp1_video
*video
= vb2_get_drv_priv(vq
);
723 const struct v4l2_pix_format_mplane
*format
= &video
->rwpf
->format
;
727 if (*nplanes
!= format
->num_planes
)
730 for (i
= 0; i
< *nplanes
; i
++)
731 if (sizes
[i
] < format
->plane_fmt
[i
].sizeimage
)
736 *nplanes
= format
->num_planes
;
738 for (i
= 0; i
< format
->num_planes
; ++i
)
739 sizes
[i
] = format
->plane_fmt
[i
].sizeimage
;
744 static int vsp1_video_buffer_prepare(struct vb2_buffer
*vb
)
746 struct vb2_v4l2_buffer
*vbuf
= to_vb2_v4l2_buffer(vb
);
747 struct vsp1_video
*video
= vb2_get_drv_priv(vb
->vb2_queue
);
748 struct vsp1_vb2_buffer
*buf
= to_vsp1_vb2_buffer(vbuf
);
749 const struct v4l2_pix_format_mplane
*format
= &video
->rwpf
->format
;
752 if (vb
->num_planes
< format
->num_planes
)
755 for (i
= 0; i
< vb
->num_planes
; ++i
) {
756 buf
->mem
.addr
[i
] = vb2_dma_contig_plane_dma_addr(vb
, i
);
758 if (vb2_plane_size(vb
, i
) < format
->plane_fmt
[i
].sizeimage
)
763 buf
->mem
.addr
[i
] = 0;
768 static void vsp1_video_buffer_queue(struct vb2_buffer
*vb
)
770 struct vb2_v4l2_buffer
*vbuf
= to_vb2_v4l2_buffer(vb
);
771 struct vsp1_video
*video
= vb2_get_drv_priv(vb
->vb2_queue
);
772 struct vsp1_pipeline
*pipe
= video
->rwpf
->entity
.pipe
;
773 struct vsp1_vb2_buffer
*buf
= to_vsp1_vb2_buffer(vbuf
);
777 spin_lock_irqsave(&video
->irqlock
, flags
);
778 empty
= list_empty(&video
->irqqueue
);
779 list_add_tail(&buf
->queue
, &video
->irqqueue
);
780 spin_unlock_irqrestore(&video
->irqlock
, flags
);
785 spin_lock_irqsave(&pipe
->irqlock
, flags
);
787 video
->rwpf
->mem
= buf
->mem
;
788 pipe
->buffers_ready
|= 1 << video
->pipe_index
;
790 if (vb2_is_streaming(&video
->queue
) &&
791 vsp1_pipeline_ready(pipe
))
792 vsp1_video_pipeline_run(pipe
);
794 spin_unlock_irqrestore(&pipe
->irqlock
, flags
);
797 static int vsp1_video_setup_pipeline(struct vsp1_pipeline
*pipe
)
799 struct vsp1_entity
*entity
;
802 /* Determine this pipelines sizes for image partitioning support. */
803 ret
= vsp1_video_pipeline_setup_partitions(pipe
);
808 struct vsp1_uds
*uds
= to_uds(&pipe
->uds
->subdev
);
811 * If a BRU or BRS is present in the pipeline before the UDS,
812 * the alpha component doesn't need to be scaled as the BRU and
813 * BRS output alpha value is fixed to 255. Otherwise we need to
814 * scale the alpha component only when available at the input
817 if (pipe
->uds_input
->type
== VSP1_ENTITY_BRU
||
818 pipe
->uds_input
->type
== VSP1_ENTITY_BRS
) {
819 uds
->scale_alpha
= false;
821 struct vsp1_rwpf
*rpf
=
822 to_rwpf(&pipe
->uds_input
->subdev
);
824 uds
->scale_alpha
= rpf
->fmtinfo
->alpha
;
829 * Compute and cache the stream configuration into a body. The cached
830 * body will be added to the display list by vsp1_video_pipeline_run()
831 * whenever the pipeline needs to be fully reconfigured.
833 pipe
->stream_config
= vsp1_dlm_dl_body_get(pipe
->output
->dlm
);
834 if (!pipe
->stream_config
)
837 list_for_each_entry(entity
, &pipe
->entities
, list_pipe
) {
838 vsp1_entity_route_setup(entity
, pipe
, pipe
->stream_config
);
839 vsp1_entity_configure_stream(entity
, pipe
, pipe
->stream_config
);
845 static void vsp1_video_release_buffers(struct vsp1_video
*video
)
847 struct vsp1_vb2_buffer
*buffer
;
850 /* Remove all buffers from the IRQ queue. */
851 spin_lock_irqsave(&video
->irqlock
, flags
);
852 list_for_each_entry(buffer
, &video
->irqqueue
, queue
)
853 vb2_buffer_done(&buffer
->buf
.vb2_buf
, VB2_BUF_STATE_ERROR
);
854 INIT_LIST_HEAD(&video
->irqqueue
);
855 spin_unlock_irqrestore(&video
->irqlock
, flags
);
858 static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline
*pipe
)
860 lockdep_assert_held(&pipe
->lock
);
862 /* Release any cached configuration from our output video. */
863 vsp1_dl_body_put(pipe
->stream_config
);
864 pipe
->stream_config
= NULL
;
865 pipe
->configured
= false;
867 /* Release our partition table allocation. */
868 kfree(pipe
->part_table
);
869 pipe
->part_table
= NULL
;
872 static int vsp1_video_start_streaming(struct vb2_queue
*vq
, unsigned int count
)
874 struct vsp1_video
*video
= vb2_get_drv_priv(vq
);
875 struct vsp1_pipeline
*pipe
= video
->rwpf
->entity
.pipe
;
876 bool start_pipeline
= false;
880 mutex_lock(&pipe
->lock
);
881 if (pipe
->stream_count
== pipe
->num_inputs
) {
882 ret
= vsp1_video_setup_pipeline(pipe
);
884 vsp1_video_release_buffers(video
);
885 vsp1_video_cleanup_pipeline(pipe
);
886 mutex_unlock(&pipe
->lock
);
890 start_pipeline
= true;
893 pipe
->stream_count
++;
894 mutex_unlock(&pipe
->lock
);
897 * vsp1_pipeline_ready() is not sufficient to establish that all streams
898 * are prepared and the pipeline is configured, as multiple streams
899 * can race through streamon with buffers already queued; Therefore we
900 * don't even attempt to start the pipeline until the last stream has
901 * called through here.
906 spin_lock_irqsave(&pipe
->irqlock
, flags
);
907 if (vsp1_pipeline_ready(pipe
))
908 vsp1_video_pipeline_run(pipe
);
909 spin_unlock_irqrestore(&pipe
->irqlock
, flags
);
914 static void vsp1_video_stop_streaming(struct vb2_queue
*vq
)
916 struct vsp1_video
*video
= vb2_get_drv_priv(vq
);
917 struct vsp1_pipeline
*pipe
= video
->rwpf
->entity
.pipe
;
922 * Clear the buffers ready flag to make sure the device won't be started
923 * by a QBUF on the video node on the other side of the pipeline.
925 spin_lock_irqsave(&video
->irqlock
, flags
);
926 pipe
->buffers_ready
&= ~(1 << video
->pipe_index
);
927 spin_unlock_irqrestore(&video
->irqlock
, flags
);
929 mutex_lock(&pipe
->lock
);
930 if (--pipe
->stream_count
== pipe
->num_inputs
) {
931 /* Stop the pipeline. */
932 ret
= vsp1_pipeline_stop(pipe
);
933 if (ret
== -ETIMEDOUT
)
934 dev_err(video
->vsp1
->dev
, "pipeline stop timeout\n");
936 vsp1_video_cleanup_pipeline(pipe
);
938 mutex_unlock(&pipe
->lock
);
940 media_pipeline_stop(&video
->video
.entity
);
941 vsp1_video_release_buffers(video
);
942 vsp1_video_pipeline_put(pipe
);
945 static const struct vb2_ops vsp1_video_queue_qops
= {
946 .queue_setup
= vsp1_video_queue_setup
,
947 .buf_prepare
= vsp1_video_buffer_prepare
,
948 .buf_queue
= vsp1_video_buffer_queue
,
949 .wait_prepare
= vb2_ops_wait_prepare
,
950 .wait_finish
= vb2_ops_wait_finish
,
951 .start_streaming
= vsp1_video_start_streaming
,
952 .stop_streaming
= vsp1_video_stop_streaming
,
955 /* -----------------------------------------------------------------------------
960 vsp1_video_querycap(struct file
*file
, void *fh
, struct v4l2_capability
*cap
)
962 struct v4l2_fh
*vfh
= file
->private_data
;
963 struct vsp1_video
*video
= to_vsp1_video(vfh
->vdev
);
965 cap
->capabilities
= V4L2_CAP_DEVICE_CAPS
| V4L2_CAP_STREAMING
966 | V4L2_CAP_VIDEO_CAPTURE_MPLANE
967 | V4L2_CAP_VIDEO_OUTPUT_MPLANE
;
969 if (video
->type
== V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
)
970 cap
->device_caps
= V4L2_CAP_VIDEO_CAPTURE_MPLANE
971 | V4L2_CAP_STREAMING
;
973 cap
->device_caps
= V4L2_CAP_VIDEO_OUTPUT_MPLANE
974 | V4L2_CAP_STREAMING
;
976 strscpy(cap
->driver
, "vsp1", sizeof(cap
->driver
));
977 strscpy(cap
->card
, video
->video
.name
, sizeof(cap
->card
));
978 snprintf(cap
->bus_info
, sizeof(cap
->bus_info
), "platform:%s",
979 dev_name(video
->vsp1
->dev
));
985 vsp1_video_get_format(struct file
*file
, void *fh
, struct v4l2_format
*format
)
987 struct v4l2_fh
*vfh
= file
->private_data
;
988 struct vsp1_video
*video
= to_vsp1_video(vfh
->vdev
);
990 if (format
->type
!= video
->queue
.type
)
993 mutex_lock(&video
->lock
);
994 format
->fmt
.pix_mp
= video
->rwpf
->format
;
995 mutex_unlock(&video
->lock
);
1001 vsp1_video_try_format(struct file
*file
, void *fh
, struct v4l2_format
*format
)
1003 struct v4l2_fh
*vfh
= file
->private_data
;
1004 struct vsp1_video
*video
= to_vsp1_video(vfh
->vdev
);
1006 if (format
->type
!= video
->queue
.type
)
1009 return __vsp1_video_try_format(video
, &format
->fmt
.pix_mp
, NULL
);
1013 vsp1_video_set_format(struct file
*file
, void *fh
, struct v4l2_format
*format
)
1015 struct v4l2_fh
*vfh
= file
->private_data
;
1016 struct vsp1_video
*video
= to_vsp1_video(vfh
->vdev
);
1017 const struct vsp1_format_info
*info
;
1020 if (format
->type
!= video
->queue
.type
)
1023 ret
= __vsp1_video_try_format(video
, &format
->fmt
.pix_mp
, &info
);
1027 mutex_lock(&video
->lock
);
1029 if (vb2_is_busy(&video
->queue
)) {
1034 video
->rwpf
->format
= format
->fmt
.pix_mp
;
1035 video
->rwpf
->fmtinfo
= info
;
1038 mutex_unlock(&video
->lock
);
1043 vsp1_video_streamon(struct file
*file
, void *fh
, enum v4l2_buf_type type
)
1045 struct v4l2_fh
*vfh
= file
->private_data
;
1046 struct vsp1_video
*video
= to_vsp1_video(vfh
->vdev
);
1047 struct media_device
*mdev
= &video
->vsp1
->media_dev
;
1048 struct vsp1_pipeline
*pipe
;
1051 if (video
->queue
.owner
&& video
->queue
.owner
!= file
->private_data
)
1055 * Get a pipeline for the video node and start streaming on it. No link
1056 * touching an entity in the pipeline can be activated or deactivated
1057 * once streaming is started.
1059 mutex_lock(&mdev
->graph_mutex
);
1061 pipe
= vsp1_video_pipeline_get(video
);
1063 mutex_unlock(&mdev
->graph_mutex
);
1064 return PTR_ERR(pipe
);
1067 ret
= __media_pipeline_start(&video
->video
.entity
, &pipe
->pipe
);
1069 mutex_unlock(&mdev
->graph_mutex
);
1073 mutex_unlock(&mdev
->graph_mutex
);
1076 * Verify that the configured format matches the output of the connected
1079 ret
= vsp1_video_verify_format(video
);
1083 /* Start the queue. */
1084 ret
= vb2_streamon(&video
->queue
, type
);
1091 media_pipeline_stop(&video
->video
.entity
);
1093 vsp1_video_pipeline_put(pipe
);
1097 static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops
= {
1098 .vidioc_querycap
= vsp1_video_querycap
,
1099 .vidioc_g_fmt_vid_cap_mplane
= vsp1_video_get_format
,
1100 .vidioc_s_fmt_vid_cap_mplane
= vsp1_video_set_format
,
1101 .vidioc_try_fmt_vid_cap_mplane
= vsp1_video_try_format
,
1102 .vidioc_g_fmt_vid_out_mplane
= vsp1_video_get_format
,
1103 .vidioc_s_fmt_vid_out_mplane
= vsp1_video_set_format
,
1104 .vidioc_try_fmt_vid_out_mplane
= vsp1_video_try_format
,
1105 .vidioc_reqbufs
= vb2_ioctl_reqbufs
,
1106 .vidioc_querybuf
= vb2_ioctl_querybuf
,
1107 .vidioc_qbuf
= vb2_ioctl_qbuf
,
1108 .vidioc_dqbuf
= vb2_ioctl_dqbuf
,
1109 .vidioc_expbuf
= vb2_ioctl_expbuf
,
1110 .vidioc_create_bufs
= vb2_ioctl_create_bufs
,
1111 .vidioc_prepare_buf
= vb2_ioctl_prepare_buf
,
1112 .vidioc_streamon
= vsp1_video_streamon
,
1113 .vidioc_streamoff
= vb2_ioctl_streamoff
,
1116 /* -----------------------------------------------------------------------------
1117 * V4L2 File Operations
1120 static int vsp1_video_open(struct file
*file
)
1122 struct vsp1_video
*video
= video_drvdata(file
);
1123 struct v4l2_fh
*vfh
;
1126 vfh
= kzalloc(sizeof(*vfh
), GFP_KERNEL
);
1130 v4l2_fh_init(vfh
, &video
->video
);
1133 file
->private_data
= vfh
;
1135 ret
= vsp1_device_get(video
->vsp1
);
1145 static int vsp1_video_release(struct file
*file
)
1147 struct vsp1_video
*video
= video_drvdata(file
);
1148 struct v4l2_fh
*vfh
= file
->private_data
;
1150 mutex_lock(&video
->lock
);
1151 if (video
->queue
.owner
== vfh
) {
1152 vb2_queue_release(&video
->queue
);
1153 video
->queue
.owner
= NULL
;
1155 mutex_unlock(&video
->lock
);
1157 vsp1_device_put(video
->vsp1
);
1159 v4l2_fh_release(file
);
1161 file
->private_data
= NULL
;
1166 static const struct v4l2_file_operations vsp1_video_fops
= {
1167 .owner
= THIS_MODULE
,
1168 .unlocked_ioctl
= video_ioctl2
,
1169 .open
= vsp1_video_open
,
1170 .release
= vsp1_video_release
,
1171 .poll
= vb2_fop_poll
,
1172 .mmap
= vb2_fop_mmap
,
1175 /* -----------------------------------------------------------------------------
1176 * Suspend and Resume
1179 void vsp1_video_suspend(struct vsp1_device
*vsp1
)
1181 unsigned long flags
;
1186 * To avoid increasing the system suspend time needlessly, loop over the
1187 * pipelines twice, first to set them all to the stopping state, and
1188 * then to wait for the stop to complete.
1190 for (i
= 0; i
< vsp1
->info
->wpf_count
; ++i
) {
1191 struct vsp1_rwpf
*wpf
= vsp1
->wpf
[i
];
1192 struct vsp1_pipeline
*pipe
;
1197 pipe
= wpf
->entity
.pipe
;
1201 spin_lock_irqsave(&pipe
->irqlock
, flags
);
1202 if (pipe
->state
== VSP1_PIPELINE_RUNNING
)
1203 pipe
->state
= VSP1_PIPELINE_STOPPING
;
1204 spin_unlock_irqrestore(&pipe
->irqlock
, flags
);
1207 for (i
= 0; i
< vsp1
->info
->wpf_count
; ++i
) {
1208 struct vsp1_rwpf
*wpf
= vsp1
->wpf
[i
];
1209 struct vsp1_pipeline
*pipe
;
1214 pipe
= wpf
->entity
.pipe
;
1218 ret
= wait_event_timeout(pipe
->wq
, vsp1_pipeline_stopped(pipe
),
1219 msecs_to_jiffies(500));
1221 dev_warn(vsp1
->dev
, "pipeline %u stop timeout\n",
1226 void vsp1_video_resume(struct vsp1_device
*vsp1
)
1228 unsigned long flags
;
1231 /* Resume all running pipelines. */
1232 for (i
= 0; i
< vsp1
->info
->wpf_count
; ++i
) {
1233 struct vsp1_rwpf
*wpf
= vsp1
->wpf
[i
];
1234 struct vsp1_pipeline
*pipe
;
1239 pipe
= wpf
->entity
.pipe
;
1244 * The hardware may have been reset during a suspend and will
1245 * need a full reconfiguration.
1247 pipe
->configured
= false;
1249 spin_lock_irqsave(&pipe
->irqlock
, flags
);
1250 if (vsp1_pipeline_ready(pipe
))
1251 vsp1_video_pipeline_run(pipe
);
1252 spin_unlock_irqrestore(&pipe
->irqlock
, flags
);
1256 /* -----------------------------------------------------------------------------
1257 * Initialization and Cleanup
1260 struct vsp1_video
*vsp1_video_create(struct vsp1_device
*vsp1
,
1261 struct vsp1_rwpf
*rwpf
)
1263 struct vsp1_video
*video
;
1264 const char *direction
;
1267 video
= devm_kzalloc(vsp1
->dev
, sizeof(*video
), GFP_KERNEL
);
1269 return ERR_PTR(-ENOMEM
);
1271 rwpf
->video
= video
;
1276 if (rwpf
->entity
.type
== VSP1_ENTITY_RPF
) {
1277 direction
= "input";
1278 video
->type
= V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE
;
1279 video
->pad
.flags
= MEDIA_PAD_FL_SOURCE
;
1280 video
->video
.vfl_dir
= VFL_DIR_TX
;
1282 direction
= "output";
1283 video
->type
= V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
;
1284 video
->pad
.flags
= MEDIA_PAD_FL_SINK
;
1285 video
->video
.vfl_dir
= VFL_DIR_RX
;
1288 mutex_init(&video
->lock
);
1289 spin_lock_init(&video
->irqlock
);
1290 INIT_LIST_HEAD(&video
->irqqueue
);
1292 /* Initialize the media entity... */
1293 ret
= media_entity_pads_init(&video
->video
.entity
, 1, &video
->pad
);
1295 return ERR_PTR(ret
);
1297 /* ... and the format ... */
1298 rwpf
->format
.pixelformat
= VSP1_VIDEO_DEF_FORMAT
;
1299 rwpf
->format
.width
= VSP1_VIDEO_DEF_WIDTH
;
1300 rwpf
->format
.height
= VSP1_VIDEO_DEF_HEIGHT
;
1301 __vsp1_video_try_format(video
, &rwpf
->format
, &rwpf
->fmtinfo
);
1303 /* ... and the video node... */
1304 video
->video
.v4l2_dev
= &video
->vsp1
->v4l2_dev
;
1305 video
->video
.fops
= &vsp1_video_fops
;
1306 snprintf(video
->video
.name
, sizeof(video
->video
.name
), "%s %s",
1307 rwpf
->entity
.subdev
.name
, direction
);
1308 video
->video
.vfl_type
= VFL_TYPE_GRABBER
;
1309 video
->video
.release
= video_device_release_empty
;
1310 video
->video
.ioctl_ops
= &vsp1_video_ioctl_ops
;
1312 video_set_drvdata(&video
->video
, video
);
1314 video
->queue
.type
= video
->type
;
1315 video
->queue
.io_modes
= VB2_MMAP
| VB2_USERPTR
| VB2_DMABUF
;
1316 video
->queue
.lock
= &video
->lock
;
1317 video
->queue
.drv_priv
= video
;
1318 video
->queue
.buf_struct_size
= sizeof(struct vsp1_vb2_buffer
);
1319 video
->queue
.ops
= &vsp1_video_queue_qops
;
1320 video
->queue
.mem_ops
= &vb2_dma_contig_memops
;
1321 video
->queue
.timestamp_flags
= V4L2_BUF_FLAG_TIMESTAMP_COPY
;
1322 video
->queue
.dev
= video
->vsp1
->bus_master
;
1323 ret
= vb2_queue_init(&video
->queue
);
1325 dev_err(video
->vsp1
->dev
, "failed to initialize vb2 queue\n");
1329 /* ... and register the video device. */
1330 video
->video
.queue
= &video
->queue
;
1331 ret
= video_register_device(&video
->video
, VFL_TYPE_GRABBER
, -1);
1333 dev_err(video
->vsp1
->dev
, "failed to register video device\n");
1340 vsp1_video_cleanup(video
);
1341 return ERR_PTR(ret
);
1344 void vsp1_video_cleanup(struct vsp1_video
*video
)
1346 if (video_is_registered(&video
->video
))
1347 video_unregister_device(&video
->video
);
1349 media_entity_cleanup(&video
->video
.entity
);