// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver
 *
 * Copyright (c) 2013 Texas Instruments Inc.
 * David Griego, <dagriego@biglakesoftware.com>
 * Dale Farnsworth, <dale@farnsworth.org>
 * Archit Taneja, <archit@ti.com>
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * Based on the virtual v4l2-mem2mem example device
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/log2.h>
#include <linux/sizes.h>

#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "vpdma_priv.h"

#define VPE_MODULE_NAME "vpe"
/* minimum and maximum frame sizes */

/* required alignments */
#define S_ALIGN		0	/* multiple of 1 */
#define H_ALIGN		1	/* multiple of 2 */

/* flags that indicate a format can be used for capture/output */
#define VPE_FMT_TYPE_CAPTURE	(1 << 0)
#define VPE_FMT_TYPE_OUTPUT	(1 << 1)

/* used as plane indices */
#define VPE_MAX_PLANES	2
/* per m2m context info */
#define VPE_MAX_SRC_BUFS	3	/* need 3 src fields to de-interlace */

#define VPE_DEF_BUFS_PER_JOB	1	/* default one buffer per batch job */

/*
 * each VPE context can need up to 3 config descriptors, 7 input descriptors,
 * 3 output descriptors, and 10 control descriptors
 */
#define VPE_DESC_LIST_SIZE	(10 * VPDMA_DTD_DESC_SIZE +	\
					13 * VPDMA_CFD_CTD_DESC_SIZE)
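/*
 * The arithmetic behind the sizing above: the 7 input and 3 output
 * descriptors fill the 10 data transfer descriptor (DTD) slots, while the
 * 3 config and 10 control descriptors fill the 13 config/control
 * (CFD/CTD) slots.
 */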
#define vpe_dbg(vpedev, fmt, arg...)	\
		dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg)
#define vpe_err(vpedev, fmt, arg...)	\
		dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg)
struct vpe_us_coeffs {
	unsigned short	anchor_fid0_c0;
	unsigned short	anchor_fid0_c1;
	unsigned short	anchor_fid0_c2;
	unsigned short	anchor_fid0_c3;
	unsigned short	interp_fid0_c0;
	unsigned short	interp_fid0_c1;
	unsigned short	interp_fid0_c2;
	unsigned short	interp_fid0_c3;
	unsigned short	anchor_fid1_c0;
	unsigned short	anchor_fid1_c1;
	unsigned short	anchor_fid1_c2;
	unsigned short	anchor_fid1_c3;
	unsigned short	interp_fid1_c0;
	unsigned short	interp_fid1_c1;
	unsigned short	interp_fid1_c2;
	unsigned short	interp_fid1_c3;
};
/*
 * Default upsampler coefficients
 */
static const struct vpe_us_coeffs us_coeffs[] = {
	{
		/* Coefficients for progressive input */
		0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
		0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
	},
	{
		/* Coefficients for Top Field Interlaced input */
		0x0051, 0x03D5, 0x3FE3, 0x3FF7, 0x3FB5, 0x02E9, 0x018F, 0x3FD3,
		/* Coefficients for Bottom Field Interlaced input */
		0x016B, 0x0247, 0x00B1, 0x3F9D, 0x3FCF, 0x03DB, 0x005D, 0x3FF9,
	},
};
/*
 * The following registers configure some of the parameters of the motion
 * and edge detection blocks inside the DEI. They generally remain the same,
 * but they could be exposed to userspace later if someone needs to tweak
 * them.
 */
struct vpe_dei_regs {
	unsigned long mdt_spacial_freq_thr_reg;		/* VPE_DEI_REG2 */
	unsigned long edi_config_reg;			/* VPE_DEI_REG3 */
	unsigned long edi_lut_reg0;			/* VPE_DEI_REG4 */
	unsigned long edi_lut_reg1;			/* VPE_DEI_REG5 */
	unsigned long edi_lut_reg2;			/* VPE_DEI_REG6 */
	unsigned long edi_lut_reg3;			/* VPE_DEI_REG7 */
};
/*
 * default expert DEI register values, unlikely to be modified.
 */
static const struct vpe_dei_regs dei_regs = {
	.mdt_spacial_freq_thr_reg = 0x020C0804u,
	.edi_config_reg = 0x0118100Cu,
	.edi_lut_reg0 = 0x08040200u,
	.edi_lut_reg1 = 0x1010100Cu,
	.edi_lut_reg2 = 0x10101010u,
	.edi_lut_reg3 = 0x10101010u,
};
/*
 * The port_data structure contains per-port data.
 */
struct vpe_port_data {
	enum vpdma_channel channel;	/* VPDMA channel */
	u8	vb_index;		/* input frame f, f-1, f-2 index */
	u8	vb_part;		/* plane index for co-planar formats */
};
/*
 * Define indices into the port_data tables
 */
#define VPE_PORT_LUMA1_IN	0
#define VPE_PORT_CHROMA1_IN	1
#define VPE_PORT_LUMA2_IN	2
#define VPE_PORT_CHROMA2_IN	3
#define VPE_PORT_LUMA3_IN	4
#define VPE_PORT_CHROMA3_IN	5
#define VPE_PORT_MV_IN		6
#define VPE_PORT_MV_OUT		7
#define VPE_PORT_LUMA_OUT	8
#define VPE_PORT_CHROMA_OUT	9
#define VPE_PORT_RGB_OUT	10
static const struct vpe_port_data port_data[11] = {
	[VPE_PORT_LUMA1_IN] = {
		.channel	= VPE_CHAN_LUMA1_IN,
	},
	[VPE_PORT_CHROMA1_IN] = {
		.channel	= VPE_CHAN_CHROMA1_IN,
		.vb_part	= VPE_CHROMA,
	},
	[VPE_PORT_LUMA2_IN] = {
		.channel	= VPE_CHAN_LUMA2_IN,
		.vb_index	= 1,
	},
	[VPE_PORT_CHROMA2_IN] = {
		.channel	= VPE_CHAN_CHROMA2_IN,
		.vb_index	= 1,
		.vb_part	= VPE_CHROMA,
	},
	[VPE_PORT_LUMA3_IN] = {
		.channel	= VPE_CHAN_LUMA3_IN,
		.vb_index	= 2,
	},
	[VPE_PORT_CHROMA3_IN] = {
		.channel	= VPE_CHAN_CHROMA3_IN,
		.vb_index	= 2,
		.vb_part	= VPE_CHROMA,
	},
	[VPE_PORT_MV_IN] = {
		.channel	= VPE_CHAN_MV_IN,
	},
	[VPE_PORT_MV_OUT] = {
		.channel	= VPE_CHAN_MV_OUT,
	},
	[VPE_PORT_LUMA_OUT] = {
		.channel	= VPE_CHAN_LUMA_OUT,
	},
	[VPE_PORT_CHROMA_OUT] = {
		.channel	= VPE_CHAN_CHROMA_OUT,
		.vb_part	= VPE_CHROMA,
	},
	[VPE_PORT_RGB_OUT] = {
		.channel	= VPE_CHAN_RGB_OUT,
	},
};
/* driver info for each of the supported video formats */
struct vpe_fmt {
	u32	fourcc;			/* standard format identifier */
	u8	types;			/* CAPTURE and/or OUTPUT */
	u8	coplanar;		/* set for unpacked Luma and Chroma */
	/* vpdma format info for each plane */
	struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES];
};
static struct vpe_fmt vpe_formats[] = {
	{
		.fourcc		= V4L2_PIX_FMT_NV16,
		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
		.coplanar	= 1,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444],
				    &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_NV12,
		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
		.coplanar	= 1,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
				    &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_YUYV,
		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YCB422],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_UYVY,
		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CBY422],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_RGB24,
		.types		= VPE_FMT_TYPE_CAPTURE,
		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB24],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_RGB32,
		.types		= VPE_FMT_TYPE_CAPTURE,
		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ARGB32],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_BGR24,
		.types		= VPE_FMT_TYPE_CAPTURE,
		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_BGR24],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_BGR32,
		.types		= VPE_FMT_TYPE_CAPTURE,
		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_RGB565,
		.types		= VPE_FMT_TYPE_CAPTURE,
		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB565],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_RGB555,
		.types		= VPE_FMT_TYPE_CAPTURE,
		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGBA16_5551],
				  },
	},
};
/*
 * per-queue, driver-specific private data.
 * there is one source queue and one destination queue for each m2m context.
 */
struct vpe_q_data {
	unsigned int		width;		/* frame width */
	unsigned int		height;		/* frame height */
	unsigned int		nplanes;	/* Current number of planes */
	unsigned int		bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */
	enum v4l2_colorspace	colorspace;
	enum v4l2_field		field;		/* supported field value */
	unsigned int		flags;
	unsigned int		sizeimage[VPE_MAX_PLANES]; /* image size in memory */
	struct v4l2_rect	c_rect;		/* crop/compose rectangle */
	struct vpe_fmt		*fmt;		/* format info */
};
/* vpe_q_data flag bits */
#define	Q_DATA_FRAME_1D			BIT(0)
#define	Q_DATA_MODE_TILED		BIT(1)
#define	Q_DATA_INTERLACED_ALTERNATE	BIT(2)
#define	Q_DATA_INTERLACED_SEQ_TB	BIT(3)

#define Q_IS_INTERLACED		(Q_DATA_INTERLACED_ALTERNATE | \
				Q_DATA_INTERLACED_SEQ_TB)
/* find our format description corresponding to the passed v4l2_format */
static struct vpe_fmt *__find_format(u32 fourcc)
{
	struct vpe_fmt *fmt;
	unsigned int k;

	for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
		fmt = &vpe_formats[k];
		if (fmt->fourcc == fourcc)
			return fmt;
	}

	return NULL;
}
static struct vpe_fmt *find_format(struct v4l2_format *f)
{
	return __find_format(f->fmt.pix.pixelformat);
}
/*
 * there is one vpe_dev structure in the driver, it is shared by
 * all instances.
 */
struct vpe_dev {
	struct v4l2_device	v4l2_dev;
	struct video_device	vfd;
	struct v4l2_m2m_dev	*m2m_dev;

	atomic_t		num_instances;	/* count of driver instances */
	dma_addr_t		loaded_mmrs;	/* shadow mmrs in device */
	struct mutex		dev_mutex;
	spinlock_t		lock;

	void __iomem		*base;
	struct resource		*res;

	struct vpdma_data	vpdma_data;
	struct vpdma_data	*vpdma;		/* vpdma data handle */
	struct sc_data		*sc;		/* scaler data handle */
	struct csc_data		*csc;		/* csc data handle */
};
/*
 * There is one vpe_ctx structure for each m2m context.
 */
struct vpe_ctx {
	struct v4l2_fh		fh;
	struct vpe_dev		*dev;
	struct v4l2_ctrl_handler hdl;

	unsigned int		field;		/* current field */
	unsigned int		sequence;	/* current frame/field seq */
	unsigned int		aborting;	/* abort after next irq */

	unsigned int		bufs_per_job;	/* input buffers per batch */
	unsigned int		bufs_completed;	/* bufs done in this batch */

	struct vpe_q_data	q_data[2];	/* src & dst queue data */
	struct vb2_v4l2_buffer	*src_vbs[VPE_MAX_SRC_BUFS];
	struct vb2_v4l2_buffer	*dst_vb;

	dma_addr_t		mv_buf_dma[2];	/* dma addrs of motion vector in/out bufs */
	void			*mv_buf[2];	/* virtual addrs of motion vector bufs */
	size_t			mv_buf_size;	/* current motion vector buffer size */
	struct vpdma_buf	mmr_adb;	/* shadow reg addr/data block */
	struct vpdma_buf	sc_coeff_h;	/* h coeff buffer */
	struct vpdma_buf	sc_coeff_v;	/* v coeff buffer */
	struct vpdma_desc_list	desc_list;	/* DMA descriptor list */

	bool			deinterlacing;	/* using de-interlacer */
	bool			load_mmrs;	/* have new shadow reg values */

	unsigned int		src_mv_buf_selector;
};
#define Q_DATA_SRC	0
#define Q_DATA_DST	1

/*
 * M2M devices get 2 queues.
 * Return the queue given the type.
 */
static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx,
				     enum v4l2_buf_type type)
{
	switch (type) {
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		return &ctx->q_data[Q_DATA_SRC];
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		return &ctx->q_data[Q_DATA_DST];
	default:
		return NULL;
	}
}
static u32 read_reg(struct vpe_dev *dev, int offset)
{
	return ioread32(dev->base + offset);
}

static void write_reg(struct vpe_dev *dev, int offset, u32 value)
{
	iowrite32(value, dev->base + offset);
}
/* register field read/write helpers */
static int get_field(u32 value, u32 mask, int shift)
{
	return (value & (mask << shift)) >> shift;
}

static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift)
{
	return get_field(read_reg(dev, offset), mask, shift);
}

static void write_field(u32 *valp, u32 field, u32 mask, int shift)
{
	u32 val = *valp;

	val &= ~(mask << shift);
	val |= (field & mask) << shift;
	*valp = val;
}

static void write_field_reg(struct vpe_dev *dev, int offset, u32 field,
		u32 mask, int shift)
{
	u32 val = read_reg(dev, offset);

	write_field(&val, field, mask, shift);

	write_reg(dev, offset, val);
}
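/*
 * Illustrative use of the helpers above (a sketch, not driver code): to
 * update a 2-bit field at bit 4 of a register that currently reads 0x30
 * to the value 2:
 *
 *	u32 val = read_reg(dev, offset);	// val = 0x00000030
 *	write_field(&val, 2, 0x3, 4);		// val = 0x00000020
 *	write_reg(dev, offset, val);
 *
 * which is exactly what write_field_reg(dev, offset, 2, 0x3, 4) does in
 * a single call.
 */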
/*
 * DMA address/data block for the shadow registers
 */
struct vpe_mmr_adb {
	struct vpdma_adb_hdr	out_fmt_hdr;
	u32			out_fmt_reg[1];
	u32			out_fmt_pad[3];
	struct vpdma_adb_hdr	us1_hdr;
	u32			us1_regs[8];
	struct vpdma_adb_hdr	us2_hdr;
	u32			us2_regs[8];
	struct vpdma_adb_hdr	us3_hdr;
	u32			us3_regs[8];
	struct vpdma_adb_hdr	dei_hdr;
	u32			dei_regs[8];
	struct vpdma_adb_hdr	sc_hdr0;
	u32			sc_regs0[7];
	u32			sc_pad0[1];
	struct vpdma_adb_hdr	sc_hdr8;
	u32			sc_regs8[6];
	u32			sc_pad8[2];
	struct vpdma_adb_hdr	sc_hdr17;
	u32			sc_regs17[9];
	u32			sc_pad17[3];
	struct vpdma_adb_hdr	csc_hdr;
	u32			csc_regs[6];
	u32			csc_pad[2];
};
#define GET_OFFSET_TOP(ctx, obj, reg)	\
	((obj)->res->start - ctx->dev->res->start + reg)

#define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a)	\
	VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
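/*
 * For example (addresses hypothetical): if the scaler sub-block's resource
 * starts 0x700 bytes above the VPE resource start, then
 * GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC0) evaluates to 0x700 + CFG_SC0,
 * i.e. the scaler register offset expressed relative to the VPE top level,
 * which is the form the shadow-register ADB headers below require.
 */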
/*
 * Set the headers for all of the address/data block structures.
 */
static void init_adb_hdrs(struct vpe_ctx *ctx)
{
	VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT);
	VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0);
	VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0);
	VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0);
	VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE);
	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr0, sc_regs0,
		GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC0));
	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr8, sc_regs8,
		GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC8));
	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr17, sc_regs17,
		GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC17));
	VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs,
		GET_OFFSET_TOP(ctx, ctx->dev->csc, CSC_CSC00));
}
/*
 * Allocate or re-allocate the motion vector DMA buffers
 * There are two buffers, one for input and one for output.
 * However, the roles are reversed after each field is processed.
 * In other words, after each field is processed, the previous
 * output (dst) MV buffer becomes the new input (src) MV buffer.
 */
static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size)
{
	struct device *dev = ctx->dev->v4l2_dev.dev;

	if (ctx->mv_buf_size == size)
		return 0;

	if (ctx->mv_buf[0])
		dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0],
			ctx->mv_buf_dma[0]);

	if (ctx->mv_buf[1])
		dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1],
			ctx->mv_buf_dma[1]);

	if (size == 0)
		return 0;

	ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0],
					    GFP_KERNEL);
	if (!ctx->mv_buf[0]) {
		vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
		return -ENOMEM;
	}

	ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1],
					    GFP_KERNEL);
	if (!ctx->mv_buf[1]) {
		vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
		dma_free_coherent(dev, size, ctx->mv_buf[0],
			ctx->mv_buf_dma[0]);

		return -ENOMEM;
	}

	ctx->mv_buf_size = size;
	ctx->src_mv_buf_selector = 0;

	return 0;
}
static void free_mv_buffers(struct vpe_ctx *ctx)
{
	realloc_mv_buffers(ctx, 0);
}
/*
 * While de-interlacing, we keep the two most recent input buffers
 * around. This function frees those two buffers when we have
 * finished processing the current stream.
 */
static void free_vbs(struct vpe_ctx *ctx)
{
	struct vpe_dev *dev = ctx->dev;
	unsigned long flags;

	if (ctx->src_vbs[2] == NULL)
		return;

	spin_lock_irqsave(&dev->lock, flags);
	if (ctx->src_vbs[2]) {
		v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE);
		if (ctx->src_vbs[1] && (ctx->src_vbs[1] != ctx->src_vbs[2]))
			v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
		ctx->src_vbs[2] = NULL;
		ctx->src_vbs[1] = NULL;
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}
/*
 * Enable or disable the VPE clocks
 */
static void vpe_set_clock_enable(struct vpe_dev *dev, bool on)
{
	u32 val = 0;

	if (on)
		val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE;

	write_reg(dev, VPE_CLK_ENABLE, val);
}

static void vpe_top_reset(struct vpe_dev *dev)
{
	write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK,
		VPE_DATA_PATH_CLK_RESET_SHIFT);

	usleep_range(100, 150);

	write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK,
		VPE_DATA_PATH_CLK_RESET_SHIFT);
}

static void vpe_top_vpdma_reset(struct vpe_dev *dev)
{
	write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK,
		VPE_VPDMA_CLK_RESET_SHIFT);

	usleep_range(100, 150);

	write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK,
		VPE_VPDMA_CLK_RESET_SHIFT);
}
/*
 * Load the correct set of upsampler coefficients into the shadow MMRs
 */
static void set_us_coefficients(struct vpe_ctx *ctx)
{
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
	u32 *us1_reg = &mmr_adb->us1_regs[0];
	u32 *us2_reg = &mmr_adb->us2_regs[0];
	u32 *us3_reg = &mmr_adb->us3_regs[0];
	const unsigned short *cp, *end_cp;

	cp = &us_coeffs[0].anchor_fid0_c0;

	if (s_q_data->flags & Q_IS_INTERLACED)		/* interlaced */
		cp += sizeof(us_coeffs[0]) / sizeof(*cp);

	end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);

	while (cp < end_cp) {
		write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT);
		write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT);
		*us2_reg++ = *us1_reg;
		*us3_reg++ = *us1_reg++;
	}

	ctx->load_mmrs = true;
}
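/*
 * A note on the loop above: each 32-bit US register packs two 16-bit
 * coefficients, so two write_field() calls fill one us1 register before it
 * is copied verbatim to the matching us2 and us3 shadow registers. For
 * instance, the first progressive pair (0x00C8, 0x0348) lands in
 * us1_regs[0] as that register's C0 and C1 fields.
 */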
/*
 * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
 */
static void set_cfg_modes(struct vpe_ctx *ctx)
{
	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	u32 *us1_reg0 = &mmr_adb->us1_regs[0];
	u32 *us2_reg0 = &mmr_adb->us2_regs[0];
	u32 *us3_reg0 = &mmr_adb->us3_regs[0];
	int cfg_mode = 1;

	/*
	 * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing.
	 * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
	 */

	if (fmt->fourcc == V4L2_PIX_FMT_NV12)
		cfg_mode = 0;

	write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
	write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
	write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);

	ctx->load_mmrs = true;
}
static void set_line_modes(struct vpe_ctx *ctx)
{
	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
	int line_mode = 1;

	if (fmt->fourcc == V4L2_PIX_FMT_NV12)
		line_mode = 0;		/* double lines to line buffer */

	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN);
	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN);

	/* frame start for input luma */
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
				    VPE_CHAN_LUMA1_IN);
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
				    VPE_CHAN_LUMA2_IN);
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
				    VPE_CHAN_LUMA3_IN);

	/* frame start for input chroma */
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
				    VPE_CHAN_CHROMA1_IN);
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
				    VPE_CHAN_CHROMA2_IN);
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
				    VPE_CHAN_CHROMA3_IN);

	/* frame start for MV in client */
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
				    VPE_CHAN_MV_IN);
}
/*
 * Set the shadow registers that are modified when the source
 * format changes.
 */
static void set_src_registers(struct vpe_ctx *ctx)
{
	set_us_coefficients(ctx);
}
/*
 * Set the shadow registers that are modified when the destination
 * format changes.
 */
static void set_dst_registers(struct vpe_ctx *ctx)
{
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	enum v4l2_colorspace clrspc = ctx->q_data[Q_DATA_DST].colorspace;
	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
	u32 val = 0;

	if (clrspc == V4L2_COLORSPACE_SRGB) {
		val |= VPE_RGB_OUT_SELECT;
		vpdma_set_bg_color(ctx->dev->vpdma,
			(struct vpdma_data_format *)fmt->vpdma_fmt[0], 0xff);
	} else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
		val |= VPE_COLOR_SEPARATE_422;

	/*
	 * the source of CHR_DS and CSC is always the scaler, irrespective of
	 * whether it's used or not
	 */
	val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER;

	if (fmt->fourcc != V4L2_PIX_FMT_NV12)
		val |= VPE_DS_BYPASS;

	mmr_adb->out_fmt_reg[0] = val;

	ctx->load_mmrs = true;
}
/*
 * Set the de-interlacer shadow register values
 */
static void set_dei_regs(struct vpe_ctx *ctx)
{
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
	unsigned int src_h = s_q_data->c_rect.height;
	unsigned int src_w = s_q_data->c_rect.width;
	u32 *dei_mmr0 = &mmr_adb->dei_regs[0];
	bool deinterlace = true;
	u32 val = 0;

	/*
	 * according to TRM, we should set DEI in progressive bypass mode when
	 * the input content is progressive, however, DEI is bypassed correctly
	 * for both progressive and interlace content in interlace bypass mode.
	 * It has been recommended not to use progressive bypass mode.
	 */
	if (!(s_q_data->flags & Q_IS_INTERLACED) || !ctx->deinterlacing) {
		deinterlace = false;
		val = VPE_DEI_INTERLACE_BYPASS;
	}

	src_h = deinterlace ? src_h * 2 : src_h;

	val |= (src_h << VPE_DEI_HEIGHT_SHIFT) |
		(src_w << VPE_DEI_WIDTH_SHIFT) |
		VPE_DEI_FIELD_FLUSH;

	*dei_mmr0 = val;

	ctx->load_mmrs = true;
}
static void set_dei_shadow_registers(struct vpe_ctx *ctx)
{
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	u32 *dei_mmr = &mmr_adb->dei_regs[0];
	const struct vpe_dei_regs *cur = &dei_regs;

	dei_mmr[2]  = cur->mdt_spacial_freq_thr_reg;
	dei_mmr[3]  = cur->edi_config_reg;
	dei_mmr[4]  = cur->edi_lut_reg0;
	dei_mmr[5]  = cur->edi_lut_reg1;
	dei_mmr[6]  = cur->edi_lut_reg2;
	dei_mmr[7]  = cur->edi_lut_reg3;

	ctx->load_mmrs = true;
}
static void config_edi_input_mode(struct vpe_ctx *ctx, int mode)
{
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	u32 *edi_config_reg = &mmr_adb->dei_regs[3];

	if (mode & 0x2)
		write_field(edi_config_reg, 1, 1, 2);	/* EDI_ENABLE_3D */

	if (mode & 0x3)
		write_field(edi_config_reg, 1, 1, 3);	/* EDI_CHROMA_3D */

	write_field(edi_config_reg, mode, VPE_EDI_INP_MODE_MASK,
			VPE_EDI_INP_MODE_SHIFT);

	ctx->load_mmrs = true;
}
/*
 * Set the shadow registers whose values are modified when either the
 * source or destination format is changed.
 */
static int set_srcdst_params(struct vpe_ctx *ctx)
{
	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
	struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	unsigned int src_w = s_q_data->c_rect.width;
	unsigned int src_h = s_q_data->c_rect.height;
	unsigned int dst_w = d_q_data->c_rect.width;
	unsigned int dst_h = d_q_data->c_rect.height;
	size_t mv_buf_size;
	int ret;

	ctx->sequence = 0;
	ctx->field = V4L2_FIELD_TOP;

	if ((s_q_data->flags & Q_IS_INTERLACED) &&
			!(d_q_data->flags & Q_IS_INTERLACED)) {
		int bytes_per_line;
		const struct vpdma_data_format *mv =
			&vpdma_misc_fmts[VPDMA_DATA_FMT_MV];

		/*
		 * we make sure that the source image has a 16 byte aligned
		 * stride, we need to do the same for the motion vector buffer
		 * by aligning its stride to the next 16 byte boundary. this
		 * extra space will not be used by the de-interlacer, but will
		 * ensure that vpdma operates correctly
		 */
		bytes_per_line = ALIGN((s_q_data->width * mv->depth) >> 3,
				       VPDMA_STRIDE_ALIGN);
		mv_buf_size = bytes_per_line * s_q_data->height;

		ctx->deinterlacing = true;
		src_h <<= 1;
	} else {
		ctx->deinterlacing = false;
		mv_buf_size = 0;
	}

	free_vbs(ctx);
	ctx->src_vbs[2] = ctx->src_vbs[1] = ctx->src_vbs[0] = NULL;

	ret = realloc_mv_buffers(ctx, mv_buf_size);
	if (ret)
		return ret;

	set_cfg_modes(ctx);
	set_dei_regs(ctx);

	csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
		s_q_data->colorspace, d_q_data->colorspace);

	sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w);
	sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h);

	sc_config_scaler(ctx->dev->sc, &mmr_adb->sc_regs0[0],
		&mmr_adb->sc_regs8[0], &mmr_adb->sc_regs17[0],
		src_w, src_h, dst_w, dst_h);

	return 0;
}
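/*
 * A worked example of the MV buffer sizing above, assuming the VPDMA motion
 * vector format has a depth of 4 bits per pixel: for a 1920x1080 interlaced
 * source,
 *
 *	bytes_per_line = ALIGN((1920 * 4) >> 3, 16) = 960
 *	mv_buf_size    = 960 * 1080                 = 1036800 bytes
 *
 * The ALIGN() only changes the result when the raw stride is not already a
 * multiple of VPDMA_STRIDE_ALIGN.
 */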
/*
 * Return the vpe_ctx structure for a given struct file
 */
static struct vpe_ctx *file2ctx(struct file *file)
{
	return container_of(file->private_data, struct vpe_ctx, fh);
}
/*
 * job_ready() - check whether an instance is ready to be scheduled to run
 */
static int job_ready(void *priv)
{
	struct vpe_ctx *ctx = priv;

	/*
	 * This check is needed because this can be called directly from the
	 * driver: when invoked by the m2m framework the condition always
	 * holds, but when invoked from vpe_irq it can fail (e.g. a source
	 * stream with zero ready buffers).
	 */
	if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) <= 0 ||
	    v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) <= 0)
		return 0;

	return 1;
}

static void job_abort(void *priv)
{
	struct vpe_ctx *ctx = priv;

	/* Will cancel the transaction in the next interrupt handler */
	ctx->aborting = 1;
}
static void vpe_dump_regs(struct vpe_dev *dev)
{
#define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))

	vpe_dbg(dev, "VPE Registers:\n");

	DUMPREG(INT0_STATUS0_RAW);
	DUMPREG(INT0_STATUS0);
	DUMPREG(INT0_ENABLE0);
	DUMPREG(INT0_STATUS1_RAW);
	DUMPREG(INT0_STATUS1);
	DUMPREG(INT0_ENABLE1);
	DUMPREG(CLK_FORMAT_SELECT);
	DUMPREG(CLK_RANGE_MAP);
	DUMPREG(DEI_FRAME_SIZE);
	DUMPREG(MDT_SF_THRESHOLD);
	DUMPREG(DEI_EDI_LUT_R0);
	DUMPREG(DEI_EDI_LUT_R1);
	DUMPREG(DEI_EDI_LUT_R2);
	DUMPREG(DEI_EDI_LUT_R3);
	DUMPREG(DEI_FMD_WINDOW_R0);
	DUMPREG(DEI_FMD_WINDOW_R1);
	DUMPREG(DEI_FMD_CONTROL_R0);
	DUMPREG(DEI_FMD_CONTROL_R1);
	DUMPREG(DEI_FMD_STATUS_R0);
	DUMPREG(DEI_FMD_STATUS_R1);
	DUMPREG(DEI_FMD_STATUS_R2);
#undef DUMPREG

	sc_dump_regs(dev->sc);
	csc_dump_regs(dev->csc);
}
static void add_out_dtd(struct vpe_ctx *ctx, int port)
{
	struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
	const struct vpe_port_data *p_data = &port_data[port];
	struct vb2_buffer *vb = &ctx->dst_vb->vb2_buf;
	struct vpe_fmt *fmt = q_data->fmt;
	const struct vpdma_data_format *vpdma_fmt;
	int mv_buf_selector = !ctx->src_mv_buf_selector;
	dma_addr_t dma_addr;
	u32 flags = 0;
	u32 offset = 0;
	u32 stride;

	if (port == VPE_PORT_MV_OUT) {
		vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
		dma_addr = ctx->mv_buf_dma[mv_buf_selector];
		q_data = &ctx->q_data[Q_DATA_SRC];
		stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3,
			       VPDMA_STRIDE_ALIGN);
	} else {
		/* to incorporate interleaved formats */
		int plane = fmt->coplanar ? p_data->vb_part : 0;

		vpdma_fmt = fmt->vpdma_fmt[plane];
		/*
		 * If we are using a single-plane buffer and need to program
		 * a separate vpdma chroma channel, compute the offset of the
		 * chroma data within the buffer.
		 */
		if (q_data->nplanes == 1 && plane) {
			dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
			/* Compute required offset */
			offset = q_data->bytesperline[0] * q_data->height;
		} else {
			dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
			/* Use address as is, no offset */
			offset = 0;
		}
		if (!dma_addr) {
			vpe_err(ctx->dev,
				"acquiring output buffer(%d) dma_addr failed\n",
				port);
			return;
		}
		/* Apply the offset */
		dma_addr += offset;
		stride = q_data->bytesperline[VPE_LUMA];
	}

	if (q_data->flags & Q_DATA_FRAME_1D)
		flags |= VPDMA_DATA_FRAME_1D;
	if (q_data->flags & Q_DATA_MODE_TILED)
		flags |= VPDMA_DATA_MODE_TILED;

	vpdma_set_max_size(ctx->dev->vpdma, VPDMA_MAX_SIZE1,
			   MAX_W, MAX_H);

	vpdma_add_out_dtd(&ctx->desc_list, q_data->width,
			  stride, &q_data->c_rect,
			  vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
			  MAX_OUT_HEIGHT_REG1, p_data->channel, flags);
}
static void add_in_dtd(struct vpe_ctx *ctx, int port)
{
	struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
	const struct vpe_port_data *p_data = &port_data[port];
	struct vb2_buffer *vb = &ctx->src_vbs[p_data->vb_index]->vb2_buf;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpe_fmt *fmt = q_data->fmt;
	const struct vpdma_data_format *vpdma_fmt;
	int mv_buf_selector = ctx->src_mv_buf_selector;
	int field = vbuf->field == V4L2_FIELD_BOTTOM;
	int frame_width, frame_height;
	dma_addr_t dma_addr;
	u32 flags = 0;
	u32 offset = 0;
	u32 stride;

	if (port == VPE_PORT_MV_IN) {
		vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
		dma_addr = ctx->mv_buf_dma[mv_buf_selector];
		stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3,
			       VPDMA_STRIDE_ALIGN);
	} else {
		/* to incorporate interleaved formats */
		int plane = fmt->coplanar ? p_data->vb_part : 0;

		vpdma_fmt = fmt->vpdma_fmt[plane];
		/*
		 * If we are using a single-plane buffer and need to program
		 * a separate vpdma chroma channel, compute the offset of the
		 * chroma data within the buffer.
		 */
		if (q_data->nplanes == 1 && plane) {
			dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
			/* Compute required offset */
			offset = q_data->bytesperline[0] * q_data->height;
		} else {
			dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
			/* Use address as is, no offset */
			offset = 0;
		}
		if (!dma_addr) {
			vpe_err(ctx->dev,
				"acquiring input buffer(%d) dma_addr failed\n",
				port);
			return;
		}
		/* Apply the offset */
		dma_addr += offset;
		stride = q_data->bytesperline[VPE_LUMA];

		if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) {
			/*
			 * Use top or bottom field from same vb alternately
			 * f,f-1,f-2 = TBT when seq is even
			 * f,f-1,f-2 = BTB when seq is odd
			 */
			field = (p_data->vb_index + (ctx->sequence % 2)) % 2;

			if (field) {
				/*
				 * bottom field of a SEQ_TB buffer:
				 * skip past the top field's data
				 */
				int height = q_data->height / 2;
				int bpp = fmt->fourcc == V4L2_PIX_FMT_NV12 ?
						1 : (vpdma_fmt->depth >> 3);
				if (plane)
					height /= 2;
				dma_addr += q_data->width * height * bpp;
			}
		}
	}

	if (q_data->flags & Q_DATA_FRAME_1D)
		flags |= VPDMA_DATA_FRAME_1D;
	if (q_data->flags & Q_DATA_MODE_TILED)
		flags |= VPDMA_DATA_MODE_TILED;

	frame_width = q_data->c_rect.width;
	frame_height = q_data->c_rect.height;

	if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12)
		frame_height /= 2;

	vpdma_add_in_dtd(&ctx->desc_list, q_data->width, stride,
			 &q_data->c_rect, vpdma_fmt, dma_addr,
			 p_data->channel, field, flags, frame_width,
			 frame_height, 0, 0);
}
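/*
 * Worked example of the SEQ_TB field selection above: with
 * field = (vb_index + (sequence % 2)) % 2, an even sequence makes the
 * f, f-1, f-2 inputs (vb_index 0, 1, 2) read the top, bottom and top
 * fields (TBT), while an odd sequence reads BTB, matching the comment in
 * the function body.
 */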
/*
 * Enable the expected IRQ sources
 */
static void enable_irqs(struct vpe_ctx *ctx)
{
	write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE);
	write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
				VPE_DS1_UV_ERROR_INT);

	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, true);
}

static void disable_irqs(struct vpe_ctx *ctx)
{
	write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
	write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);

	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, false);
}
/*
 * device_run() - prepares and starts the device
 *
 * This function is only called when both the source and destination
 * buffers are in place.
 */
static void device_run(void *priv)
{
	struct vpe_ctx *ctx = priv;
	struct sc_data *sc = ctx->dev->sc;
	struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];

	if (ctx->deinterlacing && s_q_data->flags & Q_DATA_INTERLACED_SEQ_TB &&
	    ctx->sequence % 2 == 0) {
		/*
		 * When using SEQ_TB buffers and this is the first use of the
		 * buffer, don't remove it from the queue: the next field is
		 * present in the same buffer (so job_ready() keeps seeing
		 * it). It is removed once the bottom field has been used.
		 */
		ctx->src_vbs[0] = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
		WARN_ON(ctx->src_vbs[0] == NULL);
	} else {
		ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		WARN_ON(ctx->src_vbs[0] == NULL);
	}

	ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
	WARN_ON(ctx->dst_vb == NULL);

	if (ctx->deinterlacing) {

		if (ctx->src_vbs[2] == NULL) {
			ctx->src_vbs[2] = ctx->src_vbs[0];
			WARN_ON(ctx->src_vbs[2] == NULL);
			ctx->src_vbs[1] = ctx->src_vbs[0];
			WARN_ON(ctx->src_vbs[1] == NULL);
		}

		/*
		 * we have output the first 2 frames through line average, we
		 * now switch to EDI de-interlacer
		 */
		if (ctx->sequence == 2)
			config_edi_input_mode(ctx, 0x3); /* EDI (Y + UV) */
	}

	/* config descriptors */
	if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
		vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);

		set_line_modes(ctx);

		ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
		ctx->load_mmrs = false;
	}

	if (sc->loaded_coeff_h != ctx->sc_coeff_h.dma_addr ||
			sc->load_coeff_h) {
		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_h);
		vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
			&ctx->sc_coeff_h, 0);

		sc->loaded_coeff_h = ctx->sc_coeff_h.dma_addr;
		sc->load_coeff_h = false;
	}

	if (sc->loaded_coeff_v != ctx->sc_coeff_v.dma_addr ||
			sc->load_coeff_v) {
		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_v);
		vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
			&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE >> 4);

		sc->loaded_coeff_v = ctx->sc_coeff_v.dma_addr;
		sc->load_coeff_v = false;
	}

	/* output data descriptors */
	if (ctx->deinterlacing)
		add_out_dtd(ctx, VPE_PORT_MV_OUT);

	if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
		add_out_dtd(ctx, VPE_PORT_RGB_OUT);
	} else {
		add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
		if (d_q_data->fmt->coplanar)
			add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
	}

	/* input data descriptors */
	if (ctx->deinterlacing) {
		add_in_dtd(ctx, VPE_PORT_LUMA3_IN);
		add_in_dtd(ctx, VPE_PORT_CHROMA3_IN);

		add_in_dtd(ctx, VPE_PORT_LUMA2_IN);
		add_in_dtd(ctx, VPE_PORT_CHROMA2_IN);
	}

	add_in_dtd(ctx, VPE_PORT_LUMA1_IN);
	add_in_dtd(ctx, VPE_PORT_CHROMA1_IN);

	if (ctx->deinterlacing)
		add_in_dtd(ctx, VPE_PORT_MV_IN);

	/* sync on channel control descriptors for input ports */
	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN);
	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN);

	if (ctx->deinterlacing) {
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_LUMA2_IN);
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_CHROMA2_IN);

		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_LUMA3_IN);
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_CHROMA3_IN);

		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN);
	}

	/* sync on channel control descriptors for output ports */
	if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_RGB_OUT);
	} else {
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_LUMA_OUT);
		if (d_q_data->fmt->coplanar)
			vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
				VPE_CHAN_CHROMA_OUT);
	}

	if (ctx->deinterlacing)
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT);

	enable_irqs(ctx);

	vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
	vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list, 0);
}
static void dei_error(struct vpe_ctx *ctx)
{
	dev_warn(ctx->dev->v4l2_dev.dev,
		"received DEI error interrupt\n");
}

static void ds1_uv_error(struct vpe_ctx *ctx)
{
	dev_warn(ctx->dev->v4l2_dev.dev,
		"received downsampler error interrupt\n");
}
static irqreturn_t vpe_irq(int irq_vpe, void *data)
{
	struct vpe_dev *dev = (struct vpe_dev *)data;
	struct vpe_ctx *ctx;
	struct vpe_q_data *d_q_data;
	struct vb2_v4l2_buffer *s_vb, *d_vb;
	unsigned long flags;
	u32 irqst0, irqst1;
	bool list_complete = false;

	irqst0 = read_reg(dev, VPE_INT0_STATUS0);
	if (irqst0) {
		write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0);
		vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0);
	}

	irqst1 = read_reg(dev, VPE_INT0_STATUS1);
	if (irqst1) {
		write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1);
		vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1);
	}

	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	if (!ctx) {
		vpe_err(dev, "instance released before end of transaction\n");
		goto handled;
	}

	if (irqst1) {
		if (irqst1 & VPE_DEI_ERROR_INT) {
			irqst1 &= ~VPE_DEI_ERROR_INT;
			dei_error(ctx);
		}
		if (irqst1 & VPE_DS1_UV_ERROR_INT) {
			irqst1 &= ~VPE_DS1_UV_ERROR_INT;
			ds1_uv_error(ctx);
		}
	}

	if (irqst0) {
		if (irqst0 & VPE_INT0_LIST0_COMPLETE)
			vpdma_clear_list_stat(ctx->dev->vpdma, 0, 0);

		irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
		list_complete = true;
	}

	if (irqst0 | irqst1) {
		dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
			 irqst0, irqst1);
	}

	/*
	 * Setup next operation only when list complete IRQ occurs
	 * otherwise, skip the following code
	 */
	if (!list_complete)
		goto handled;

	disable_irqs(ctx);

	vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
	vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
	vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
	vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);

	vpdma_reset_desc_list(&ctx->desc_list);

	/* the previous dst mv buffer becomes the next src mv buffer */
	ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;

	s_vb = ctx->src_vbs[0];
	d_vb = ctx->dst_vb;

	d_vb->flags = s_vb->flags;
	d_vb->vb2_buf.timestamp = s_vb->vb2_buf.timestamp;

	if (s_vb->flags & V4L2_BUF_FLAG_TIMECODE)
		d_vb->timecode = s_vb->timecode;

	d_vb->sequence = ctx->sequence;
	s_vb->sequence = ctx->sequence;

	d_q_data = &ctx->q_data[Q_DATA_DST];
	if (d_q_data->flags & Q_IS_INTERLACED) {
		d_vb->field = ctx->field;
		if (ctx->field == V4L2_FIELD_BOTTOM) {
			ctx->sequence++;
			ctx->field = V4L2_FIELD_TOP;
		} else {
			WARN_ON(ctx->field != V4L2_FIELD_TOP);
			ctx->field = V4L2_FIELD_BOTTOM;
		}
	} else {
		d_vb->field = V4L2_FIELD_NONE;
		ctx->sequence++;
	}

	if (ctx->deinterlacing) {
		/*
		 * Allow source buffer to be dequeued only if it won't be used
		 * in the next iteration. All vbs are initialized to first
		 * buffer and we are shifting buffers every iteration, for the
		 * first two iterations, no buffer will be dequeued.
		 * This ensures that driver will keep (n-2)th (n-1)th and (n)th
		 * field when deinterlacing is enabled
		 */
		if (ctx->src_vbs[2] != ctx->src_vbs[1])
			s_vb = ctx->src_vbs[2];
		else
			s_vb = NULL;
	}

	spin_lock_irqsave(&dev->lock, flags);

	if (s_vb)
		v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);

	v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);

	spin_unlock_irqrestore(&dev->lock, flags);

	if (ctx->deinterlacing) {
		ctx->src_vbs[2] = ctx->src_vbs[1];
		ctx->src_vbs[1] = ctx->src_vbs[0];
	}

	/*
	 * Since vb2_buf_done() has already been called for these buffers, we
	 * can now NULL them out so that we won't try to clean out stray
	 * pointers later on.
	 */
	ctx->src_vbs[0] = NULL;
	ctx->dst_vb = NULL;

	if (ctx->aborting)
		goto finished;

	ctx->bufs_completed++;
	if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) {
		device_run(ctx);
		goto handled;
	}

finished:
	vpe_dbg(ctx->dev, "finishing transaction\n");
	ctx->bufs_completed = 0;
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
handled:
	return IRQ_HANDLED;
}
/*
 * video ioctls
 */
static int vpe_querycap(struct file *file, void *priv,
			struct v4l2_capability *cap)
{
	strscpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver));
	strscpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		VPE_MODULE_NAME);
	return 0;
}
static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type)
{
	int i, index;
	struct vpe_fmt *fmt = NULL;

	index = 0;
	for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) {
		if (vpe_formats[i].types & type) {
			if (index == f->index) {
				fmt = &vpe_formats[i];
				break;
			}
			index++;
		}
	}

	if (!fmt)
		return -EINVAL;

	f->pixelformat = fmt->fourcc;
	return 0;
}
static int vpe_enum_fmt(struct file *file, void *priv,
			struct v4l2_fmtdesc *f)
{
	if (V4L2_TYPE_IS_OUTPUT(f->type))
		return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT);

	return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE);
}
static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct vpe_ctx *ctx = file2ctx(file);
	struct vb2_queue *vq;
	struct vpe_q_data *q_data;
	int i;

	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	q_data = get_q_data(ctx, f->type);

	pix->width = q_data->width;
	pix->height = q_data->height;
	pix->pixelformat = q_data->fmt->fourcc;
	pix->field = q_data->field;

	if (V4L2_TYPE_IS_OUTPUT(f->type)) {
		pix->colorspace = q_data->colorspace;
	} else {
		struct vpe_q_data *s_q_data;

		/* get colorspace from the source queue */
		s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);

		pix->colorspace = s_q_data->colorspace;
	}

	pix->num_planes = q_data->nplanes;

	for (i = 0; i < pix->num_planes; i++) {
		pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
		pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
	}

	return 0;
}
static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
			 struct vpe_fmt *fmt, int type)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *plane_fmt;
	unsigned int w_align;
	int i, depth, depth_bytes, height;
	unsigned int stride = 0;

	if (!fmt || !(fmt->types & type)) {
		vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
			pix->pixelformat);
		fmt = __find_format(V4L2_PIX_FMT_YUYV);
	}

	if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE
			&& pix->field != V4L2_FIELD_SEQ_TB)
		pix->field = V4L2_FIELD_NONE;

	depth = fmt->vpdma_fmt[VPE_LUMA]->depth;

	/*
	 * the line stride must be 16-byte aligned for VPDMA to work; based
	 * on the bytes per pixel, figure out how much the width needs to be
	 * aligned to keep the line stride 16-byte aligned
	 */
	depth_bytes = depth >> 3;

	if (depth_bytes == 3) {
		/*
		 * if bpp is 3 (as in some RGB formats), aligning the pixel
		 * width doesn't really help in ensuring the line stride is
		 * 16-byte aligned
		 */
		w_align = 4;
	} else {
		/*
		 * for the remaining bpp values (4, 2 and 1), a pixel width
		 * alignment can ensure a line stride alignment of 16 bytes.
		 * For example, if bpp is 2, then the line stride is 16-byte
		 * aligned as long as the width is 8-pixel aligned.
		 */

		/*
		 * HACK: using order_base_2() here causes lots of asm output
		 * errors with smatch, on i386:
		 * ./arch/x86/include/asm/bitops.h:457:22:
		 *	 warning: asm output is not an lvalue
		 * Perhaps some gcc optimization is doing the wrong thing
		 * there.
		 * Let's get rid of them by doing the calculation in two steps
		 */
		w_align = roundup_pow_of_two(VPDMA_DESC_ALIGN / depth_bytes);
		w_align = ilog2(w_align);
	}

	v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
			      &pix->height, MIN_H, MAX_H, H_ALIGN,
			      S_ALIGN);

	if (!pix->num_planes)
		pix->num_planes = fmt->coplanar ? 2 : 1;
	else if (pix->num_planes > 1 && !fmt->coplanar)
		pix->num_planes = 1;

	pix->pixelformat = fmt->fourcc;

	/*
	 * For the actual image parameters, we need to consider the field
	 * height of the image for SEQ_TB buffers.
	 */
	if (pix->field == V4L2_FIELD_SEQ_TB)
		height = pix->height / 2;
	else
		height = pix->height;

	if (!pix->colorspace) {
		if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
				fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
				fmt->fourcc == V4L2_PIX_FMT_RGB32 ||
				fmt->fourcc == V4L2_PIX_FMT_BGR32) {
			pix->colorspace = V4L2_COLORSPACE_SRGB;
		} else {
			if (height > 1280)	/* HD */
				pix->colorspace = V4L2_COLORSPACE_REC709;
			else			/* SD */
				pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
		}
	}

	memset(pix->reserved, 0, sizeof(pix->reserved));
	for (i = 0; i < pix->num_planes; i++) {
		plane_fmt = &pix->plane_fmt[i];
		depth = fmt->vpdma_fmt[i]->depth;

		stride = (pix->width * fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
		if (stride > plane_fmt->bytesperline)
			plane_fmt->bytesperline = stride;

		plane_fmt->bytesperline = ALIGN(plane_fmt->bytesperline,
						VPDMA_STRIDE_ALIGN);

		if (i == VPE_LUMA) {
			plane_fmt->sizeimage = pix->height *
					       plane_fmt->bytesperline;

			if (pix->num_planes == 1 && fmt->coplanar)
				plane_fmt->sizeimage += pix->height *
					plane_fmt->bytesperline *
					fmt->vpdma_fmt[VPE_CHROMA]->depth >> 3;

		} else { /* i == VIP_CHROMA */
			plane_fmt->sizeimage = (pix->height *
					       plane_fmt->bytesperline *
					       depth) >> 3;
		}
		memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved));
	}

	return 0;
}
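/*
 * Worked example of the width alignment above, assuming VPDMA_DESC_ALIGN
 * is 16 bytes: for a 2-bytes-per-pixel format such as YUYV,
 *
 *	w_align = roundup_pow_of_two(16 / 2) = 8
 *	w_align = ilog2(8)                   = 3
 *
 * so v4l_bound_align_image() aligns the width to 2^3 = 8 pixels, which
 * keeps the line stride a multiple of 16 bytes.
 */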
static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct vpe_ctx *ctx = file2ctx(file);
	struct vpe_fmt *fmt = find_format(f);

	if (V4L2_TYPE_IS_OUTPUT(f->type))
		return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT);

	return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE);
}
static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *plane_fmt;
	struct vpe_q_data *q_data;
	struct vb2_queue *vq;
	int i;

	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	if (vb2_is_busy(vq)) {
		vpe_err(ctx->dev, "queue busy\n");
		return -EBUSY;
	}

	q_data = get_q_data(ctx, f->type);
	if (!q_data)
		return -EINVAL;

	q_data->fmt		= find_format(f);
	q_data->width		= pix->width;
	q_data->height		= pix->height;
	q_data->colorspace	= pix->colorspace;
	q_data->field		= pix->field;
	q_data->nplanes		= pix->num_planes;

	for (i = 0; i < pix->num_planes; i++) {
		plane_fmt = &pix->plane_fmt[i];

		q_data->bytesperline[i]	= plane_fmt->bytesperline;
		q_data->sizeimage[i]	= plane_fmt->sizeimage;
	}

	q_data->c_rect.left	= 0;
	q_data->c_rect.top	= 0;
	q_data->c_rect.width	= q_data->width;
	q_data->c_rect.height	= q_data->height;

	if (q_data->field == V4L2_FIELD_ALTERNATE)
		q_data->flags |= Q_DATA_INTERLACED_ALTERNATE;
	else if (q_data->field == V4L2_FIELD_SEQ_TB)
		q_data->flags |= Q_DATA_INTERLACED_SEQ_TB;
	else
		q_data->flags &= ~Q_IS_INTERLACED;

	/* the crop height is halved for the case of SEQ_TB buffers */
	if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
		q_data->c_rect.height /= 2;

	vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
		f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
		q_data->bytesperline[VPE_LUMA]);
	if (q_data->nplanes == 2)
		vpe_dbg(ctx->dev, " bpl_uv %d\n",
			q_data->bytesperline[VPE_CHROMA]);

	return 0;
}
static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	int ret;
	struct vpe_ctx *ctx = file2ctx(file);

	ret = vpe_try_fmt(file, priv, f);
	if (ret)
		return ret;

	ret = __vpe_s_fmt(ctx, f);
	if (ret)
		return ret;

	if (V4L2_TYPE_IS_OUTPUT(f->type))
		set_src_registers(ctx);
	else
		set_dst_registers(ctx);

	return set_srcdst_params(ctx);
}
static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
{
	struct vpe_q_data *q_data;
	int height;

	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
		return -EINVAL;

	q_data = get_q_data(ctx, s->type);
	if (!q_data)
		return -EINVAL;

	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE:
		/*
		 * COMPOSE target is only valid for capture buffer type, return
		 * error for output buffer type
		 */
		if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_CROP:
		/*
		 * CROP target is only valid for output buffer type, return
		 * error for capture buffer type
		 */
		if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		break;
	/*
	 * bound and default crop/compose targets are invalid targets to
	 * try/set
	 */
	default:
		return -EINVAL;
	}

	/*
	 * For SEQ_TB buffers, the crop height must not exceed the height of
	 * a field, not the full buffer height
	 */
	if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
		height = q_data->height / 2;
	else
		height = q_data->height;

	if (s->r.top < 0 || s->r.left < 0) {
		vpe_err(ctx->dev, "negative values for top and left\n");
		s->r.top = s->r.left = 0;
	}

	v4l_bound_align_image(&s->r.width, MIN_W, q_data->width, 1,
		&s->r.height, MIN_H, height, H_ALIGN, S_ALIGN);

	/* adjust left/top if cropping rectangle is out of bounds */
	if (s->r.left + s->r.width > q_data->width)
		s->r.left = q_data->width - s->r.width;
	if (s->r.top + s->r.height > q_data->height)
		s->r.top = q_data->height - s->r.height;

	return 0;
}
static int vpe_g_selection(struct file *file, void *fh,
		struct v4l2_selection *s)
{
	struct vpe_ctx *ctx = file2ctx(file);
	struct vpe_q_data *q_data;
	bool use_c_rect = false;

	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
		return -EINVAL;

	q_data = get_q_data(ctx, s->type);
	if (!q_data)
		return -EINVAL;

	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		use_c_rect = true;
		break;
	case V4L2_SEL_TGT_CROP:
		if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		use_c_rect = true;
		break;
	default:
		return -EINVAL;
	}

	if (use_c_rect) {
		/*
		 * for CROP/COMPOSE target type, return c_rect params from the
		 * respective buffer type
		 */
		s->r = q_data->c_rect;
	} else {
		/*
		 * for DEFAULT/BOUNDS target type, return width and height from
		 * S_FMT of the respective buffer type
		 */
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = q_data->width;
		s->r.height = q_data->height;
	}

	return 0;
}
*file
, void *fh
,
1913 struct v4l2_selection
*s
)
1915 struct vpe_ctx
*ctx
= file2ctx(file
);
1916 struct vpe_q_data
*q_data
;
1917 struct v4l2_selection sel
= *s
;
1920 ret
= __vpe_try_selection(ctx
, &sel
);
1924 q_data
= get_q_data(ctx
, sel
.type
);
1928 if ((q_data
->c_rect
.left
== sel
.r
.left
) &&
1929 (q_data
->c_rect
.top
== sel
.r
.top
) &&
1930 (q_data
->c_rect
.width
== sel
.r
.width
) &&
1931 (q_data
->c_rect
.height
== sel
.r
.height
)) {
1933 "requested crop/compose values are already set\n");
1937 q_data
->c_rect
= sel
.r
;
1939 return set_srcdst_params(ctx
);
/*
 * defines number of buffers/frames a context can process with VPE before
 * switching to a different context. default value is 1 buffer per context
 */
#define V4L2_CID_VPE_BUFS_PER_JOB		(V4L2_CID_USER_TI_VPE_BASE + 0)

static int vpe_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct vpe_ctx *ctx =
		container_of(ctrl->handler, struct vpe_ctx, hdl);

	switch (ctrl->id) {
	case V4L2_CID_VPE_BUFS_PER_JOB:
		ctx->bufs_per_job = ctrl->val;
		break;

	default:
		vpe_err(ctx->dev, "Invalid control\n");
		return -EINVAL;
	}

	return 0;
}
static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
	.s_ctrl = vpe_s_ctrl,
};

static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
	.vidioc_querycap		= vpe_querycap,

	.vidioc_enum_fmt_vid_cap	= vpe_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane	= vpe_g_fmt,
	.vidioc_try_fmt_vid_cap_mplane	= vpe_try_fmt,
	.vidioc_s_fmt_vid_cap_mplane	= vpe_s_fmt,

	.vidioc_enum_fmt_vid_out	= vpe_enum_fmt,
	.vidioc_g_fmt_vid_out_mplane	= vpe_g_fmt,
	.vidioc_try_fmt_vid_out_mplane	= vpe_try_fmt,
	.vidioc_s_fmt_vid_out_mplane	= vpe_s_fmt,

	.vidioc_g_selection		= vpe_g_selection,
	.vidioc_s_selection		= vpe_s_selection,

	.vidioc_reqbufs			= v4l2_m2m_ioctl_reqbufs,
	.vidioc_querybuf		= v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf			= v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf			= v4l2_m2m_ioctl_dqbuf,
	.vidioc_expbuf			= v4l2_m2m_ioctl_expbuf,
	.vidioc_streamon		= v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,

	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
};
/*
 * Queue operations
 */
static int vpe_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	int i;
	struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
	struct vpe_q_data *q_data;

	q_data = get_q_data(ctx, vq->type);

	*nplanes = q_data->nplanes;

	for (i = 0; i < *nplanes; i++)
		sizes[i] = q_data->sizeimage[i];

	vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
		sizes[VPE_LUMA]);
	if (q_data->nplanes == 2)
		vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);

	return 0;
}
static int vpe_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct vpe_q_data *q_data;
	int i, num_planes;

	vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);

	q_data = get_q_data(ctx, vb->vb2_queue->type);
	num_planes = q_data->nplanes;

	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		if (!(q_data->flags & Q_IS_INTERLACED)) {
			vbuf->field = V4L2_FIELD_NONE;
		} else {
			if (vbuf->field != V4L2_FIELD_TOP &&
			    vbuf->field != V4L2_FIELD_BOTTOM &&
			    vbuf->field != V4L2_FIELD_SEQ_TB)
				return -EINVAL;
		}
	}

	for (i = 0; i < num_planes; i++) {
		if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
			vpe_err(ctx->dev,
				"data will not fit into plane (%lu < %lu)\n",
				vb2_plane_size(vb, i),
				(long) q_data->sizeimage[i]);
			return -EINVAL;
		}
	}

	for (i = 0; i < num_planes; i++)
		vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);

	return 0;
}
static void vpe_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
static int check_srcdst_sizes(struct vpe_ctx *ctx)
{
	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
	struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
	unsigned int src_w = s_q_data->c_rect.width;
	unsigned int src_h = s_q_data->c_rect.height;
	unsigned int dst_w = d_q_data->c_rect.width;
	unsigned int dst_h = d_q_data->c_rect.height;

	if (src_w == dst_w && src_h == dst_h)
		return 0;

	if (src_h <= SC_MAX_PIXEL_HEIGHT &&
	    src_w <= SC_MAX_PIXEL_WIDTH &&
	    dst_h <= SC_MAX_PIXEL_HEIGHT &&
	    dst_w <= SC_MAX_PIXEL_WIDTH)
		return 0;

	return -1;
}
static void vpe_return_all_buffers(struct vpe_ctx *ctx, struct vb2_queue *q,
				   enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *vb;
	unsigned long flags;

	for (;;) {
		if (V4L2_TYPE_IS_OUTPUT(q->type))
			vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		else
			vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
		if (!vb)
			break;
		spin_lock_irqsave(&ctx->dev->lock, flags);
		v4l2_m2m_buf_done(vb, state);
		spin_unlock_irqrestore(&ctx->dev->lock, flags);
	}

	/*
	 * Cleanup the in-transit vb2 buffers that have been
	 * removed from their respective queue already but for
	 * which processing has not been completed yet.
	 */
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		spin_lock_irqsave(&ctx->dev->lock, flags);

		if (ctx->src_vbs[2])
			v4l2_m2m_buf_done(ctx->src_vbs[2], state);

		if (ctx->src_vbs[1] && (ctx->src_vbs[1] != ctx->src_vbs[2]))
			v4l2_m2m_buf_done(ctx->src_vbs[1], state);

		if (ctx->src_vbs[0] &&
		    (ctx->src_vbs[0] != ctx->src_vbs[1]) &&
		    (ctx->src_vbs[0] != ctx->src_vbs[2]))
			v4l2_m2m_buf_done(ctx->src_vbs[0], state);

		ctx->src_vbs[2] = NULL;
		ctx->src_vbs[1] = NULL;
		ctx->src_vbs[0] = NULL;

		spin_unlock_irqrestore(&ctx->dev->lock, flags);
	} else {
		if (ctx->dst_vb) {
			spin_lock_irqsave(&ctx->dev->lock, flags);

			v4l2_m2m_buf_done(ctx->dst_vb, state);
			ctx->dst_vb = NULL;
			spin_unlock_irqrestore(&ctx->dev->lock, flags);
		}
	}
}
static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(q);

	/* check whether any of the sizes exceed the maximum scaling limits */
	if (check_srcdst_sizes(ctx)) {
		vpe_err(ctx->dev,
			"Conversion setup failed, check source and destination parameters\n"
			);
		vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_QUEUED);
		return -EINVAL;
	}

	if (ctx->deinterlacing)
		config_edi_input_mode(ctx, 0x0);

	if (ctx->sequence != 0)
		set_srcdst_params(ctx);

	return 0;
}
static void vpe_stop_streaming(struct vb2_queue *q)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(q);

	vpe_dump_regs(ctx->dev);
	vpdma_dump_regs(ctx->dev->vpdma);

	vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_ERROR);
}
= {
2178 .queue_setup
= vpe_queue_setup
,
2179 .buf_prepare
= vpe_buf_prepare
,
2180 .buf_queue
= vpe_buf_queue
,
2181 .wait_prepare
= vb2_ops_wait_prepare
,
2182 .wait_finish
= vb2_ops_wait_finish
,
2183 .start_streaming
= vpe_start_streaming
,
2184 .stop_streaming
= vpe_stop_streaming
,
static int queue_init(void *priv, struct vb2_queue *src_vq,
		      struct vb2_queue *dst_vq)
{
	struct vpe_ctx *ctx = priv;
	struct vpe_dev *dev = ctx->dev;
	int ret;

	memset(src_vq, 0, sizeof(*src_vq));
	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->ops = &vpe_qops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &dev->dev_mutex;
	src_vq->dev = dev->v4l2_dev.dev;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	memset(dst_vq, 0, sizeof(*dst_vq));
	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->ops = &vpe_qops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &dev->dev_mutex;
	dst_vq->dev = dev->v4l2_dev.dev;

	return vb2_queue_init(dst_vq);
}
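
/*
 * Custom control: number of buffers processed per VPDMA transaction.
 * Batching several buffers into one job reduces per-job setup overhead,
 * presumably at the cost of some added latency before a job is ready.
 */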
static const struct v4l2_ctrl_config vpe_bufs_per_job = {
	.ops = &vpe_ctrl_ops,
	.id = V4L2_CID_VPE_BUFS_PER_JOB,
	.name = "Buffers Per Transaction",
	.type = V4L2_CTRL_TYPE_INTEGER,
	.def = VPE_DEF_BUFS_PER_JOB,
	.min = VPE_DEF_BUFS_PER_JOB,
	.max = VIDEO_MAX_FRAME,
	.step = 1,
};
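
/*
 * V4L2 file operations: each open() allocates a vpe_ctx with its own
 * VPDMA descriptor list, shadow-register buffer and scaler coefficient
 * buffers; the error paths below unwind these allocations in reverse.
 */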
static int vpe_open(struct file *file)
{
	struct vpe_dev *dev = video_drvdata(file);
	struct vpe_q_data *s_q_data;
	struct v4l2_ctrl_handler *hdl;
	struct vpe_ctx *ctx;
	int ret;

	vpe_dbg(dev, "vpe_open\n");

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;

	if (mutex_lock_interruptible(&dev->dev_mutex)) {
		ret = -ERESTARTSYS;
		goto free_ctx;
	}

	ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE,
				     VPDMA_LIST_TYPE_NORMAL);
	if (ret != 0)
		goto unlock;

	ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb));
	if (ret != 0)
		goto free_desc_list;

	ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_h, SC_COEF_SRAM_SIZE);
	if (ret != 0)
		goto free_mmr_adb;

	ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE);
	if (ret != 0)
		goto free_sc_h;

	init_adb_hdrs(ctx);

	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;

	hdl = &ctx->hdl;
	v4l2_ctrl_handler_init(hdl, 1);
	v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL);
	if (hdl->error) {
		ret = hdl->error;
		goto exit_fh;
	}
	ctx->fh.ctrl_handler = hdl;
	v4l2_ctrl_handler_setup(hdl);

	s_q_data = &ctx->q_data[Q_DATA_SRC];
	s_q_data->fmt = &vpe_formats[2];
	s_q_data->width = 1920;
	s_q_data->height = 1080;
	s_q_data->nplanes = 1;
	s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
			s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
	s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
			s_q_data->height);
	s_q_data->colorspace = V4L2_COLORSPACE_REC709;
	s_q_data->field = V4L2_FIELD_NONE;
	s_q_data->c_rect.left = 0;
	s_q_data->c_rect.top = 0;
	s_q_data->c_rect.width = s_q_data->width;
	s_q_data->c_rect.height = s_q_data->height;
	s_q_data->flags = 0;

	ctx->q_data[Q_DATA_DST] = *s_q_data;

	set_dei_shadow_registers(ctx);
	set_src_registers(ctx);
	set_dst_registers(ctx);
	ret = set_srcdst_params(ctx);
	if (ret)
		goto exit_fh;

	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		ret = PTR_ERR(ctx->fh.m2m_ctx);
		goto exit_fh;
	}

	v4l2_fh_add(&ctx->fh);

	/*
	 * For now, just report the creation of the first instance; we can
	 * later optimize the driver to enable or disable clocks when the
	 * first instance is created or the last instance is released.
	 */
	if (atomic_inc_return(&dev->num_instances) == 1)
		vpe_dbg(dev, "first instance created\n");

	ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB;

	ctx->load_mmrs = true;

	vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
		ctx, ctx->fh.m2m_ctx);

	mutex_unlock(&dev->dev_mutex);

	return 0;

exit_fh:
	v4l2_ctrl_handler_free(hdl);
	v4l2_fh_exit(&ctx->fh);
	vpdma_free_desc_buf(&ctx->sc_coeff_v);
free_sc_h:
	vpdma_free_desc_buf(&ctx->sc_coeff_h);
free_mmr_adb:
	vpdma_free_desc_buf(&ctx->mmr_adb);
free_desc_list:
	vpdma_free_desc_list(&ctx->desc_list);
unlock:
	mutex_unlock(&dev->dev_mutex);
free_ctx:
	kfree(ctx);
	return ret;
}
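
/* Tear down everything vpe_open() set up, in reverse order */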
static int vpe_release(struct file *file)
{
	struct vpe_dev *dev = video_drvdata(file);
	struct vpe_ctx *ctx = file2ctx(file);

	vpe_dbg(dev, "releasing instance %p\n", ctx);

	mutex_lock(&dev->dev_mutex);
	free_mv_buffers(ctx);
	vpdma_free_desc_list(&ctx->desc_list);
	vpdma_free_desc_buf(&ctx->mmr_adb);
	vpdma_free_desc_buf(&ctx->sc_coeff_v);
	vpdma_free_desc_buf(&ctx->sc_coeff_h);

	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	v4l2_ctrl_handler_free(&ctx->hdl);
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);

	kfree(ctx);

	/*
	 * For now, just report the release of the last instance; we can
	 * later optimize the driver to enable or disable clocks when the
	 * first instance is created or the last instance is released.
	 */
	if (atomic_dec_return(&dev->num_instances) == 0)
		vpe_dbg(dev, "last instance released\n");

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
static const struct v4l2_file_operations vpe_fops = {
	.owner		= THIS_MODULE,
	.open		= vpe_open,
	.release	= vpe_release,
	.poll		= v4l2_m2m_fop_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= v4l2_m2m_fop_mmap,
};
static const struct video_device vpe_videodev = {
	.name		= VPE_MODULE_NAME,
	.fops		= &vpe_fops,
	.ioctl_ops	= &vpe_ioctl_ops,
	.minor		= -1,
	.release	= video_device_release_empty,
	.vfl_dir	= VFL_DIR_M2M,
	.device_caps	= V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
};
static const struct v4l2_m2m_ops m2m_ops = {
	.device_run	= device_run,
	.job_ready	= job_ready,
	.job_abort	= job_abort,
};
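
/*
 * Runtime PM helpers; the put side tolerates -ENOSYS for configurations
 * where runtime PM is not available.
 */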
static int vpe_runtime_get(struct platform_device *pdev)
{
	int r;

	dev_dbg(&pdev->dev, "vpe_runtime_get\n");

	r = pm_runtime_get_sync(&pdev->dev);
	WARN_ON(r < 0);
	return r < 0 ? r : 0;
}

static void vpe_runtime_put(struct platform_device *pdev)
{
	int r;

	dev_dbg(&pdev->dev, "vpe_runtime_put\n");

	r = pm_runtime_put_sync(&pdev->dev);
	WARN_ON(r < 0 && r != -ENOSYS);
}
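
/*
 * Called back by vpdma_create() once the VPDMA firmware has been loaded;
 * the device is only usable from that point on, which is why the video
 * device node is registered here rather than at the end of probe.
 */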
static void vpe_fw_cb(struct platform_device *pdev)
{
	struct vpe_dev *dev = platform_get_drvdata(pdev);
	struct video_device *vfd;
	int ret;

	vfd = &dev->vfd;
	*vfd = vpe_videodev;
	vfd->lock = &dev->dev_mutex;
	vfd->v4l2_dev = &dev->v4l2_dev;

	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
	if (ret) {
		vpe_err(dev, "Failed to register video device\n");

		vpe_set_clock_enable(dev, 0);
		vpe_runtime_put(pdev);
		pm_runtime_disable(&pdev->dev);
		v4l2_m2m_release(dev->m2m_dev);
		v4l2_device_unregister(&dev->v4l2_dev);

		return;
	}

	video_set_drvdata(vfd, dev);
	dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n",
		 vfd->num);
}
static int vpe_probe(struct platform_device *pdev)
{
	struct vpe_dev *dev;
	int ret, irq, func;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	spin_lock_init(&dev->lock);

	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
	if (ret)
		return ret;

	atomic_set(&dev->num_instances, 0);
	mutex_init(&dev->dev_mutex);

	dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
			"vpe_top");
	/*
	 * HACK: we get resource info from the device tree as a list of VPE
	 * sub-blocks, but the driver currently uses only the base of vpe_top
	 * for register access; it should later be changed to access registers
	 * via the sub-block base addresses.
	 */
	dev->base = devm_ioremap(&pdev->dev, dev->res->start, SZ_32K);
	if (!dev->base) {
		ret = -ENOMEM;
		goto v4l2_dev_unreg;
	}

	irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME,
			       dev);
	if (ret)
		goto v4l2_dev_unreg;

	platform_set_drvdata(pdev, dev);

	dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
	if (IS_ERR(dev->m2m_dev)) {
		vpe_err(dev, "Failed to init mem2mem device\n");
		ret = PTR_ERR(dev->m2m_dev);
		goto v4l2_dev_unreg;
	}

	pm_runtime_enable(&pdev->dev);

	ret = vpe_runtime_get(pdev);
	if (ret)
		goto rel_m2m;

	/* Perform clk enable followed by reset */
	vpe_set_clock_enable(dev, 1);

	vpe_top_reset(dev);

	func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK,
			      VPE_PID_FUNC_SHIFT);
	vpe_dbg(dev, "VPE PID function %x\n", func);

	vpe_top_vpdma_reset(dev);

	dev->sc = sc_create(pdev, "sc");
	if (IS_ERR(dev->sc)) {
		ret = PTR_ERR(dev->sc);
		goto runtime_put;
	}

	dev->csc = csc_create(pdev, "csc");
	if (IS_ERR(dev->csc)) {
		ret = PTR_ERR(dev->csc);
		goto runtime_put;
	}

	dev->vpdma = &dev->vpdma_data;
	ret = vpdma_create(pdev, dev->vpdma, vpe_fw_cb);
	if (ret)
		goto runtime_put;

	return 0;

runtime_put:
	vpe_runtime_put(pdev);
rel_m2m:
	pm_runtime_disable(&pdev->dev);
	v4l2_m2m_release(dev->m2m_dev);
v4l2_dev_unreg:
	v4l2_device_unregister(&dev->v4l2_dev);

	return ret;
}
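
/* Undo vpe_probe(); the video device itself was registered in vpe_fw_cb() */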
static int vpe_remove(struct platform_device *pdev)
{
	struct vpe_dev *dev = platform_get_drvdata(pdev);

	v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME);

	v4l2_m2m_release(dev->m2m_dev);
	video_unregister_device(&dev->vfd);
	v4l2_device_unregister(&dev->v4l2_dev);

	vpe_set_clock_enable(dev, 0);
	vpe_runtime_put(pdev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
#if defined(CONFIG_OF)
static const struct of_device_id vpe_of_match[] = {
	{
		.compatible = "ti,vpe",
	},
	{},
};
MODULE_DEVICE_TABLE(of, vpe_of_match);
#endif

static struct platform_driver vpe_pdrv = {
	.probe		= vpe_probe,
	.remove		= vpe_remove,
	.driver		= {
		.name	= VPE_MODULE_NAME,
		.of_match_table = of_match_ptr(vpe_of_match),
	},
};

module_platform_driver(vpe_pdrv);
MODULE_DESCRIPTION("TI VPE driver");
MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
MODULE_LICENSE("GPL");