1 /*
2 * camss-vfe.c
3 *
4 * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module
5 *
6 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
7 * Copyright (C) 2015-2017 Linaro Ltd.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 and
11 * only version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18 #include <linux/clk.h>
19 #include <linux/completion.h>
20 #include <linux/interrupt.h>
21 #include <linux/iommu.h>
22 #include <linux/iopoll.h>
23 #include <linux/mutex.h>
24 #include <linux/of.h>
25 #include <linux/platform_device.h>
26 #include <linux/spinlock_types.h>
27 #include <linux/spinlock.h>
28 #include <media/media-entity.h>
29 #include <media/v4l2-device.h>
30 #include <media/v4l2-subdev.h>
31
32 #include "camss-vfe.h"
33 #include "camss.h"
34
35 #define MSM_VFE_NAME "msm_vfe"
36
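/*
 * vfe_line_array() turns a pointer to one vfe_line into a pointer to the
 * line[] array it belongs to, so that to_vfe() can recover the enclosing
 * vfe_device with container_of().
 */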
37 #define vfe_line_array(ptr_line) \
38 ((const struct vfe_line (*)[]) &(ptr_line[-(ptr_line->id)]))
39
40 #define to_vfe(ptr_line) \
41 container_of(vfe_line_array(ptr_line), struct vfe_device, ptr_line)
42
43 #define VFE_0_HW_VERSION 0x000
44
45 #define VFE_0_GLOBAL_RESET_CMD 0x00c
46 #define VFE_0_GLOBAL_RESET_CMD_CORE (1 << 0)
47 #define VFE_0_GLOBAL_RESET_CMD_CAMIF (1 << 1)
48 #define VFE_0_GLOBAL_RESET_CMD_BUS (1 << 2)
49 #define VFE_0_GLOBAL_RESET_CMD_BUS_BDG (1 << 3)
50 #define VFE_0_GLOBAL_RESET_CMD_REGISTER (1 << 4)
51 #define VFE_0_GLOBAL_RESET_CMD_TIMER (1 << 5)
52 #define VFE_0_GLOBAL_RESET_CMD_PM (1 << 6)
53 #define VFE_0_GLOBAL_RESET_CMD_BUS_MISR (1 << 7)
54 #define VFE_0_GLOBAL_RESET_CMD_TESTGEN (1 << 8)
55
56 #define VFE_0_MODULE_CFG 0x018
57 #define VFE_0_MODULE_CFG_DEMUX (1 << 2)
58 #define VFE_0_MODULE_CFG_CHROMA_UPSAMPLE (1 << 3)
59 #define VFE_0_MODULE_CFG_SCALE_ENC (1 << 23)
60 #define VFE_0_MODULE_CFG_CROP_ENC (1 << 27)
61
62 #define VFE_0_CORE_CFG 0x01c
63 #define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4
64 #define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5
65 #define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6
66 #define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7
67
68 #define VFE_0_IRQ_CMD 0x024
69 #define VFE_0_IRQ_CMD_GLOBAL_CLEAR (1 << 0)
70
71 #define VFE_0_IRQ_MASK_0 0x028
72 #define VFE_0_IRQ_MASK_0_CAMIF_SOF (1 << 0)
73 #define VFE_0_IRQ_MASK_0_CAMIF_EOF (1 << 1)
74 #define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) (1 << ((n) + 5))
75 #define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \
76 ((n) == VFE_LINE_PIX ? (1 << 4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n))
77 #define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) (1 << ((n) + 8))
78 #define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) (1 << ((n) + 25))
79 #define VFE_0_IRQ_MASK_0_RESET_ACK (1 << 31)
80 #define VFE_0_IRQ_MASK_1 0x02c
81 #define VFE_0_IRQ_MASK_1_CAMIF_ERROR (1 << 0)
82 #define VFE_0_IRQ_MASK_1_VIOLATION (1 << 7)
83 #define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK (1 << 8)
84 #define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) (1 << ((n) + 9))
85 #define VFE_0_IRQ_MASK_1_RDIn_SOF(n) (1 << ((n) + 29))
86
87 #define VFE_0_IRQ_CLEAR_0 0x030
88 #define VFE_0_IRQ_CLEAR_1 0x034
89
90 #define VFE_0_IRQ_STATUS_0 0x038
91 #define VFE_0_IRQ_STATUS_0_CAMIF_SOF (1 << 0)
92 #define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) (1 << ((n) + 5))
93 #define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \
94 ((n) == VFE_LINE_PIX ? (1 << 4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n))
95 #define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) (1 << ((n) + 8))
96 #define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) (1 << ((n) + 25))
97 #define VFE_0_IRQ_STATUS_0_RESET_ACK (1 << 31)
98 #define VFE_0_IRQ_STATUS_1 0x03c
99 #define VFE_0_IRQ_STATUS_1_VIOLATION (1 << 7)
100 #define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK (1 << 8)
101 #define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) (1 << ((n) + 29))
102
103 #define VFE_0_IRQ_COMPOSITE_MASK_0 0x40
104 #define VFE_0_VIOLATION_STATUS 0x48
105
106 #define VFE_0_BUS_CMD 0x4c
107 #define VFE_0_BUS_CMD_Mx_RLD_CMD(x) (1 << (x))
108
109 #define VFE_0_BUS_CFG 0x050
110
111 #define VFE_0_BUS_XBAR_CFG_x(x) (0x58 + 0x4 * ((x) / 2))
112 #define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN (1 << 1)
113 #define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4)
114 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8
115 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0
116 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 5
117 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 6
118 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 7
119
120 #define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x06c + 0x24 * (n))
121 #define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0
122 #define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT 1
123 #define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x070 + 0x24 * (n))
124 #define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x074 + 0x24 * (n))
125 #define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x078 + 0x24 * (n))
126 #define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2
127 #define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1F << 2)
128
129 #define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x07c + 0x24 * (n))
130 #define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16
131 #define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x080 + 0x24 * (n))
132 #define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x084 + 0x24 * (n))
133 #define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \
134 (0x088 + 0x24 * (n))
135 #define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \
136 (0x08c + 0x24 * (n))
137 #define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff
138
139 #define VFE_0_BUS_PING_PONG_STATUS 0x268
140
141 #define VFE_0_BUS_BDG_CMD 0x2c0
142 #define VFE_0_BUS_BDG_CMD_HALT_REQ 1
143
144 #define VFE_0_BUS_BDG_QOS_CFG_0 0x2c4
145 #define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa5aaa5
146 #define VFE_0_BUS_BDG_QOS_CFG_1 0x2c8
147 #define VFE_0_BUS_BDG_QOS_CFG_2 0x2cc
148 #define VFE_0_BUS_BDG_QOS_CFG_3 0x2d0
149 #define VFE_0_BUS_BDG_QOS_CFG_4 0x2d4
150 #define VFE_0_BUS_BDG_QOS_CFG_5 0x2d8
151 #define VFE_0_BUS_BDG_QOS_CFG_6 0x2dc
152 #define VFE_0_BUS_BDG_QOS_CFG_7 0x2e0
153 #define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa5
154
155 #define VFE_0_RDI_CFG_x(x) (0x2e8 + (0x4 * (x)))
156 #define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28
157 #define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28)
158 #define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4
159 #define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4)
160 #define VFE_0_RDI_CFG_x_RDI_EN_BIT (1 << 2)
161 #define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3
162 #define VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(r) (1 << (16 + (r)))
163
164 #define VFE_0_CAMIF_CMD 0x2f4
165 #define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0
166 #define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1
167 #define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS (1 << 2)
168 #define VFE_0_CAMIF_CFG 0x2f8
169 #define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN (1 << 6)
170 #define VFE_0_CAMIF_FRAME_CFG 0x300
171 #define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x304
172 #define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x308
173 #define VFE_0_CAMIF_SUBSAMPLE_CFG_0 0x30c
174 #define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x314
175 #define VFE_0_CAMIF_STATUS 0x31c
176 #define VFE_0_CAMIF_STATUS_HALT (1 << 31)
177
178 #define VFE_0_REG_UPDATE 0x378
179 #define VFE_0_REG_UPDATE_RDIn(n) (1 << (1 + (n)))
180 #define VFE_0_REG_UPDATE_line_n(n) \
181 ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n))
182
183 #define VFE_0_DEMUX_CFG 0x424
184 #define VFE_0_DEMUX_CFG_PERIOD 0x3
185 #define VFE_0_DEMUX_GAIN_0 0x428
186 #define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0)
187 #define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16)
188 #define VFE_0_DEMUX_GAIN_1 0x42c
189 #define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0)
190 #define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16)
191 #define VFE_0_DEMUX_EVEN_CFG 0x438
192 #define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac
193 #define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c
194 #define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca
195 #define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9
196 #define VFE_0_DEMUX_ODD_CFG 0x43c
197 #define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac
198 #define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c
199 #define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca
200 #define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9
201
202 #define VFE_0_SCALE_ENC_Y_CFG 0x75c
203 #define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x760
204 #define VFE_0_SCALE_ENC_Y_H_PHASE 0x764
205 #define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x76c
206 #define VFE_0_SCALE_ENC_Y_V_PHASE 0x770
207 #define VFE_0_SCALE_ENC_CBCR_CFG 0x778
208 #define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x77c
209 #define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x780
210 #define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x790
211 #define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x794
212
213 #define VFE_0_CROP_ENC_Y_WIDTH 0x854
214 #define VFE_0_CROP_ENC_Y_HEIGHT 0x858
215 #define VFE_0_CROP_ENC_CBCR_WIDTH 0x85c
216 #define VFE_0_CROP_ENC_CBCR_HEIGHT 0x860
217
218 #define VFE_0_CLAMP_ENC_MAX_CFG 0x874
219 #define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0)
220 #define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8)
221 #define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16)
222 #define VFE_0_CLAMP_ENC_MIN_CFG 0x878
223 #define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0)
224 #define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8)
225 #define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16)
226
227 #define VFE_0_CGC_OVERRIDE_1 0x974
228 #define VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(x) (1 << (x))
229
230 /* VFE reset timeout */
231 #define VFE_RESET_TIMEOUT_MS 50
232 /* VFE halt timeout */
233 #define VFE_HALT_TIMEOUT_MS 100
234 /* Max number of frame drop updates per frame */
235 #define VFE_FRAME_DROP_UPDATES 5
236 /* Frame drop value. NOTE: VAL + UPDATES should not exceed 31 */
237 #define VFE_FRAME_DROP_VAL 20
238
239 #define VFE_NEXT_SOF_MS 500
240
241 #define CAMIF_TIMEOUT_SLEEP_US 1000
242 #define CAMIF_TIMEOUT_ALL_US 1000000
243
244 #define SCALER_RATIO_MAX 16
245
246 static const struct {
247 u32 code;
248 u8 bpp;
249 } vfe_formats[] = {
250 {
251 MEDIA_BUS_FMT_UYVY8_2X8,
252 8,
253 },
254 {
255 MEDIA_BUS_FMT_VYUY8_2X8,
256 8,
257 },
258 {
259 MEDIA_BUS_FMT_YUYV8_2X8,
260 8,
261 },
262 {
263 MEDIA_BUS_FMT_YVYU8_2X8,
264 8,
265 },
266 {
267 MEDIA_BUS_FMT_SBGGR8_1X8,
268 8,
269 },
270 {
271 MEDIA_BUS_FMT_SGBRG8_1X8,
272 8,
273 },
274 {
275 MEDIA_BUS_FMT_SGRBG8_1X8,
276 8,
277 },
278 {
279 MEDIA_BUS_FMT_SRGGB8_1X8,
280 8,
281 },
282 {
283 MEDIA_BUS_FMT_SBGGR10_1X10,
284 10,
285 },
286 {
287 MEDIA_BUS_FMT_SGBRG10_1X10,
288 10,
289 },
290 {
291 MEDIA_BUS_FMT_SGRBG10_1X10,
292 10,
293 },
294 {
295 MEDIA_BUS_FMT_SRGGB10_1X10,
296 10,
297 },
298 {
299 MEDIA_BUS_FMT_SBGGR12_1X12,
300 12,
301 },
302 {
303 MEDIA_BUS_FMT_SGBRG12_1X12,
304 12,
305 },
306 {
307 MEDIA_BUS_FMT_SGRBG12_1X12,
308 12,
309 },
310 {
311 MEDIA_BUS_FMT_SRGGB12_1X12,
312 12,
313 }
314 };
315
316 /*
317 * vfe_get_bpp - map media bus format to bits per pixel
318 * @code: media bus format code
319 *
320 * Return number of bits per pixel
321 */
322 static u8 vfe_get_bpp(u32 code)
323 {
324 unsigned int i;
325
326 for (i = 0; i < ARRAY_SIZE(vfe_formats); i++)
327 if (code == vfe_formats[i].code)
328 return vfe_formats[i].bpp;
329
330 WARN(1, "Unknown format\n");
331
332 return vfe_formats[0].bpp;
333 }
334
335 static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
336 {
337 u32 bits = readl_relaxed(vfe->base + reg);
338
339 writel_relaxed(bits & ~clr_bits, vfe->base + reg);
340 }
341
342 static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
343 {
344 u32 bits = readl_relaxed(vfe->base + reg);
345
346 writel_relaxed(bits | set_bits, vfe->base + reg);
347 }
348
349 static void vfe_global_reset(struct vfe_device *vfe)
350 {
351 u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_TESTGEN |
352 VFE_0_GLOBAL_RESET_CMD_BUS_MISR |
353 VFE_0_GLOBAL_RESET_CMD_PM |
354 VFE_0_GLOBAL_RESET_CMD_TIMER |
355 VFE_0_GLOBAL_RESET_CMD_REGISTER |
356 VFE_0_GLOBAL_RESET_CMD_BUS_BDG |
357 VFE_0_GLOBAL_RESET_CMD_BUS |
358 VFE_0_GLOBAL_RESET_CMD_CAMIF |
359 VFE_0_GLOBAL_RESET_CMD_CORE;
360
361 writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD);
362 }
363
364 static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable)
365 {
366 if (enable)
367 vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
368 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
369 else
370 vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
371 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
372 }
373
374 static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
375 {
376 if (enable)
377 vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
378 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
379 else
380 vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
381 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
382 }
383
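/*
 * CALC_WORD(width, M, N) is DIV_ROUND_UP(width * M, N); with N = 8 below it
 * converts a line width in pixels (M bytes per pixel) into output bus words.
 */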
384 #define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))
385
386 static int vfe_word_per_line(uint32_t format, uint32_t pixel_per_line)
387 {
388 int val = 0;
389
390 switch (format) {
391 case V4L2_PIX_FMT_NV12:
392 case V4L2_PIX_FMT_NV21:
393 case V4L2_PIX_FMT_NV16:
394 case V4L2_PIX_FMT_NV61:
395 val = CALC_WORD(pixel_per_line, 1, 8);
396 break;
397 case V4L2_PIX_FMT_YUYV:
398 case V4L2_PIX_FMT_YVYU:
399 case V4L2_PIX_FMT_UYVY:
400 case V4L2_PIX_FMT_VYUY:
401 val = CALC_WORD(pixel_per_line, 2, 8);
402 break;
403 }
404
405 return val;
406 }
407
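/*
 * Per-plane write master size: for NV12/NV21 the second (chroma) plane is
 * half the luma height, while NV16/NV61 keep full height on both planes.
 */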
408 static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
409 u16 *width, u16 *height, u16 *bytesperline)
410 {
411 switch (pix->pixelformat) {
412 case V4L2_PIX_FMT_NV12:
413 case V4L2_PIX_FMT_NV21:
414 *width = pix->width;
415 *height = pix->height;
416 *bytesperline = pix->plane_fmt[0].bytesperline;
417 if (plane == 1)
418 *height /= 2;
419 break;
420 case V4L2_PIX_FMT_NV16:
421 case V4L2_PIX_FMT_NV61:
422 *width = pix->width;
423 *height = pix->height;
424 *bytesperline = pix->plane_fmt[0].bytesperline;
425 break;
426 }
427 }
428
429 static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
430 struct v4l2_pix_format_mplane *pix,
431 u8 plane, u32 enable)
432 {
433 u32 reg;
434
435 if (enable) {
436 u16 width = 0, height = 0, bytesperline = 0, wpl;
437
438 vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline);
439
440 wpl = vfe_word_per_line(pix->pixelformat, width);
441
442 reg = height - 1;
443 reg |= ((wpl + 1) / 2 - 1) << 16;
444
445 writel_relaxed(reg, vfe->base +
446 VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
447
448 wpl = vfe_word_per_line(pix->pixelformat, bytesperline);
449
450 reg = 0x3;
451 reg |= (height - 1) << 4;
452 reg |= wpl << 16;
453
454 writel_relaxed(reg, vfe->base +
455 VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
456 } else {
457 writel_relaxed(0, vfe->base +
458 VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
459 writel_relaxed(0, vfe->base +
460 VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
461 }
462 }
463
464 static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per)
465 {
466 u32 reg;
467
468 reg = readl_relaxed(vfe->base +
469 VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
470
471 reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK);
472
473 reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT)
474 & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK;
475
476 writel_relaxed(reg,
477 vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
478 }
479
480 static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm,
481 u32 pattern)
482 {
483 writel_relaxed(pattern,
484 vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm));
485 }
486
487 static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm, u16 offset,
488 u16 depth)
489 {
490 u32 reg;
491
492 reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) |
493 depth;
494 writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm));
495 }
496
497 static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm)
498 {
499 wmb();
500 writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD);
501 wmb();
502 }
503
504 static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr)
505 {
506 writel_relaxed(addr,
507 vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm));
508 }
509
510 static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr)
511 {
512 writel_relaxed(addr,
513 vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm));
514 }
515
516 static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm)
517 {
518 u32 reg;
519
520 reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS);
521
522 return (reg >> wm) & 0x1;
523 }
524
525 static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable)
526 {
527 if (enable)
528 writel_relaxed(0x10000009, vfe->base + VFE_0_BUS_CFG);
529 else
530 writel_relaxed(0, vfe->base + VFE_0_BUS_CFG);
531 }
532
533 static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm,
534 enum vfe_line_id id)
535 {
536 u32 reg;
537
538 reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
539 reg |= VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
540 vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg);
541
542 reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
543 reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) &
544 VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK;
545 vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg);
546
547 switch (id) {
548 case VFE_LINE_RDI0:
549 default:
550 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
551 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
552 break;
553 case VFE_LINE_RDI1:
554 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
555 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
556 break;
557 case VFE_LINE_RDI2:
558 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
559 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
560 break;
561 }
562
563 if (wm % 2 == 1)
564 reg <<= 16;
565
566 vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
567 }
568
569 static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm)
570 {
571 writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF,
572 vfe->base +
573 VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm));
574 }
575
576 static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm,
577 enum vfe_line_id id)
578 {
579 u32 reg;
580
581 reg = VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
582 vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(0), reg);
583
584 reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
585 vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg);
586
587 switch (id) {
588 case VFE_LINE_RDI0:
589 default:
590 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
591 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
592 break;
593 case VFE_LINE_RDI1:
594 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
595 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
596 break;
597 case VFE_LINE_RDI2:
598 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
599 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
600 break;
601 }
602
603 if (wm % 2 == 1)
604 reg <<= 16;
605
606 vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
607 }
608
609 static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
610 u8 enable)
611 {
612 struct vfe_line *line = container_of(output, struct vfe_line, output);
613 u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
614 u32 reg;
615 unsigned int i;
616
617 for (i = 0; i < output->wm_num; i++) {
618 if (i == 0) {
619 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA <<
620 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
621 } else if (i == 1) {
622 reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
623 if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
624 reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
625 } else {
626 /* On current devices output->wm_num is always <= 2 */
627 break;
628 }
629
630 if (output->wm_idx[i] % 2 == 1)
631 reg <<= 16;
632
633 if (enable)
634 vfe_reg_set(vfe,
635 VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
636 reg);
637 else
638 vfe_reg_clr(vfe,
639 VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
640 reg);
641 }
642 }
643
644 static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid)
645 {
646 vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id),
647 VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK);
648
649 vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id),
650 cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT);
651 }
652
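/*
 * Request a register update for the given line so that newly programmed
 * configuration takes effect. Requested lines are accumulated in
 * vfe->reg_update; the bit is cleared in vfe_isr_reg_update() once the
 * corresponding interrupt is received.
 */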
653 static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
654 {
655 vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id);
656 wmb();
657 writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE);
658 wmb();
659 }
660
661 static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm,
662 enum vfe_line_id line_id, u8 enable)
663 {
664 u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) |
665 VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
666 u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) |
667 VFE_0_IRQ_MASK_1_RDIn_SOF(line_id);
668
669 if (enable) {
670 vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
671 vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
672 } else {
673 vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
674 vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
675 }
676 }
677
678 static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp,
679 enum vfe_line_id line_id, u8 enable)
680 {
681 struct vfe_output *output = &vfe->line[line_id].output;
682 unsigned int i;
683 u32 irq_en0;
684 u32 irq_en1;
685 u32 comp_mask = 0;
686
687 irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF;
688 irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF;
689 irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp);
690 irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
691 irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR;
692 for (i = 0; i < output->wm_num; i++) {
693 irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(
694 output->wm_idx[i]);
695 comp_mask |= (1 << output->wm_idx[i]) << comp * 8;
696 }
697
698 if (enable) {
699 vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
700 vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
701 vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
702 } else {
703 vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
704 vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
705 vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
706 }
707 }
708
709 static void vfe_enable_irq_common(struct vfe_device *vfe)
710 {
711 u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK;
712 u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION |
713 VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK;
714
715 vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
716 vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
717 }
718
719 static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
720 {
721 u32 val, even_cfg, odd_cfg;
722
723 writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG);
724
725 val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD;
726 writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0);
727
728 val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2;
729 writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
730
731 switch (line->fmt[MSM_VFE_PAD_SINK].code) {
732 case MEDIA_BUS_FMT_YUYV8_2X8:
733 even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
734 odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
735 break;
736 case MEDIA_BUS_FMT_YVYU8_2X8:
737 even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
738 odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
739 break;
740 case MEDIA_BUS_FMT_UYVY8_2X8:
741 default:
742 even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
743 odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
744 break;
745 case MEDIA_BUS_FMT_VYUY8_2X8:
746 even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
747 odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
748 break;
749 }
750
751 writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG);
752 writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG);
753 }
754
755 static inline u8 vfe_calc_interp_reso(u16 input, u16 output)
756 {
757 if (input / output >= 16)
758 return 0;
759
760 if (input / output >= 8)
761 return 1;
762
763 if (input / output >= 4)
764 return 2;
765
766 return 3;
767 }
768
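/*
 * Program the encoder scaler: for each of the Y and CbCr paths the horizontal
 * and vertical blocks are given the input/output sizes and a fixed-point
 * phase step of input * 2^(13 + interp_reso) / output, where interp_reso is
 * selected from the downscale ratio by vfe_calc_interp_reso().
 */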
769 static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line)
770 {
771 u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
772 u32 reg;
773 u16 input, output;
774 u8 interp_reso;
775 u32 phase_mult;
776
777 writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG);
778
779 input = line->fmt[MSM_VFE_PAD_SINK].width;
780 output = line->compose.width;
781 reg = (output << 16) | input;
782 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE);
783
784 interp_reso = vfe_calc_interp_reso(input, output);
785 phase_mult = input * (1 << (13 + interp_reso)) / output;
786 reg = (interp_reso << 20) | phase_mult;
787 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE);
788
789 input = line->fmt[MSM_VFE_PAD_SINK].height;
790 output = line->compose.height;
791 reg = (output << 16) | input;
792 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE);
793
794 interp_reso = vfe_calc_interp_reso(input, output);
795 phase_mult = input * (1 << (13 + interp_reso)) / output;
796 reg = (interp_reso << 20) | phase_mult;
797 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE);
798
799 writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG);
800
801 input = line->fmt[MSM_VFE_PAD_SINK].width;
802 output = line->compose.width / 2;
803 reg = (output << 16) | input;
804 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE);
805
806 interp_reso = vfe_calc_interp_reso(input, output);
807 phase_mult = input * (1 << (13 + interp_reso)) / output;
808 reg = (interp_reso << 20) | phase_mult;
809 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE);
810
811 input = line->fmt[MSM_VFE_PAD_SINK].height;
812 output = line->compose.height;
813 if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21)
814 output = line->compose.height / 2;
815 reg = (output << 16) | input;
816 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE);
817
818 interp_reso = vfe_calc_interp_reso(input, output);
819 phase_mult = input * (1 << (13 + interp_reso)) / output;
820 reg = (interp_reso << 20) | phase_mult;
821 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE);
822 }
823
824 static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line)
825 {
826 u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
827 u32 reg;
828 u16 first, last;
829
830 first = line->crop.left;
831 last = line->crop.left + line->crop.width - 1;
832 reg = (first << 16) | last;
833 writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH);
834
835 first = line->crop.top;
836 last = line->crop.top + line->crop.height - 1;
837 reg = (first << 16) | last;
838 writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT);
839
840 first = line->crop.left / 2;
841 last = line->crop.left / 2 + line->crop.width / 2 - 1;
842 reg = (first << 16) | last;
843 writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH);
844
845 first = line->crop.top;
846 last = line->crop.top + line->crop.height - 1;
847 if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) {
848 first = line->crop.top / 2;
849 last = line->crop.top / 2 + line->crop.height / 2 - 1;
850 }
851 reg = (first << 16) | last;
852 writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT);
853 }
854
855 static void vfe_set_clamp_cfg(struct vfe_device *vfe)
856 {
857 u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 |
858 VFE_0_CLAMP_ENC_MAX_CFG_CH1 |
859 VFE_0_CLAMP_ENC_MAX_CFG_CH2;
860
861 writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG);
862
863 val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 |
864 VFE_0_CLAMP_ENC_MIN_CFG_CH1 |
865 VFE_0_CLAMP_ENC_MIN_CFG_CH2;
866
867 writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG);
868 }
869
870 /*
871 * vfe_reset - Trigger reset on VFE module and wait to complete
872 * @vfe: VFE device
873 *
874 * Return 0 on success or a negative error code otherwise
875 */
876 static int vfe_reset(struct vfe_device *vfe)
877 {
878 unsigned long time;
879
880 reinit_completion(&vfe->reset_complete);
881
882 vfe_global_reset(vfe);
883
884 time = wait_for_completion_timeout(&vfe->reset_complete,
885 msecs_to_jiffies(VFE_RESET_TIMEOUT_MS));
886 if (!time) {
887 dev_err(to_device(vfe), "VFE reset timeout\n");
888 return -EIO;
889 }
890
891 return 0;
892 }
893
894 /*
895 * vfe_halt - Trigger halt on VFE module and wait to complete
896 * @vfe: VFE device
897 *
898 * Return 0 on success or a negative error code otherwise
899 */
900 static int vfe_halt(struct vfe_device *vfe)
901 {
902 unsigned long time;
903
904 reinit_completion(&vfe->halt_complete);
905
906 writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ,
907 vfe->base + VFE_0_BUS_BDG_CMD);
908
909 time = wait_for_completion_timeout(&vfe->halt_complete,
910 msecs_to_jiffies(VFE_HALT_TIMEOUT_MS));
911 if (!time) {
912 dev_err(to_device(vfe), "VFE halt timeout\n");
913 return -EIO;
914 }
915
916 return 0;
917 }
918
919 static void vfe_init_outputs(struct vfe_device *vfe)
920 {
921 int i;
922
923 for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
924 struct vfe_output *output = &vfe->line[i].output;
925
926 output->state = VFE_OUTPUT_OFF;
927 output->buf[0] = NULL;
928 output->buf[1] = NULL;
929 INIT_LIST_HEAD(&output->pending_bufs);
930
931 output->wm_num = 1;
932 if (vfe->line[i].id == VFE_LINE_PIX)
933 output->wm_num = 2;
934 }
935 }
936
937 static void vfe_reset_output_maps(struct vfe_device *vfe)
938 {
939 int i;
940
941 for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++)
942 vfe->wm_output_map[i] = VFE_LINE_NONE;
943 }
944
945 static void vfe_set_qos(struct vfe_device *vfe)
946 {
947 u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG;
948 u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG;
949
950 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0);
951 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1);
952 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2);
953 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3);
954 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4);
955 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5);
956 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6);
957 writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7);
958 }
959
960 static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable)
961 {
962 u32 val = VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(wm);
963
964 if (enable)
965 vfe_reg_set(vfe, VFE_0_CGC_OVERRIDE_1, val);
966 else
967 vfe_reg_clr(vfe, VFE_0_CGC_OVERRIDE_1, val);
968
969 wmb();
970 }
971
972 static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable)
973 {
974 u32 val = VFE_0_MODULE_CFG_DEMUX |
975 VFE_0_MODULE_CFG_CHROMA_UPSAMPLE |
976 VFE_0_MODULE_CFG_SCALE_ENC |
977 VFE_0_MODULE_CFG_CROP_ENC;
978
979 if (enable)
980 writel_relaxed(val, vfe->base + VFE_0_MODULE_CFG);
981 else
982 writel_relaxed(0x0, vfe->base + VFE_0_MODULE_CFG);
983 }
984
985 static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
986 {
987 u32 val;
988
989 switch (line->fmt[MSM_VFE_PAD_SINK].code) {
990 case MEDIA_BUS_FMT_YUYV8_2X8:
991 val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
992 break;
993 case MEDIA_BUS_FMT_YVYU8_2X8:
994 val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
995 break;
996 case MEDIA_BUS_FMT_UYVY8_2X8:
997 default:
998 val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
999 break;
1000 case MEDIA_BUS_FMT_VYUY8_2X8:
1001 val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
1002 break;
1003 }
1004
1005 writel_relaxed(val, vfe->base + VFE_0_CORE_CFG);
1006
1007 val = line->fmt[MSM_VFE_PAD_SINK].width * 2;
1008 val |= line->fmt[MSM_VFE_PAD_SINK].height << 16;
1009 writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG);
1010
1011 val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
1012 writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG);
1013
1014 val = line->fmt[MSM_VFE_PAD_SINK].height - 1;
1015 writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG);
1016
1017 val = 0xffffffff;
1018 writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG_0);
1019
1020 val = 0xffffffff;
1021 writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN);
1022
1023 val = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
1024 vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val);
1025
1026 val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN;
1027 writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG);
1028 }
1029
1030 static void vfe_set_camif_cmd(struct vfe_device *vfe, u32 cmd)
1031 {
1032 writel_relaxed(VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS,
1033 vfe->base + VFE_0_CAMIF_CMD);
1034
1035 writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
1036 }
1037
1038 static int vfe_camif_wait_for_stop(struct vfe_device *vfe)
1039 {
1040 u32 val;
1041 int ret;
1042
1043 ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS,
1044 val,
1045 (val & VFE_0_CAMIF_STATUS_HALT),
1046 CAMIF_TIMEOUT_SLEEP_US,
1047 CAMIF_TIMEOUT_ALL_US);
1048 if (ret < 0)
1049 dev_err(to_device(vfe), "%s: camif stop timeout\n", __func__);
1050
1051 return ret;
1052 }
1053
1054 static void vfe_output_init_addrs(struct vfe_device *vfe,
1055 struct vfe_output *output, u8 sync)
1056 {
1057 u32 ping_addr;
1058 u32 pong_addr;
1059 unsigned int i;
1060
1061 output->active_buf = 0;
1062
1063 for (i = 0; i < output->wm_num; i++) {
1064 if (output->buf[0])
1065 ping_addr = output->buf[0]->addr[i];
1066 else
1067 ping_addr = 0;
1068
1069 if (output->buf[1])
1070 pong_addr = output->buf[1]->addr[i];
1071 else
1072 pong_addr = ping_addr;
1073
1074 vfe_wm_set_ping_addr(vfe, output->wm_idx[i], ping_addr);
1075 vfe_wm_set_pong_addr(vfe, output->wm_idx[i], pong_addr);
1076 if (sync)
1077 vfe_bus_reload_wm(vfe, output->wm_idx[i]);
1078 }
1079 }
1080
1081 static void vfe_output_update_ping_addr(struct vfe_device *vfe,
1082 struct vfe_output *output, u8 sync)
1083 {
1084 u32 addr;
1085 unsigned int i;
1086
1087 for (i = 0; i < output->wm_num; i++) {
1088 if (output->buf[0])
1089 addr = output->buf[0]->addr[i];
1090 else
1091 addr = 0;
1092
1093 vfe_wm_set_ping_addr(vfe, output->wm_idx[i], addr);
1094 if (sync)
1095 vfe_bus_reload_wm(vfe, output->wm_idx[i]);
1096 }
1097 }
1098
1099 static void vfe_output_update_pong_addr(struct vfe_device *vfe,
1100 struct vfe_output *output, u8 sync)
1101 {
1102 u32 addr;
1103 unsigned int i;
1104
1105 for (i = 0; i < output->wm_num; i++) {
1106 if (output->buf[1])
1107 addr = output->buf[1]->addr[i];
1108 else
1109 addr = 0;
1110
1111 vfe_wm_set_pong_addr(vfe, output->wm_idx[i], addr);
1112 if (sync)
1113 vfe_bus_reload_wm(vfe, output->wm_idx[i]);
1114 }
1115
1116 }
1117
1118 static int vfe_reserve_wm(struct vfe_device *vfe, enum vfe_line_id line_id)
1119 {
1120 int ret = -EBUSY;
1121 int i;
1122
1123 for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) {
1124 if (vfe->wm_output_map[i] == VFE_LINE_NONE) {
1125 vfe->wm_output_map[i] = line_id;
1126 ret = i;
1127 break;
1128 }
1129 }
1130
1131 return ret;
1132 }
1133
1134 static int vfe_release_wm(struct vfe_device *vfe, u8 wm)
1135 {
1136 if (wm >= ARRAY_SIZE(vfe->wm_output_map))
1137 return -EINVAL;
1138
1139 vfe->wm_output_map[wm] = VFE_LINE_NONE;
1140
1141 return 0;
1142 }
1143
1144 static void vfe_output_frame_drop(struct vfe_device *vfe,
1145 struct vfe_output *output,
1146 u32 drop_pattern)
1147 {
1148 u8 drop_period;
1149 unsigned int i;
1150
1151 /* We need to toggle update period to be valid on next frame */
1152 output->drop_update_idx++;
1153 output->drop_update_idx %= VFE_FRAME_DROP_UPDATES;
1154 drop_period = VFE_FRAME_DROP_VAL + output->drop_update_idx;
1155
1156 for (i = 0; i < output->wm_num; i++) {
1157 vfe_wm_set_framedrop_period(vfe, output->wm_idx[i],
1158 drop_period);
1159 vfe_wm_set_framedrop_pattern(vfe, output->wm_idx[i],
1160 drop_pattern);
1161 }
1162 vfe_reg_update(vfe, container_of(output, struct vfe_line, output)->id);
1163 }
1164
1165 static struct camss_buffer *vfe_buf_get_pending(struct vfe_output *output)
1166 {
1167 struct camss_buffer *buffer = NULL;
1168
1169 if (!list_empty(&output->pending_bufs)) {
1170 buffer = list_first_entry(&output->pending_bufs,
1171 struct camss_buffer,
1172 queue);
1173 list_del(&buffer->queue);
1174 }
1175
1176 return buffer;
1177 }
1178
1179 /*
1180 * vfe_buf_add_pending - Add output buffer to list of pending
1181 * @output: VFE output
1182 * @buffer: Video buffer
1183 */
1184 static void vfe_buf_add_pending(struct vfe_output *output,
1185 struct camss_buffer *buffer)
1186 {
1187 INIT_LIST_HEAD(&buffer->queue);
1188 list_add_tail(&buffer->queue, &output->pending_bufs);
1189 }
1190
1191 /*
1192 * vfe_buf_flush_pending - Flush all pending buffers.
1193 * @output: VFE output
1194 * @state: vb2 buffer state
1195 */
1196 static void vfe_buf_flush_pending(struct vfe_output *output,
1197 enum vb2_buffer_state state)
1198 {
1199 struct camss_buffer *buf;
1200 struct camss_buffer *t;
1201
1202 list_for_each_entry_safe(buf, t, &output->pending_bufs, queue) {
1203 vb2_buffer_done(&buf->vb.vb2_buf, state);
1204 list_del(&buf->queue);
1205 }
1206 }
1207
1208 static void vfe_buf_update_wm_on_next(struct vfe_device *vfe,
1209 struct vfe_output *output)
1210 {
1211 switch (output->state) {
1212 case VFE_OUTPUT_CONTINUOUS:
1213 vfe_output_frame_drop(vfe, output, 3);
1214 break;
1215 case VFE_OUTPUT_SINGLE:
1216 default:
1217 dev_err_ratelimited(to_device(vfe),
1218 "Next buf in wrong state! %d\n",
1219 output->state);
1220 break;
1221 }
1222 }
1223
1224 static void vfe_buf_update_wm_on_last(struct vfe_device *vfe,
1225 struct vfe_output *output)
1226 {
1227 switch (output->state) {
1228 case VFE_OUTPUT_CONTINUOUS:
1229 output->state = VFE_OUTPUT_SINGLE;
1230 vfe_output_frame_drop(vfe, output, 1);
1231 break;
1232 case VFE_OUTPUT_SINGLE:
1233 output->state = VFE_OUTPUT_STOPPING;
1234 vfe_output_frame_drop(vfe, output, 0);
1235 break;
1236 default:
1237 dev_err_ratelimited(to_device(vfe),
1238 "Last buff in wrong state! %d\n",
1239 output->state);
1240 break;
1241 }
1242 }
1243
1244 static void vfe_buf_update_wm_on_new(struct vfe_device *vfe,
1245 struct vfe_output *output,
1246 struct camss_buffer *new_buf)
1247 {
1248 int inactive_idx;
1249
1250 switch (output->state) {
1251 case VFE_OUTPUT_SINGLE:
1252 inactive_idx = !output->active_buf;
1253
1254 if (!output->buf[inactive_idx]) {
1255 output->buf[inactive_idx] = new_buf;
1256
1257 if (inactive_idx)
1258 vfe_output_update_pong_addr(vfe, output, 0);
1259 else
1260 vfe_output_update_ping_addr(vfe, output, 0);
1261
1262 vfe_output_frame_drop(vfe, output, 3);
1263 output->state = VFE_OUTPUT_CONTINUOUS;
1264 } else {
1265 vfe_buf_add_pending(output, new_buf);
1266 dev_err_ratelimited(to_device(vfe),
1267 "Inactive buffer is busy\n");
1268 }
1269 break;
1270
1271 case VFE_OUTPUT_IDLE:
1272 if (!output->buf[0]) {
1273 output->buf[0] = new_buf;
1274
1275 vfe_output_init_addrs(vfe, output, 1);
1276
1277 vfe_output_frame_drop(vfe, output, 1);
1278 output->state = VFE_OUTPUT_SINGLE;
1279 } else {
1280 vfe_buf_add_pending(output, new_buf);
1281 dev_err_ratelimited(to_device(vfe),
1282 "Output idle with buffer set!\n");
1283 }
1284 break;
1285
1286 case VFE_OUTPUT_CONTINUOUS:
1287 default:
1288 vfe_buf_add_pending(output, new_buf);
1289 break;
1290 }
1291 }
1292
1293 static int vfe_get_output(struct vfe_line *line)
1294 {
1295 struct vfe_device *vfe = to_vfe(line);
1296 struct vfe_output *output;
1297 unsigned long flags;
1298 int i;
1299 int wm_idx;
1300
1301 spin_lock_irqsave(&vfe->output_lock, flags);
1302
1303 output = &line->output;
1304 if (output->state != VFE_OUTPUT_OFF) {
1305 dev_err(to_device(vfe), "Output is running\n");
1306 goto error;
1307 }
1308 output->state = VFE_OUTPUT_RESERVED;
1309
1310 output->active_buf = 0;
1311
1312 for (i = 0; i < output->wm_num; i++) {
1313 wm_idx = vfe_reserve_wm(vfe, line->id);
1314 if (wm_idx < 0) {
1315 dev_err(to_device(vfe), "Can not reserve wm\n");
1316 goto error_get_wm;
1317 }
1318 output->wm_idx[i] = wm_idx;
1319 }
1320
1321 output->drop_update_idx = 0;
1322
1323 spin_unlock_irqrestore(&vfe->output_lock, flags);
1324
1325 return 0;
1326
1327 error_get_wm:
1328 for (i--; i >= 0; i--)
1329 vfe_release_wm(vfe, output->wm_idx[i]);
1330 output->state = VFE_OUTPUT_OFF;
1331 error:
1332 spin_unlock_irqrestore(&vfe->output_lock, flags);
1333
1334 return -EINVAL;
1335 }
1336
1337 static int vfe_put_output(struct vfe_line *line)
1338 {
1339 struct vfe_device *vfe = to_vfe(line);
1340 struct vfe_output *output = &line->output;
1341 unsigned long flags;
1342 unsigned int i;
1343
1344 spin_lock_irqsave(&vfe->output_lock, flags);
1345
1346 for (i = 0; i < output->wm_num; i++)
1347 vfe_release_wm(vfe, output->wm_idx[i]);
1348
1349 output->state = VFE_OUTPUT_OFF;
1350
1351 spin_unlock_irqrestore(&vfe->output_lock, flags);
1352 return 0;
1353 }
1354
1355 static int vfe_enable_output(struct vfe_line *line)
1356 {
1357 struct vfe_device *vfe = to_vfe(line);
1358 struct vfe_output *output = &line->output;
1359 unsigned long flags;
1360 unsigned int i;
1361 u16 ub_size;
1362
1363 switch (vfe->id) {
1364 case 0:
1365 ub_size = MSM_VFE_VFE0_UB_SIZE_RDI;
1366 break;
1367 case 1:
1368 ub_size = MSM_VFE_VFE1_UB_SIZE_RDI;
1369 break;
1370 default:
1371 return -EINVAL;
1372 }
1373
1374 spin_lock_irqsave(&vfe->output_lock, flags);
1375
1376 vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line->id);
1377
1378 if (output->state != VFE_OUTPUT_RESERVED) {
1379 dev_err(to_device(vfe), "Output is not in reserved state %d\n",
1380 output->state);
1381 spin_unlock_irqrestore(&vfe->output_lock, flags);
1382 return -EINVAL;
1383 }
1384 output->state = VFE_OUTPUT_IDLE;
1385
1386 output->buf[0] = vfe_buf_get_pending(output);
1387 output->buf[1] = vfe_buf_get_pending(output);
1388
1389 if (!output->buf[0] && output->buf[1]) {
1390 output->buf[0] = output->buf[1];
1391 output->buf[1] = NULL;
1392 }
1393
1394 if (output->buf[0])
1395 output->state = VFE_OUTPUT_SINGLE;
1396
1397 if (output->buf[1])
1398 output->state = VFE_OUTPUT_CONTINUOUS;
1399
1400 switch (output->state) {
1401 case VFE_OUTPUT_SINGLE:
1402 vfe_output_frame_drop(vfe, output, 1);
1403 break;
1404 case VFE_OUTPUT_CONTINUOUS:
1405 vfe_output_frame_drop(vfe, output, 3);
1406 break;
1407 default:
1408 vfe_output_frame_drop(vfe, output, 0);
1409 break;
1410 }
1411
1412 output->sequence = 0;
1413 output->wait_sof = 0;
1414 output->wait_reg_update = 0;
1415 reinit_completion(&output->sof);
1416 reinit_completion(&output->reg_update);
1417
1418 vfe_output_init_addrs(vfe, output, 0);
1419
1420 if (line->id != VFE_LINE_PIX) {
1421 vfe_set_cgc_override(vfe, output->wm_idx[0], 1);
1422 vfe_enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1);
1423 vfe_bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id);
1424 vfe_wm_set_subsample(vfe, output->wm_idx[0]);
1425 vfe_set_rdi_cid(vfe, line->id, 0);
1426 vfe_wm_set_ub_cfg(vfe, output->wm_idx[0],
1427 (ub_size + 1) * output->wm_idx[0], ub_size);
1428 vfe_wm_frame_based(vfe, output->wm_idx[0], 1);
1429 vfe_wm_enable(vfe, output->wm_idx[0], 1);
1430 vfe_bus_reload_wm(vfe, output->wm_idx[0]);
1431 } else {
1432 ub_size /= output->wm_num;
1433 for (i = 0; i < output->wm_num; i++) {
1434 vfe_set_cgc_override(vfe, output->wm_idx[i], 1);
1435 vfe_wm_set_subsample(vfe, output->wm_idx[i]);
1436 vfe_wm_set_ub_cfg(vfe, output->wm_idx[i],
1437 (ub_size + 1) * output->wm_idx[i],
1438 ub_size);
1439 vfe_wm_line_based(vfe, output->wm_idx[i],
1440 &line->video_out.active_fmt.fmt.pix_mp,
1441 i, 1);
1442 vfe_wm_enable(vfe, output->wm_idx[i], 1);
1443 vfe_bus_reload_wm(vfe, output->wm_idx[i]);
1444 }
1445 vfe_enable_irq_pix_line(vfe, 0, line->id, 1);
1446 vfe_set_module_cfg(vfe, 1);
1447 vfe_set_camif_cfg(vfe, line);
1448 vfe_set_xbar_cfg(vfe, output, 1);
1449 vfe_set_demux_cfg(vfe, line);
1450 vfe_set_scale_cfg(vfe, line);
1451 vfe_set_crop_cfg(vfe, line);
1452 vfe_set_clamp_cfg(vfe);
1453 vfe_set_camif_cmd(vfe, VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY);
1454 }
1455
1456 vfe_reg_update(vfe, line->id);
1457
1458 spin_unlock_irqrestore(&vfe->output_lock, flags);
1459
1460 return 0;
1461 }
1462
1463 static int vfe_disable_output(struct vfe_line *line)
1464 {
1465 struct vfe_device *vfe = to_vfe(line);
1466 struct vfe_output *output = &line->output;
1467 unsigned long flags;
1468 unsigned long time;
1469 unsigned int i;
1470
1471 spin_lock_irqsave(&vfe->output_lock, flags);
1472
1473 output->wait_sof = 1;
1474 spin_unlock_irqrestore(&vfe->output_lock, flags);
1475
1476 time = wait_for_completion_timeout(&output->sof,
1477 msecs_to_jiffies(VFE_NEXT_SOF_MS));
1478 if (!time)
1479 dev_err(to_device(vfe), "VFE sof timeout\n");
1480
1481 spin_lock_irqsave(&vfe->output_lock, flags);
1482 for (i = 0; i < output->wm_num; i++)
1483 vfe_wm_enable(vfe, output->wm_idx[i], 0);
1484
1485 vfe_reg_update(vfe, line->id);
1486 output->wait_reg_update = 1;
1487 spin_unlock_irqrestore(&vfe->output_lock, flags);
1488
1489 time = wait_for_completion_timeout(&output->reg_update,
1490 msecs_to_jiffies(VFE_NEXT_SOF_MS));
1491 if (!time)
1492 dev_err(to_device(vfe), "VFE reg update timeout\n");
1493
1494 spin_lock_irqsave(&vfe->output_lock, flags);
1495
1496 if (line->id != VFE_LINE_PIX) {
1497 vfe_wm_frame_based(vfe, output->wm_idx[0], 0);
1498 vfe_bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0], line->id);
1499 vfe_enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0);
1500 vfe_set_cgc_override(vfe, output->wm_idx[0], 0);
1501 spin_unlock_irqrestore(&vfe->output_lock, flags);
1502 } else {
1503 for (i = 0; i < output->wm_num; i++) {
1504 vfe_wm_line_based(vfe, output->wm_idx[i], NULL, i, 0);
1505 vfe_set_cgc_override(vfe, output->wm_idx[i], 0);
1506 }
1507
1508 vfe_enable_irq_pix_line(vfe, 0, line->id, 0);
1509 vfe_set_module_cfg(vfe, 0);
1510 vfe_set_xbar_cfg(vfe, output, 0);
1511
1512 vfe_set_camif_cmd(vfe, VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY);
1513 spin_unlock_irqrestore(&vfe->output_lock, flags);
1514
1515 vfe_camif_wait_for_stop(vfe);
1516 }
1517
1518 return 0;
1519 }
1520
1521 /*
1522 * vfe_enable - Enable streaming on VFE line
1523 * @line: VFE line
1524 *
1525 * Return 0 on success or a negative error code otherwise
1526 */
1527 static int vfe_enable(struct vfe_line *line)
1528 {
1529 struct vfe_device *vfe = to_vfe(line);
1530 int ret;
1531
1532 mutex_lock(&vfe->stream_lock);
1533
1534 if (!vfe->stream_count) {
1535 vfe_enable_irq_common(vfe);
1536
1537 vfe_bus_enable_wr_if(vfe, 1);
1538
1539 vfe_set_qos(vfe);
1540 }
1541
1542 vfe->stream_count++;
1543
1544 mutex_unlock(&vfe->stream_lock);
1545
1546 ret = vfe_get_output(line);
1547 if (ret < 0)
1548 goto error_get_output;
1549
1550 ret = vfe_enable_output(line);
1551 if (ret < 0)
1552 goto error_enable_output;
1553
1554 vfe->was_streaming = 1;
1555
1556 return 0;
1557
1558
1559 error_enable_output:
1560 vfe_put_output(line);
1561
1562 error_get_output:
1563 mutex_lock(&vfe->stream_lock);
1564
1565 if (vfe->stream_count == 1)
1566 vfe_bus_enable_wr_if(vfe, 0);
1567
1568 vfe->stream_count--;
1569
1570 mutex_unlock(&vfe->stream_lock);
1571
1572 return ret;
1573 }
1574
1575 /*
1576 * vfe_disable - Disable streaming on VFE line
1577 * @line: VFE line
1578 *
1579 * Return 0 on success or a negative error code otherwise
1580 */
1581 static int vfe_disable(struct vfe_line *line)
1582 {
1583 struct vfe_device *vfe = to_vfe(line);
1584
1585 vfe_disable_output(line);
1586
1587 vfe_put_output(line);
1588
1589 mutex_lock(&vfe->stream_lock);
1590
1591 if (vfe->stream_count == 1)
1592 vfe_bus_enable_wr_if(vfe, 0);
1593
1594 vfe->stream_count--;
1595
1596 mutex_unlock(&vfe->stream_lock);
1597
1598 return 0;
1599 }
1600
1601 /*
1602 * vfe_isr_sof - Process start of frame interrupt
1603 * @vfe: VFE Device
1604 * @line_id: VFE line
1605 */
1606 static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id)
1607 {
1608 struct vfe_output *output;
1609 unsigned long flags;
1610
1611 spin_lock_irqsave(&vfe->output_lock, flags);
1612 output = &vfe->line[line_id].output;
1613 if (output->wait_sof) {
1614 output->wait_sof = 0;
1615 complete(&output->sof);
1616 }
1617 spin_unlock_irqrestore(&vfe->output_lock, flags);
1618 }
1619
1620 /*
1621 * vfe_isr_reg_update - Process reg update interrupt
1622 * @vfe: VFE Device
1623 * @line_id: VFE line
1624 */
1625 static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
1626 {
1627 struct vfe_output *output;
1628 unsigned long flags;
1629
1630 spin_lock_irqsave(&vfe->output_lock, flags);
1631 vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id);
1632
1633 output = &vfe->line[line_id].output;
1634
1635 if (output->wait_reg_update) {
1636 output->wait_reg_update = 0;
1637 complete(&output->reg_update);
1638 spin_unlock_irqrestore(&vfe->output_lock, flags);
1639 return;
1640 }
1641
1642 if (output->state == VFE_OUTPUT_STOPPING) {
1643 /* Release last buffer when hw is idle */
1644 if (output->last_buffer) {
1645 vb2_buffer_done(&output->last_buffer->vb.vb2_buf,
1646 VB2_BUF_STATE_DONE);
1647 output->last_buffer = NULL;
1648 }
1649 output->state = VFE_OUTPUT_IDLE;
1650
1651 /* Buffers received in stopping state are queued in
1652  * dma pending queue, start next capture here */
1653
1654 output->buf[0] = vfe_buf_get_pending(output);
1655 output->buf[1] = vfe_buf_get_pending(output);
1656
1657 if (!output->buf[0] && output->buf[1]) {
1658 output->buf[0] = output->buf[1];
1659 output->buf[1] = NULL;
1660 }
1661
1662 if (output->buf[0])
1663 output->state = VFE_OUTPUT_SINGLE;
1664
1665 if (output->buf[1])
1666 output->state = VFE_OUTPUT_CONTINUOUS;
1667
1668 switch (output->state) {
1669 case VFE_OUTPUT_SINGLE:
1670 vfe_output_frame_drop(vfe, output, 2);
1671 break;
1672 case VFE_OUTPUT_CONTINUOUS:
1673 vfe_output_frame_drop(vfe, output, 3);
1674 break;
1675 default:
1676 vfe_output_frame_drop(vfe, output, 0);
1677 break;
1678 }
1679
1680 vfe_output_init_addrs(vfe, output, 1);
1681 }
1682
1683 spin_unlock_irqrestore(&vfe->output_lock, flags);
1684 }
1685
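/*
 * Write masters use ping/pong buffer addresses: while the hardware writes to
 * one address the other can be reprogrammed. On each write master done
 * interrupt the just-filled buffer is completed and the now-idle address slot
 * is refilled from the pending buffer queue.
 */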
1686 /*
1687 * vfe_isr_wm_done - Process write master done interrupt
1688 * @vfe: VFE Device
1689 * @wm: Write master id
1690 */
1691 static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
1692 {
1693 struct camss_buffer *ready_buf;
1694 struct vfe_output *output;
1695 dma_addr_t *new_addr;
1696 unsigned long flags;
1697 u32 active_index;
1698 u64 ts = ktime_get_ns();
1699 unsigned int i;
1700
1701 active_index = vfe_wm_get_ping_pong_status(vfe, wm);
1702
1703 spin_lock_irqsave(&vfe->output_lock, flags);
1704
1705 if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
1706 dev_err_ratelimited(to_device(vfe),
1707 "Received wm done for unmapped index\n");
1708 goto out_unlock;
1709 }
1710 output = &vfe->line[vfe->wm_output_map[wm]].output;
1711
1712 if (output->active_buf == active_index) {
1713 dev_err_ratelimited(to_device(vfe),
1714 "Active buffer mismatch!\n");
1715 goto out_unlock;
1716 }
1717 output->active_buf = active_index;
1718
1719 ready_buf = output->buf[!active_index];
1720 if (!ready_buf) {
1721 dev_err_ratelimited(to_device(vfe),
1722 "Missing ready buf %d %d!\n",
1723 !active_index, output->state);
1724 goto out_unlock;
1725 }
1726
1727 ready_buf->vb.vb2_buf.timestamp = ts;
1728 ready_buf->vb.sequence = output->sequence++;
1729
1730 /* Get next buffer */
1731 output->buf[!active_index] = vfe_buf_get_pending(output);
1732 if (!output->buf[!active_index]) {
1733 /* No next buffer - set same address */
1734 new_addr = ready_buf->addr;
1735 vfe_buf_update_wm_on_last(vfe, output);
1736 } else {
1737 new_addr = output->buf[!active_index]->addr;
1738 vfe_buf_update_wm_on_next(vfe, output);
1739 }
1740
1741 if (active_index)
1742 for (i = 0; i < output->wm_num; i++)
1743 vfe_wm_set_ping_addr(vfe, output->wm_idx[i],
1744 new_addr[i]);
1745 else
1746 for (i = 0; i < output->wm_num; i++)
1747 vfe_wm_set_pong_addr(vfe, output->wm_idx[i],
1748 new_addr[i]);
1749
1750 spin_unlock_irqrestore(&vfe->output_lock, flags);
1751
1752 if (output->state == VFE_OUTPUT_STOPPING)
1753 output->last_buffer = ready_buf;
1754 else
1755 vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
1756
1757 return;
1758
1759 out_unlock:
1760 spin_unlock_irqrestore(&vfe->output_lock, flags);
1761 }
1762
1763 /*
1764 * vfe_isr_comp_done - Process composite image done interrupt
1765 * @vfe: VFE Device
1766 * @comp: Composite image id
1767 */
1768 static void vfe_isr_comp_done(struct vfe_device *vfe, u8 comp)
1769 {
1770 unsigned int i;
1771
1772 for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++)
1773 if (vfe->wm_output_map[i] == VFE_LINE_PIX) {
1774 vfe_isr_wm_done(vfe, i);
1775 break;
1776 }
1777 }
1778
1779 /*
1780 * vfe_isr - VFE module interrupt handler
1781 * @irq: Interrupt line
1782 * @dev: VFE device
1783 *
1784 * Return IRQ_HANDLED on success
1785 */
1786 static irqreturn_t vfe_isr(int irq, void *dev)
1787 {
1788 struct vfe_device *vfe = dev;
1789 u32 value0, value1;
1790 u32 violation;
1791 int i, j;
1792
1793 value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0);
1794 value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1);
1795
1796 writel_relaxed(value0, vfe->base + VFE_0_IRQ_CLEAR_0);
1797 writel_relaxed(value1, vfe->base + VFE_0_IRQ_CLEAR_1);
1798
1799 wmb();
1800 writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
1801
1802 if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK)
1803 complete(&vfe->reset_complete);
1804
1805 if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION) {
1806 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS);
1807 dev_err_ratelimited(to_device(vfe),
1808 "VFE: violation = 0x%08x\n", violation);
1809 }
1810
1811 if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK) {
1812 complete(&vfe->halt_complete);
1813 writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD);
1814 }
1815
1816 for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++)
1817 if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i))
1818 vfe_isr_reg_update(vfe, i);
1819
1820 if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF)
1821 vfe_isr_sof(vfe, VFE_LINE_PIX);
1822
1823 for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
1824 if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i))
1825 vfe_isr_sof(vfe, i);
1826
1827 for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
1828 if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) {
1829 vfe_isr_comp_done(vfe, i);
1830 for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++)
1831 if (vfe->wm_output_map[j] == VFE_LINE_PIX)
1832 value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j);
1833 }
1834
1835 for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++)
1836 if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i))
1837 vfe_isr_wm_done(vfe, i);
1838
1839 return IRQ_HANDLED;
1840 }
1841
1842 /*
1843 * vfe_set_clock_rates - Calculate and set clock rates on VFE module
1844 * @vfe: VFE device
1845 *
1846 * Return 0 on success or a negative error code otherwise
1847 */
1848 static int vfe_set_clock_rates(struct vfe_device *vfe)
1849 {
1850 struct device *dev = to_device(vfe);
1851 u32 pixel_clock[MSM_VFE_LINE_NUM];
1852 int i, j;
1853 int ret;
1854
1855 for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) {
1856 ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity,
1857 &pixel_clock[i]);
1858 if (ret)
1859 pixel_clock[i] = 0;
1860 }
1861
1862 for (i = 0; i < vfe->nclocks; i++) {
1863 struct camss_clock *clock = &vfe->clock[i];
1864
1865 if (!strcmp(clock->name, "camss_vfe_vfe")) {
1866 u64 min_rate = 0;
1867 long rate;
1868
1869 for (j = VFE_LINE_RDI0; j <= VFE_LINE_PIX; j++) {
1870 u32 tmp;
1871 u8 bpp;
1872
1873 if (j == VFE_LINE_PIX) {
1874 tmp = pixel_clock[j];
1875 } else {
1876 bpp = vfe_get_bpp(vfe->line[j].
1877 fmt[MSM_VFE_PAD_SINK].code);
1878 tmp = pixel_clock[j] * bpp / 64;
1879 }
1880
1881 if (min_rate < tmp)
1882 min_rate = tmp;
1883 }
1884
1885 camss_add_clock_margin(&min_rate);
1886
1887 for (j = 0; j < clock->nfreqs; j++)
1888 if (min_rate < clock->freq[j])
1889 break;
1890
1891 if (j == clock->nfreqs) {
1892 dev_err(dev,
1893 "Pixel clock is too high for VFE");
1894 return -EINVAL;
1895 }
1896
1897 /* if sensor pixel clock is not available,
1898  * set highest possible VFE clock rate */
1899 if (min_rate == 0)
1900 j = clock->nfreqs - 1;
1901
1902 rate = clk_round_rate(clock->clk, clock->freq[j]);
1903 if (rate < 0) {
1904 dev_err(dev, "clk round rate failed: %ld\n",
1905 rate);
1906 return -EINVAL;
1907 }
1908
1909 ret = clk_set_rate(clock->clk, rate);
1910 if (ret < 0) {
1911 dev_err(dev, "clk set rate failed: %d\n", ret);
1912 return ret;
1913 }
1914 }
1915 }
1916
1917 return 0;
1918 }
1919
1920 /*
1921 * vfe_check_clock_rates - Check current clock rates on VFE module
1922 * @vfe: VFE device
1923 *
1924 * Return 0 if current clock rates are suitable for a new pipeline
1925 * or a negative error code otherwise
1926 */
1927 static int vfe_check_clock_rates(struct vfe_device *vfe)
1928 {
1929 u32 pixel_clock[MSM_VFE_LINE_NUM];
1930 int i, j;
1931 int ret;
1932
1933 for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) {
1934 ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity,
1935 &pixel_clock[i]);
1936 if (ret)
1937 pixel_clock[i] = 0;
1938 }
1939
1940 for (i = 0; i < vfe->nclocks; i++) {
1941 struct camss_clock *clock = &vfe->clock[i];
1942
1943 if (!strcmp(clock->name, "camss_vfe_vfe")) {
1944 u64 min_rate = 0;
1945 unsigned long rate;
1946
1947 for (j = VFE_LINE_RDI0; j <= VFE_LINE_PIX; j++) {
1948 u32 tmp;
1949 u8 bpp;
1950
1951 if (j == VFE_LINE_PIX) {
1952 tmp = pixel_clock[j];
1953 } else {
1954 bpp = vfe_get_bpp(vfe->line[j].
1955 fmt[MSM_VFE_PAD_SINK].code);
1956 tmp = pixel_clock[j] * bpp / 64;
1957 }
1958
1959 if (min_rate < tmp)
1960 min_rate = tmp;
1961 }
1962
1963 camss_add_clock_margin(&min_rate);
1964
1965 rate = clk_get_rate(clock->clk);
1966 if (rate < min_rate)
1967 return -EBUSY;
1968 }
1969 }
1970
1971 return 0;
1972 }
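/*
 * vfe_check_clock_rates() is called from vfe_get() when the VFE is already
 * powered: instead of reprogramming the clocks it only verifies that the
 * currently running "camss_vfe_vfe" rate is high enough for the new
 * pipeline and returns -EBUSY otherwise.
 */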
1973
1974 /*
1975 * vfe_get - Power up and reset VFE module
1976 * @vfe: VFE Device
1977 *
1978 * Return 0 on success or a negative error code otherwise
1979 */
1980 static int vfe_get(struct vfe_device *vfe)
1981 {
1982 int ret;
1983
1984 mutex_lock(&vfe->power_lock);
1985
1986 if (vfe->power_count == 0) {
1987 ret = vfe_set_clock_rates(vfe);
1988 if (ret < 0)
1989 goto error_clocks;
1990
1991 ret = camss_enable_clocks(vfe->nclocks, vfe->clock,
1992 to_device(vfe));
1993 if (ret < 0)
1994 goto error_clocks;
1995
1996 ret = vfe_reset(vfe);
1997 if (ret < 0)
1998 goto error_reset;
1999
2000 vfe_reset_output_maps(vfe);
2001
2002 vfe_init_outputs(vfe);
2003 } else {
2004 ret = vfe_check_clock_rates(vfe);
2005 if (ret < 0)
2006 goto error_clocks;
2007 }
2008 vfe->power_count++;
2009
2010 mutex_unlock(&vfe->power_lock);
2011
2012 return 0;
2013
2014 error_reset:
2015 camss_disable_clocks(vfe->nclocks, vfe->clock);
2016
2017 error_clocks:
2018 mutex_unlock(&vfe->power_lock);
2019
2020 return ret;
2021 }
2022
2023 /*
2024 * vfe_put - Power down VFE module
2025 * @vfe: VFE Device
2026 */
2027 static void vfe_put(struct vfe_device *vfe)
2028 {
2029 mutex_lock(&vfe->power_lock);
2030
2031 if (vfe->power_count == 0) {
2032 dev_err(to_device(vfe), "vfe power off on power_count == 0\n");
2033 goto exit;
2034 } else if (vfe->power_count == 1) {
2035 if (vfe->was_streaming) {
2036 vfe->was_streaming = 0;
2037 vfe_halt(vfe);
2038 }
2039 camss_disable_clocks(vfe->nclocks, vfe->clock);
2040 }
2041
2042 vfe->power_count--;
2043
2044 exit:
2045 mutex_unlock(&vfe->power_lock);
2046 }
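/*
 * Power handling is reference counted: the first vfe_get() programs the
 * clock rates, enables the clocks and resets the hardware, while later
 * calls only check that the running clock rate is still sufficient. A
 * minimal usage sketch (error handling elided):
 *
 *    ret = vfe_get(vfe);
 *    if (!ret) {
 *            ... use the hardware ...
 *            vfe_put(vfe);
 *    }
 */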
2047
2048 /*
2049 * vfe_video_pad_to_line - Get pointer to VFE line by media pad
2050 * @pad: Media pad
2051 *
2052 * Return pointer to vfe line structure
2053 */
2054 static struct vfe_line *vfe_video_pad_to_line(struct media_pad *pad)
2055 {
2056 struct media_pad *vfe_pad;
2057 struct v4l2_subdev *subdev;
2058
2059 vfe_pad = media_entity_remote_pad(pad);
2060 if (vfe_pad == NULL)
2061 return NULL;
2062
2063 subdev = media_entity_to_v4l2_subdev(vfe_pad->entity);
2064
2065 return container_of(subdev, struct vfe_line, subdev);
2066 }
2067
2068 /*
2069 * vfe_queue_buffer - Add empty buffer
2070 * @vid: Video device structure
2071 * @buf: Buffer to be enqueued
2072 *
2073 * Add an empty buffer - depending on the current number of buffers it will be
2074 * put into the pending buffer queue or given directly to the hardware to be filled.
2075 *
2076 * Return 0 on success or a negative error code otherwise
2077 */
2078 static int vfe_queue_buffer(struct camss_video *vid,
2079 struct camss_buffer *buf)
2080 {
2081 struct vfe_device *vfe = &vid->camss->vfe;
2082 struct vfe_line *line;
2083 struct vfe_output *output;
2084 unsigned long flags;
2085
2086 line = vfe_video_pad_to_line(&vid->pad);
2087 if (!line) {
2088 dev_err(to_device(vfe), "Can not queue buffer\n");
2089 return -EINVAL;
2090 }
2091 output = &line->output;
2092
2093 spin_lock_irqsave(&vfe->output_lock, flags);
2094
2095 vfe_buf_update_wm_on_new(vfe, output, buf);
2096
2097 spin_unlock_irqrestore(&vfe->output_lock, flags);
2098
2099 return 0;
2100 }
2101
2102 /*
2103 * vfe_flush_buffers - Return all vb2 buffers
2104 * @vid: Video device structure
2105 * @state: vb2 buffer state of the returned buffers
2106 *
2107 * Return all buffers to vb2. This includes pending buffers still in the
2108 * queue as well as buffers already given to the hardware but not yet filled.
2109 *
2110 * Return 0 on success or a negative error code otherwise
2111 */
2112 static int vfe_flush_buffers(struct camss_video *vid,
2113 enum vb2_buffer_state state)
2114 {
2115 struct vfe_device *vfe = &vid->camss->vfe;
2116 struct vfe_line *line;
2117 struct vfe_output *output;
2118 unsigned long flags;
2119
2120 line = vfe_video_pad_to_line(&vid->pad);
2121 if (!line) {
2122 dev_err(to_device(vfe), "Can not flush buffers\n");
2123 return -EINVAL;
2124 }
2125 output = &line->output;
2126
2127 spin_lock_irqsave(&vfe->output_lock, flags);
2128
2129 vfe_buf_flush_pending(output, state);
2130
2131 if (output->buf[0])
2132 vb2_buffer_done(&output->buf[0]->vb.vb2_buf, state);
2133
2134 if (output->buf[1])
2135 vb2_buffer_done(&output->buf[1]->vb.vb2_buf, state);
2136
2137 if (output->last_buffer) {
2138 vb2_buffer_done(&output->last_buffer->vb.vb2_buf, state);
2139 output->last_buffer = NULL;
2140 }
2141
2142 spin_unlock_irqrestore(&vfe->output_lock, flags);
2143
2144 return 0;
2145 }
2146
2147 /*
2148 * vfe_set_power - Power on/off VFE module
2149 * @sd: VFE V4L2 subdevice
2150 * @on: Requested power state
2151 *
2152 * Return 0 on success or a negative error code otherwise
2153 */
2154 static int vfe_set_power(struct v4l2_subdev *sd, int on)
2155 {
2156 struct vfe_line *line = v4l2_get_subdevdata(sd);
2157 struct vfe_device *vfe = to_vfe(line);
2158 int ret;
2159
2160 if (on) {
2161 u32 hw_version;
2162
2163 ret = vfe_get(vfe);
2164 if (ret < 0)
2165 return ret;
2166
2167 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
2168 dev_dbg(to_device(vfe),
2169 "VFE HW Version = 0x%08x\n", hw_version);
2170 } else {
2171 vfe_put(vfe);
2172 }
2173
2174 return 0;
2175 }
2176
2177 /*
2178 * vfe_set_stream - Enable/disable streaming on VFE module
2179 * @sd: VFE V4L2 subdevice
2180 * @enable: Requested streaming state
2181 *
2182 * The main configuration of the VFE module is triggered here.
2183 *
2184 * Return 0 on success or a negative error code otherwise
2185 */
2186 static int vfe_set_stream(struct v4l2_subdev *sd, int enable)
2187 {
2188 struct vfe_line *line = v4l2_get_subdevdata(sd);
2189 struct vfe_device *vfe = to_vfe(line);
2190 int ret;
2191
2192 if (enable) {
2193 ret = vfe_enable(line);
2194 if (ret < 0)
2195 dev_err(to_device(vfe),
2196 "Failed to enable vfe outputs\n");
2197 } else {
2198 ret = vfe_disable(line);
2199 if (ret < 0)
2200 dev_err(to_device(vfe),
2201 "Failed to disable vfe outputs\n");
2202 }
2203
2204 return ret;
2205 }
2206
2207 /*
2208 * __vfe_get_format - Get pointer to format structure
2209 * @line: VFE line
2210 * @cfg: V4L2 subdev pad configuration
2211 * @pad: pad from which format is requested
2212 * @which: TRY or ACTIVE format
2213 *
2214 * Return pointer to TRY or ACTIVE format structure
2215 */
2216 static struct v4l2_mbus_framefmt *
2217 __vfe_get_format(struct vfe_line *line,
2218 struct v4l2_subdev_pad_config *cfg,
2219 unsigned int pad,
2220 enum v4l2_subdev_format_whence which)
2221 {
2222 if (which == V4L2_SUBDEV_FORMAT_TRY)
2223 return v4l2_subdev_get_try_format(&line->subdev, cfg, pad);
2224
2225 return &line->fmt[pad];
2226 }
2227
2228 /*
2229 * __vfe_get_compose - Get pointer to compose selection structure
2230 * @line: VFE line
2231 * @cfg: V4L2 subdev pad configuration
2232 * @which: TRY or ACTIVE format
2233 *
2234 * Return pointer to TRY or ACTIVE compose rectangle structure
2235 */
2236 static struct v4l2_rect *
2237 __vfe_get_compose(struct vfe_line *line,
2238 struct v4l2_subdev_pad_config *cfg,
2239 enum v4l2_subdev_format_whence which)
2240 {
2241 if (which == V4L2_SUBDEV_FORMAT_TRY)
2242 return v4l2_subdev_get_try_compose(&line->subdev, cfg,
2243 MSM_VFE_PAD_SINK);
2244
2245 return &line->compose;
2246 }
2247
2248 /*
2249 * __vfe_get_crop - Get pointer to crop selection structure
2250 * @line: VFE line
2251 * @cfg: V4L2 subdev pad configuration
2252 * @which: TRY or ACTIVE format
2253 *
2254 * Return pointer to TRY or ACTIVE crop rectangle structure
2255 */
2256 static struct v4l2_rect *
2257 __vfe_get_crop(struct vfe_line *line,
2258 struct v4l2_subdev_pad_config *cfg,
2259 enum v4l2_subdev_format_whence which)
2260 {
2261 if (which == V4L2_SUBDEV_FORMAT_TRY)
2262 return v4l2_subdev_get_try_crop(&line->subdev, cfg,
2263 MSM_VFE_PAD_SRC);
2264
2265 return &line->crop;
2266 }
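/*
 * The three accessors above return either the TRY state kept in the
 * per-file-handle pad configuration or the ACTIVE state stored in the
 * vfe_line itself, depending on the "which" argument.
 */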
2267
2268 /*
2269 * vfe_try_format - Handle try format by pad subdev method
2270 * @line: VFE line
2271 * @cfg: V4L2 subdev pad configuration
2272 * @pad: pad on which format is requested
2273 * @fmt: pointer to v4l2 format structure
2274 * @which: wanted subdev format
2275 */
2276 static void vfe_try_format(struct vfe_line *line,
2277 struct v4l2_subdev_pad_config *cfg,
2278 unsigned int pad,
2279 struct v4l2_mbus_framefmt *fmt,
2280 enum v4l2_subdev_format_whence which)
2281 {
2282 unsigned int i;
2283 u32 code;
2284
2285 switch (pad) {
2286 case MSM_VFE_PAD_SINK:
2287 /* Set format on sink pad */
2288
2289 for (i = 0; i < ARRAY_SIZE(vfe_formats); i++)
2290 if (fmt->code == vfe_formats[i].code)
2291 break;
2292
2293 /* If not found, use UYVY as default */
2294 if (i >= ARRAY_SIZE(vfe_formats))
2295 fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
2296
2297 fmt->width = clamp_t(u32, fmt->width, 1, 8191);
2298 fmt->height = clamp_t(u32, fmt->height, 1, 8191);
2299
2300 fmt->field = V4L2_FIELD_NONE;
2301 fmt->colorspace = V4L2_COLORSPACE_SRGB;
2302
2303 break;
2304
2305 case MSM_VFE_PAD_SRC:
2306 /* Set and return a format same as sink pad */
2307
2308 code = fmt->code;
2309
2310 *fmt = *__vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
2311 which);
2312
2313 if (line->id == VFE_LINE_PIX) {
2314 struct v4l2_rect *rect;
2315
2316 rect = __vfe_get_crop(line, cfg, which);
2317
2318 fmt->width = rect->width;
2319 fmt->height = rect->height;
2320
2321 switch (fmt->code) {
2322 case MEDIA_BUS_FMT_YUYV8_2X8:
2323 if (code == MEDIA_BUS_FMT_YUYV8_1_5X8)
2324 fmt->code = MEDIA_BUS_FMT_YUYV8_1_5X8;
2325 else
2326 fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
2327 break;
2328 case MEDIA_BUS_FMT_YVYU8_2X8:
2329 if (code == MEDIA_BUS_FMT_YVYU8_1_5X8)
2330 fmt->code = MEDIA_BUS_FMT_YVYU8_1_5X8;
2331 else
2332 fmt->code = MEDIA_BUS_FMT_YVYU8_2X8;
2333 break;
2334 case MEDIA_BUS_FMT_UYVY8_2X8:
2335 default:
2336 if (code == MEDIA_BUS_FMT_UYVY8_1_5X8)
2337 fmt->code = MEDIA_BUS_FMT_UYVY8_1_5X8;
2338 else
2339 fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
2340 break;
2341 case MEDIA_BUS_FMT_VYUY8_2X8:
2342 if (code == MEDIA_BUS_FMT_VYUY8_1_5X8)
2343 fmt->code = MEDIA_BUS_FMT_VYUY8_1_5X8;
2344 else
2345 fmt->code = MEDIA_BUS_FMT_VYUY8_2X8;
2346 break;
2347 }
2348 }
2349
2350 break;
2351 }
2352
2353 fmt->colorspace = V4L2_COLORSPACE_SRGB;
2354 }
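/*
 * In short: the sink pad accepts any code listed in vfe_formats (falling
 * back to UYVY8_2X8) with its size clamped to 1..8191, while the source
 * pad mirrors the sink format; on the PIX line the source size follows
 * the crop rectangle and a *_1_5X8 (YUV 4:2:0) variant of the sink code
 * may be selected when requested.
 */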
2355
2356 /*
2357 * vfe_try_compose - Handle try compose selection by pad subdev method
2358 * @line: VFE line
2359 * @cfg: V4L2 subdev pad configuration
2360 * @rect: pointer to v4l2 rect structure
2361 * @which: wanted subdev format
2362 */
2363 static void vfe_try_compose(struct vfe_line *line,
2364 struct v4l2_subdev_pad_config *cfg,
2365 struct v4l2_rect *rect,
2366 enum v4l2_subdev_format_whence which)
2367 {
2368 struct v4l2_mbus_framefmt *fmt;
2369
2370 fmt = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, which);
2371
2372 if (rect->width > fmt->width)
2373 rect->width = fmt->width;
2374
2375 if (rect->height > fmt->height)
2376 rect->height = fmt->height;
2377
2378 if (fmt->width > rect->width * SCALER_RATIO_MAX)
2379 rect->width = (fmt->width + SCALER_RATIO_MAX - 1) /
2380 SCALER_RATIO_MAX;
2381
2382 rect->width &= ~0x1;
2383
2384 if (fmt->height > rect->height * SCALER_RATIO_MAX)
2385 rect->height = (fmt->height + SCALER_RATIO_MAX - 1) /
2386 SCALER_RATIO_MAX;
2387
2388 if (rect->width < 16)
2389 rect->width = 16;
2390
2391 if (rect->height < 4)
2392 rect->height = 4;
2393 }
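/*
 * For example, assuming SCALER_RATIO_MAX is 16: with a 1920x1080 sink
 * format a requested compose rectangle of 64x36 exceeds the maximum
 * downscaling ratio, so it is enlarged to (1920 + 15) / 16 = 120 (kept
 * even) by (1080 + 15) / 16 = 68.
 */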
2394
2395 /*
2396 * vfe_try_crop - Handle try crop selection by pad subdev method
2397 * @line: VFE line
2398 * @cfg: V4L2 subdev pad configuration
2399 * @rect: pointer to v4l2 rect structure
2400 * @which: wanted subdev format
2401 */
2402 static void vfe_try_crop(struct vfe_line *line,
2403 struct v4l2_subdev_pad_config *cfg,
2404 struct v4l2_rect *rect,
2405 enum v4l2_subdev_format_whence which)
2406 {
2407 struct v4l2_rect *compose;
2408
2409 compose = __vfe_get_compose(line, cfg, which);
2410
2411 if (rect->width > compose->width)
2412 rect->width = compose->width;
2413
2414 if (rect->width + rect->left > compose->width)
2415 rect->left = compose->width - rect->width;
2416
2417 if (rect->height > compose->height)
2418 rect->height = compose->height;
2419
2420 if (rect->height + rect->top > compose->height)
2421 rect->top = compose->height - rect->height;
2422
2423 /* wm in line based mode writes multiples of 16 pixels horizontally */
2424 rect->left += (rect->width & 0xf) >> 1;
2425 rect->width &= ~0xf;
2426
2427 if (rect->width < 16) {
2428 rect->left = 0;
2429 rect->width = 16;
2430 }
2431
2432 if (rect->height < 4) {
2433 rect->top = 0;
2434 rect->height = 4;
2435 }
2436 }
2437
2438 /*
2439 * vfe_enum_mbus_code - Handle pixel format enumeration
2440 * @sd: VFE V4L2 subdevice
2441 * @cfg: V4L2 subdev pad configuration
2442 * @code: pointer to v4l2_subdev_mbus_code_enum structure
2443 *
2444 * Return -EINVAL or zero on success
2445 */
2446 static int vfe_enum_mbus_code(struct v4l2_subdev *sd,
2447 struct v4l2_subdev_pad_config *cfg,
2448 struct v4l2_subdev_mbus_code_enum *code)
2449 {
2450 struct vfe_line *line = v4l2_get_subdevdata(sd);
2451 struct v4l2_mbus_framefmt *format;
2452
2453 if (code->pad == MSM_VFE_PAD_SINK) {
2454 if (code->index >= ARRAY_SIZE(vfe_formats))
2455 return -EINVAL;
2456
2457 code->code = vfe_formats[code->index].code;
2458 } else {
2459 if (code->index > 0)
2460 return -EINVAL;
2461
2462 format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
2463 code->which);
2464
2465 code->code = format->code;
2466 }
2467
2468 return 0;
2469 }
2470
2471 /*
2472 * vfe_enum_frame_size - Handle frame size enumeration
2473 * @sd: VFE V4L2 subdevice
2474 * @cfg: V4L2 subdev pad configuration
2475 * @fse: pointer to v4l2_subdev_frame_size_enum structure
2476 *
2477 * Return -EINVAL or zero on success
2478 */
2479 static int vfe_enum_frame_size(struct v4l2_subdev *sd,
2480 struct v4l2_subdev_pad_config *cfg,
2481 struct v4l2_subdev_frame_size_enum *fse)
2482 {
2483 struct vfe_line *line = v4l2_get_subdevdata(sd);
2484 struct v4l2_mbus_framefmt format;
2485
2486 if (fse->index != 0)
2487 return -EINVAL;
2488
2489 format.code = fse->code;
2490 format.width = 1;
2491 format.height = 1;
2492 vfe_try_format(line, cfg, fse->pad, &format, fse->which);
2493 fse->min_width = format.width;
2494 fse->min_height = format.height;
2495
2496 if (format.code != fse->code)
2497 return -EINVAL;
2498
2499 format.code = fse->code;
2500 format.width = -1;
2501 format.height = -1;
2502 vfe_try_format(line, cfg, fse->pad, &format, fse->which);
2503 fse->max_width = format.width;
2504 fse->max_height = format.height;
2505
2506 return 0;
2507 }
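/*
 * The minimum and maximum frame sizes are probed by pushing degenerate
 * sizes through vfe_try_format(): 1x1 is clamped up to the smallest
 * supported size and (u32)-1 is clamped down to the largest
 * (8191x8191 on the sink pad).
 */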
2508
2509 /*
2510 * vfe_get_format - Handle get format by pads subdev method
2511 * @sd: VFE V4L2 subdevice
2512 * @cfg: V4L2 subdev pad configuration
2513 * @fmt: pointer to v4l2 subdev format structure
2514 *
2515 * Return -EINVAL or zero on success
2516 */
2517 static int vfe_get_format(struct v4l2_subdev *sd,
2518 struct v4l2_subdev_pad_config *cfg,
2519 struct v4l2_subdev_format *fmt)
2520 {
2521 struct vfe_line *line = v4l2_get_subdevdata(sd);
2522 struct v4l2_mbus_framefmt *format;
2523
2524 format = __vfe_get_format(line, cfg, fmt->pad, fmt->which);
2525 if (format == NULL)
2526 return -EINVAL;
2527
2528 fmt->format = *format;
2529
2530 return 0;
2531 }
2532
2533 static int vfe_set_selection(struct v4l2_subdev *sd,
2534 struct v4l2_subdev_pad_config *cfg,
2535 struct v4l2_subdev_selection *sel);
2536
2537 /*
2538 * vfe_set_format - Handle set format by pads subdev method
2539 * @sd: VFE V4L2 subdevice
2540 * @cfg: V4L2 subdev pad configuration
2541 * @fmt: pointer to v4l2 subdev format structure
2542 *
2543 * Return -EINVAL or zero on success
2544 */
2545 static int vfe_set_format(struct v4l2_subdev *sd,
2546 struct v4l2_subdev_pad_config *cfg,
2547 struct v4l2_subdev_format *fmt)
2548 {
2549 struct vfe_line *line = v4l2_get_subdevdata(sd);
2550 struct v4l2_mbus_framefmt *format;
2551
2552 format = __vfe_get_format(line, cfg, fmt->pad, fmt->which);
2553 if (format == NULL)
2554 return -EINVAL;
2555
2556 vfe_try_format(line, cfg, fmt->pad, &fmt->format, fmt->which);
2557 *format = fmt->format;
2558
2559 if (fmt->pad == MSM_VFE_PAD_SINK) {
2560 struct v4l2_subdev_selection sel = { 0 };
2561 int ret;
2562
2563 /* Propagate the format from sink to source */
2564 format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SRC,
2565 fmt->which);
2566
2567 *format = fmt->format;
2568 vfe_try_format(line, cfg, MSM_VFE_PAD_SRC, format,
2569 fmt->which);
2570
2571 if (line->id != VFE_LINE_PIX)
2572 return 0;
2573
2574 /* Reset sink pad compose selection */
2575 sel.which = fmt->which;
2576 sel.pad = MSM_VFE_PAD_SINK;
2577 sel.target = V4L2_SEL_TGT_COMPOSE;
2578 sel.r.width = fmt->format.width;
2579 sel.r.height = fmt->format.height;
2580 ret = vfe_set_selection(sd, cfg, &sel);
2581 if (ret < 0)
2582 return ret;
2583 }
2584
2585 return 0;
2586 }
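/*
 * Setting a format on the sink pad therefore propagates down the whole
 * chain: the source pad format is derived from it, and on the PIX line
 * the sink compose rectangle is reset to the new size, which in turn
 * resets the source crop rectangle and the source pad size via
 * vfe_set_selection().
 */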
2587
2588 /*
2589 * vfe_get_selection - Handle get selection by pads subdev method
2590 * @sd: VFE V4L2 subdevice
2591 * @cfg: V4L2 subdev pad configuration
2592 * @sel: pointer to v4l2 subdev selection structure
2593 *
2594 * Return -EINVAL or zero on success
2595 */
2596 static int vfe_get_selection(struct v4l2_subdev *sd,
2597 struct v4l2_subdev_pad_config *cfg,
2598 struct v4l2_subdev_selection *sel)
2599 {
2600 struct vfe_line *line = v4l2_get_subdevdata(sd);
2601 struct v4l2_subdev_format fmt = { 0 };
2602 struct v4l2_rect *rect;
2603 int ret;
2604
2605 if (line->id != VFE_LINE_PIX)
2606 return -EINVAL;
2607
2608 if (sel->pad == MSM_VFE_PAD_SINK)
2609 switch (sel->target) {
2610 case V4L2_SEL_TGT_COMPOSE_BOUNDS:
2611 fmt.pad = sel->pad;
2612 fmt.which = sel->which;
2613 ret = vfe_get_format(sd, cfg, &fmt);
2614 if (ret < 0)
2615 return ret;
2616
2617 sel->r.left = 0;
2618 sel->r.top = 0;
2619 sel->r.width = fmt.format.width;
2620 sel->r.height = fmt.format.height;
2621 break;
2622 case V4L2_SEL_TGT_COMPOSE:
2623 rect = __vfe_get_compose(line, cfg, sel->which);
2624 if (rect == NULL)
2625 return -EINVAL;
2626
2627 sel->r = *rect;
2628 break;
2629 default:
2630 return -EINVAL;
2631 }
2632 else if (sel->pad == MSM_VFE_PAD_SRC)
2633 switch (sel->target) {
2634 case V4L2_SEL_TGT_CROP_BOUNDS:
2635 rect = __vfe_get_compose(line, cfg, sel->which);
2636 if (rect == NULL)
2637 return -EINVAL;
2638
2639 sel->r.left = rect->left;
2640 sel->r.top = rect->top;
2641 sel->r.width = rect->width;
2642 sel->r.height = rect->height;
2643 break;
2644 case V4L2_SEL_TGT_CROP:
2645 rect = __vfe_get_crop(line, cfg, sel->which);
2646 if (rect == NULL)
2647 return -EINVAL;
2648
2649 sel->r = *rect;
2650 break;
2651 default:
2652 return -EINVAL;
2653 }
2654
2655 return 0;
2656 }
2657
2658 /*
2659 * vfe_set_selection - Handle set selection by pads subdev method
2660 * @sd: VFE V4L2 subdevice
2661 * @cfg: V4L2 subdev pad configuration
2662 * @sel: pointer to v4l2 subdev selection structure
2663 *
2664 * Return -EINVAL or zero on success
2665 */
2666 static int vfe_set_selection(struct v4l2_subdev *sd,
2667 struct v4l2_subdev_pad_config *cfg,
2668 struct v4l2_subdev_selection *sel)
2669 {
2670 struct vfe_line *line = v4l2_get_subdevdata(sd);
2671 struct v4l2_rect *rect;
2672 int ret;
2673
2674 if (line->id != VFE_LINE_PIX)
2675 return -EINVAL;
2676
2677 if (sel->target == V4L2_SEL_TGT_COMPOSE &&
2678 sel->pad == MSM_VFE_PAD_SINK) {
2679 struct v4l2_subdev_selection crop = { 0 };
2680
2681 rect = __vfe_get_compose(line, cfg, sel->which);
2682 if (rect == NULL)
2683 return -EINVAL;
2684
2685 vfe_try_compose(line, cfg, &sel->r, sel->which);
2686 *rect = sel->r;
2687
2688 /* Reset source crop selection */
2689 crop.which = sel->which;
2690 crop.pad = MSM_VFE_PAD_SRC;
2691 crop.target = V4L2_SEL_TGT_CROP;
2692 crop.r = *rect;
2693 ret = vfe_set_selection(sd, cfg, &crop);
2694 } else if (sel->target == V4L2_SEL_TGT_CROP &&
2695 sel->pad == MSM_VFE_PAD_SRC) {
2696 struct v4l2_subdev_format fmt = { 0 };
2697
2698 rect = __vfe_get_crop(line, cfg, sel->which);
2699 if (rect == NULL)
2700 return -EINVAL;
2701
2702 vfe_try_crop(line, cfg, &sel->r, sel->which);
2703 *rect = sel->r;
2704
2705 /* Reset source pad format width and height */
2706 fmt.which = sel->which;
2707 fmt.pad = MSM_VFE_PAD_SRC;
2708 ret = vfe_get_format(sd, cfg, &fmt);
2709 if (ret < 0)
2710 return ret;
2711
2712 fmt.format.width = rect->width;
2713 fmt.format.height = rect->height;
2714 ret = vfe_set_format(sd, cfg, &fmt);
2715 } else {
2716 ret = -EINVAL;
2717 }
2718
2719 return ret;
2720 }
2721
2722 /*
2723 * vfe_init_formats - Initialize formats on all pads
2724 * @sd: VFE V4L2 subdevice
2725 * @fh: V4L2 subdev file handle
2726 *
2727 * Initialize all pad formats with default values.
2728 *
2729 * Return 0 on success or a negative error code otherwise
2730 */
2731 static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
2732 {
2733 struct v4l2_subdev_format format = {
2734 .pad = MSM_VFE_PAD_SINK,
2735 .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
2736 V4L2_SUBDEV_FORMAT_ACTIVE,
2737 .format = {
2738 .code = MEDIA_BUS_FMT_UYVY8_2X8,
2739 .width = 1920,
2740 .height = 1080
2741 }
2742 };
2743
2744 return vfe_set_format(sd, fh ? fh->pad : NULL, &format);
2745 }
2746
2747 /*
2748 * msm_vfe_subdev_init - Initialize VFE device structure and resources
2749 * @vfe: VFE device
2750 * @res: VFE module resources table
2751 *
2752 * Return 0 on success or a negative error code otherwise
2753 */
2754 int msm_vfe_subdev_init(struct vfe_device *vfe, const struct resources *res)
2755 {
2756 struct device *dev = to_device(vfe);
2757 struct platform_device *pdev = to_platform_device(dev);
2758 struct resource *r;
2759 struct camss *camss = to_camss(vfe);
2760 int i, j;
2761 int ret;
2762
2763 /* Memory */
2764
2765 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
2766 vfe->base = devm_ioremap_resource(dev, r);
2767 if (IS_ERR(vfe->base)) {
2768 dev_err(dev, "could not map memory\n");
2769 return PTR_ERR(vfe->base);
2770 }
2771
2772 /* Interrupt */
2773
2774 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
2775 res->interrupt[0]);
2776 if (!r) {
2777 dev_err(dev, "missing IRQ\n");
2778 return -EINVAL;
2779 }
2780
2781 vfe->irq = r->start;
2782 snprintf(vfe->irq_name, sizeof(vfe->irq_name), "%s_%s%d",
2783 dev_name(dev), MSM_VFE_NAME, vfe->id);
2784 ret = devm_request_irq(dev, vfe->irq, vfe_isr,
2785 IRQF_TRIGGER_RISING, vfe->irq_name, vfe);
2786 if (ret < 0) {
2787 dev_err(dev, "request_irq failed: %d\n", ret);
2788 return ret;
2789 }
2790
2791 /* Clocks */
2792
2793 vfe->nclocks = 0;
2794 while (res->clock[vfe->nclocks])
2795 vfe->nclocks++;
2796
2797 vfe->clock = devm_kzalloc(dev, vfe->nclocks * sizeof(*vfe->clock),
2798 GFP_KERNEL);
2799 if (!vfe->clock)
2800 return -ENOMEM;
2801
2802 for (i = 0; i < vfe->nclocks; i++) {
2803 struct camss_clock *clock = &vfe->clock[i];
2804
2805 clock->clk = devm_clk_get(dev, res->clock[i]);
2806 if (IS_ERR(clock->clk))
2807 return PTR_ERR(clock->clk);
2808
2809 clock->name = res->clock[i];
2810
2811 clock->nfreqs = 0;
2812 while (res->clock_rate[i][clock->nfreqs])
2813 clock->nfreqs++;
2814
2815 if (!clock->nfreqs) {
2816 clock->freq = NULL;
2817 continue;
2818 }
2819
2820 clock->freq = devm_kzalloc(dev, clock->nfreqs *
2821 sizeof(*clock->freq), GFP_KERNEL);
2822 if (!clock->freq)
2823 return -ENOMEM;
2824
2825 for (j = 0; j < clock->nfreqs; j++)
2826 clock->freq[j] = res->clock_rate[i][j];
2827 }
2828
2829 mutex_init(&vfe->power_lock);
2830 vfe->power_count = 0;
2831
2832 mutex_init(&vfe->stream_lock);
2833 vfe->stream_count = 0;
2834
2835 spin_lock_init(&vfe->output_lock);
2836
2837 vfe->id = 0;
2838 vfe->reg_update = 0;
2839
2840 for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) {
2841 vfe->line[i].video_out.type =
2842 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
2843 vfe->line[i].video_out.camss = camss;
2844 vfe->line[i].id = i;
2845 init_completion(&vfe->line[i].output.sof);
2846 init_completion(&vfe->line[i].output.reg_update);
2847 }
2848
2849 init_completion(&vfe->reset_complete);
2850 init_completion(&vfe->halt_complete);
2851
2852 return 0;
2853 }
2854
2855 /*
2856 * msm_vfe_get_vfe_id - Get VFE HW module id
2857 * @entity: Pointer to VFE media entity structure
2858 * @id: Return VFE HW module id here
2859 */
2860 void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id)
2861 {
2862 struct v4l2_subdev *sd;
2863 struct vfe_line *line;
2864 struct vfe_device *vfe;
2865
2866 sd = media_entity_to_v4l2_subdev(entity);
2867 line = v4l2_get_subdevdata(sd);
2868 vfe = to_vfe(line);
2869
2870 *id = vfe->id;
2871 }
2872
2873 /*
2874 * msm_vfe_get_vfe_line_id - Get VFE line id by media entity
2875 * @entity: Pointer to VFE media entity structure
2876 * @id: Return VFE line id here
2877 */
2878 void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id)
2879 {
2880 struct v4l2_subdev *sd;
2881 struct vfe_line *line;
2882
2883 sd = media_entity_to_v4l2_subdev(entity);
2884 line = v4l2_get_subdevdata(sd);
2885
2886 *id = line->id;
2887 }
2888
2889 /*
2890 * vfe_link_setup - Setup VFE connections
2891 * @entity: Pointer to media entity structure
2892 * @local: Pointer to local pad
2893 * @remote: Pointer to remote pad
2894 * @flags: Link flags
2895 *
2896 * Return 0 on success
2897 */
2898 static int vfe_link_setup(struct media_entity *entity,
2899 const struct media_pad *local,
2900 const struct media_pad *remote, u32 flags)
2901 {
2902 if (flags & MEDIA_LNK_FL_ENABLED)
2903 if (media_entity_remote_pad(local))
2904 return -EBUSY;
2905
2906 return 0;
2907 }
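/*
 * Only a single enabled link per VFE pad is allowed: if the local pad is
 * already connected to an active remote pad, enabling another link is
 * refused with -EBUSY.
 */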
2908
2909 static const struct v4l2_subdev_core_ops vfe_core_ops = {
2910 .s_power = vfe_set_power,
2911 };
2912
2913 static const struct v4l2_subdev_video_ops vfe_video_ops = {
2914 .s_stream = vfe_set_stream,
2915 };
2916
2917 static const struct v4l2_subdev_pad_ops vfe_pad_ops = {
2918 .enum_mbus_code = vfe_enum_mbus_code,
2919 .enum_frame_size = vfe_enum_frame_size,
2920 .get_fmt = vfe_get_format,
2921 .set_fmt = vfe_set_format,
2922 .get_selection = vfe_get_selection,
2923 .set_selection = vfe_set_selection,
2924 };
2925
2926 static const struct v4l2_subdev_ops vfe_v4l2_ops = {
2927 .core = &vfe_core_ops,
2928 .video = &vfe_video_ops,
2929 .pad = &vfe_pad_ops,
2930 };
2931
2932 static const struct v4l2_subdev_internal_ops vfe_v4l2_internal_ops = {
2933 .open = vfe_init_formats,
2934 };
2935
2936 static const struct media_entity_operations vfe_media_ops = {
2937 .link_setup = vfe_link_setup,
2938 .link_validate = v4l2_subdev_link_validate,
2939 };
2940
2941 static const struct camss_video_ops camss_vfe_video_ops = {
2942 .queue_buffer = vfe_queue_buffer,
2943 .flush_buffers = vfe_flush_buffers,
2944 };
2945
2946 void msm_vfe_stop_streaming(struct vfe_device *vfe)
2947 {
2948 int i;
2949
2950 for (i = 0; i < ARRAY_SIZE(vfe->line); i++)
2951 msm_video_stop_streaming(&vfe->line[i].video_out);
2952 }
2953
2954 /*
2955 * msm_vfe_register_entities - Register subdev node for VFE module
2956 * @vfe: VFE device
2957 * @v4l2_dev: V4L2 device
2958 *
2959 * Initialize and register a subdev node for the VFE module. Then
2960 * call msm_video_register() to register the video device node which
2961 * will be connected to this subdev node. Finally, create the
2962 * media link between them.
2963 *
2964 * Return 0 on success or a negative error code otherwise
2965 */
2966 int msm_vfe_register_entities(struct vfe_device *vfe,
2967 struct v4l2_device *v4l2_dev)
2968 {
2969 struct device *dev = to_device(vfe);
2970 struct v4l2_subdev *sd;
2971 struct media_pad *pads;
2972 struct camss_video *video_out;
2973 int ret;
2974 int i;
2975
2976 for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
2977 char name[32];
2978
2979 sd = &vfe->line[i].subdev;
2980 pads = vfe->line[i].pads;
2981 video_out = &vfe->line[i].video_out;
2982
2983 v4l2_subdev_init(sd, &vfe_v4l2_ops);
2984 sd->internal_ops = &vfe_v4l2_internal_ops;
2985 sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
2986 if (i == VFE_LINE_PIX)
2987 snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s",
2988 MSM_VFE_NAME, vfe->id, "pix");
2989 else
2990 snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s%d",
2991 MSM_VFE_NAME, vfe->id, "rdi", i);
2992
2993 v4l2_set_subdevdata(sd, &vfe->line[i]);
2994
2995 ret = vfe_init_formats(sd, NULL);
2996 if (ret < 0) {
2997 dev_err(dev, "Failed to init format: %d\n", ret);
2998 goto error_init;
2999 }
3000
3001 pads[MSM_VFE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
3002 pads[MSM_VFE_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
3003
3004 sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
3005 sd->entity.ops = &vfe_media_ops;
3006 ret = media_entity_pads_init(&sd->entity, MSM_VFE_PADS_NUM,
3007 pads);
3008 if (ret < 0) {
3009 dev_err(dev, "Failed to init media entity: %d\n", ret);
3010 goto error_init;
3011 }
3012
3013 ret = v4l2_device_register_subdev(v4l2_dev, sd);
3014 if (ret < 0) {
3015 dev_err(dev, "Failed to register subdev: %d\n", ret);
3016 goto error_reg_subdev;
3017 }
3018
3019 video_out->ops = &camss_vfe_video_ops;
3020 video_out->bpl_alignment = 8;
3021 video_out->line_based = 0;
3022 if (i == VFE_LINE_PIX) {
3023 video_out->bpl_alignment = 16;
3024 video_out->line_based = 1;
3025 }
3026 snprintf(name, ARRAY_SIZE(name), "%s%d_%s%d",
3027 MSM_VFE_NAME, vfe->id, "video", i);
3028 ret = msm_video_register(video_out, v4l2_dev, name,
3029 i == VFE_LINE_PIX ? 1 : 0);
3030 if (ret < 0) {
3031 dev_err(dev, "Failed to register video node: %d\n",
3032 ret);
3033 goto error_reg_video;
3034 }
3035
3036 ret = media_create_pad_link(
3037 &sd->entity, MSM_VFE_PAD_SRC,
3038 &video_out->vdev.entity, 0,
3039 MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
3040 if (ret < 0) {
3041 dev_err(dev, "Failed to link %s->%s entities: %d\n",
3042 sd->entity.name, video_out->vdev.entity.name,
3043 ret);
3044 goto error_link;
3045 }
3046 }
3047
3048 return 0;
3049
3050 error_link:
3051 msm_video_unregister(video_out);
3052
3053 error_reg_video:
3054 v4l2_device_unregister_subdev(sd);
3055
3056 error_reg_subdev:
3057 media_entity_cleanup(&sd->entity);
3058
3059 error_init:
3060 for (i--; i >= 0; i--) {
3061 sd = &vfe->line[i].subdev;
3062 video_out = &vfe->line[i].video_out;
3063
3064 msm_video_unregister(video_out);
3065 v4l2_device_unregister_subdev(sd);
3066 media_entity_cleanup(&sd->entity);
3067 }
3068
3069 return ret;
3070 }
3071
3072 /*
3073 * msm_vfe_unregister_entities - Unregister VFE module subdev node
3074 * @vfe: VFE device
3075 */
3076 void msm_vfe_unregister_entities(struct vfe_device *vfe)
3077 {
3078 int i;
3079
3080 mutex_destroy(&vfe->power_lock);
3081 mutex_destroy(&vfe->stream_lock);
3082
3083 for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
3084 struct v4l2_subdev *sd = &vfe->line[i].subdev;
3085 struct camss_video *video_out = &vfe->line[i].video_out;
3086
3087 msm_video_unregister(video_out);
3088 v4l2_device_unregister_subdev(sd);
3089 media_entity_cleanup(&sd->entity);
3090 }
3091 }