2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author:Mark Yao <mark.yao@rock-chips.com>
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
17 #include <drm/drm_atomic.h>
18 #include <drm/drm_crtc.h>
19 #include <drm/drm_crtc_helper.h>
20 #include <drm/drm_plane_helper.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/clk.h>
27 #include <linux/of_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/component.h>
31 #include <linux/reset.h>
32 #include <linux/delay.h>
34 #include "rockchip_drm_drv.h"
35 #include "rockchip_drm_gem.h"
36 #include "rockchip_drm_fb.h"
37 #include "rockchip_drm_vop.h"
/* Register write helpers: the RELAXED variant uses writel_relaxed() (no
 * barrier), NORMAL uses writel(). Both funnel into vop_mask_write(). */
39 #define __REG_SET_RELAXED(x, off, mask, shift, v, write_mask) \
40 vop_mask_write(x, off, mask, shift, v, write_mask, true)
42 #define __REG_SET_NORMAL(x, off, mask, shift, v, write_mask) \
43 vop_mask_write(x, off, mask, shift, v, write_mask, false)
/* Field write via a struct vop_reg descriptor (offset/mask/shift). */
45 #define REG_SET(x, base, reg, v, mode) \
46 __REG_SET_##mode(x, base + reg.offset, \
47 reg.mask, reg.shift, v, reg.write_mask)
/* Same as REG_SET but with a caller-supplied mask instead of reg.mask. */
48 #define REG_SET_MASK(x, base, reg, mask, v, mode) \
49 __REG_SET_##mode(x, base + reg.offset, \
50 mask, reg.shift, v, reg.write_mask)
/* Per-window register writes, offset by win->base; relaxed ordering. */
52 #define VOP_WIN_SET(x, win, name, v) \
53 REG_SET(x, win->base, win->phy->name, v, RELAXED)
54 #define VOP_SCL_SET(x, win, name, v) \
55 REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
56 #define VOP_SCL_SET_EXT(x, win, name, v) \
57 REG_SET(x, win->base, win->phy->scl->ext->name, v, RELAXED)
/* Global control registers use ordered (NORMAL) writes. */
58 #define VOP_CTRL_SET(x, name, v) \
59 REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)
/* NOTE(review): reads through data->ctrl, not data->intr — confirm intended. */
61 #define VOP_INTR_GET(vop, name) \
62 vop_read_reg(vop, 0, &vop->data->ctrl->name)
64 #define VOP_INTR_SET(vop, name, mask, v) \
65 REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)
66 #define VOP_INTR_SET_TYPE(vop, name, type, v) \
68 int i, reg = 0, mask = 0; \
69 for (i = 0; i < vop->data->intr->nintrs; i++) { \
70 if (vop->data->intr->intrs[i] & type) { \
75 VOP_INTR_SET(vop, name, mask, reg); \
/* Decode which interrupt types of "type" are set in the named intr register. */
77 #define VOP_INTR_GET_TYPE(vop, name, type) \
78 vop_get_intr_type(vop, &vop->data->intr->name, type)
/* Read back a per-window register field. */
80 #define VOP_WIN_GET(x, win, name) \
81 vop_read_reg(x, win->base, &win->phy->name)
/* Raw read of a window's current YRGB framebuffer address register. */
83 #define VOP_WIN_GET_YRGBADDR(vop, win) \
84 vop_readl(vop, win->base + win->phy->yrgb_mst.offset)
/* container_of casts from the embedded DRM objects back to driver structs. */
86 #define to_vop(x) container_of(x, struct vop, crtc)
87 #define to_vop_win(x) container_of(x, struct vop_win, base)
88 #define to_vop_plane_state(x) container_of(x, struct vop_plane_state, base)
90 struct vop_plane_state
{
91 struct drm_plane_state base
;
98 struct drm_plane base
;
99 const struct vop_win_data
*data
;
102 /* protected by dev->event_lock */
108 struct drm_crtc crtc
;
110 struct drm_device
*drm_dev
;
113 /* mutex vsync_ work */
114 struct mutex vsync_mutex
;
115 bool vsync_work_pending
;
116 struct completion dsp_hold_completion
;
117 struct completion wait_update_complete
;
119 /* protected by dev->event_lock */
120 struct drm_pending_vblank_event
*event
;
122 const struct vop_data
*data
;
127 /* physical map length of vop register */
130 /* one time only one process allowed to config the register */
132 /* lock vop irq reg */
141 /* vop share memory frequency */
145 struct reset_control
*dclk_rst
;
147 struct vop_win win
[];
150 static inline void vop_writel(struct vop
*vop
, uint32_t offset
, uint32_t v
)
152 writel(v
, vop
->regs
+ offset
);
153 vop
->regsbak
[offset
>> 2] = v
;
156 static inline uint32_t vop_readl(struct vop
*vop
, uint32_t offset
)
158 return readl(vop
->regs
+ offset
);
161 static inline uint32_t vop_read_reg(struct vop
*vop
, uint32_t base
,
162 const struct vop_reg
*reg
)
164 return (vop_readl(vop
, base
+ reg
->offset
) >> reg
->shift
) & reg
->mask
;
167 static inline void vop_mask_write(struct vop
*vop
, uint32_t offset
,
168 uint32_t mask
, uint32_t shift
, uint32_t v
,
169 bool write_mask
, bool relaxed
)
175 v
= ((v
<< shift
) & 0xffff) | (mask
<< (shift
+ 16));
177 uint32_t cached_val
= vop
->regsbak
[offset
>> 2];
179 v
= (cached_val
& ~(mask
<< shift
)) | ((v
& mask
) << shift
);
180 vop
->regsbak
[offset
>> 2] = v
;
184 writel_relaxed(v
, vop
->regs
+ offset
);
186 writel(v
, vop
->regs
+ offset
);
189 static inline uint32_t vop_get_intr_type(struct vop
*vop
,
190 const struct vop_reg
*reg
, int type
)
193 uint32_t regs
= vop_read_reg(vop
, 0, reg
);
195 for (i
= 0; i
< vop
->data
->intr
->nintrs
; i
++) {
196 if ((type
& vop
->data
->intr
->intrs
[i
]) && (regs
& 1 << i
))
197 ret
|= vop
->data
->intr
->intrs
[i
];
203 static inline void vop_cfg_done(struct vop
*vop
)
205 VOP_CTRL_SET(vop
, cfg_done
, 1);
208 static bool has_rb_swapped(uint32_t format
)
211 case DRM_FORMAT_XBGR8888
:
212 case DRM_FORMAT_ABGR8888
:
213 case DRM_FORMAT_BGR888
:
214 case DRM_FORMAT_BGR565
:
221 static enum vop_data_format
vop_convert_format(uint32_t format
)
224 case DRM_FORMAT_XRGB8888
:
225 case DRM_FORMAT_ARGB8888
:
226 case DRM_FORMAT_XBGR8888
:
227 case DRM_FORMAT_ABGR8888
:
228 return VOP_FMT_ARGB8888
;
229 case DRM_FORMAT_RGB888
:
230 case DRM_FORMAT_BGR888
:
231 return VOP_FMT_RGB888
;
232 case DRM_FORMAT_RGB565
:
233 case DRM_FORMAT_BGR565
:
234 return VOP_FMT_RGB565
;
235 case DRM_FORMAT_NV12
:
236 return VOP_FMT_YUV420SP
;
237 case DRM_FORMAT_NV16
:
238 return VOP_FMT_YUV422SP
;
239 case DRM_FORMAT_NV24
:
240 return VOP_FMT_YUV444SP
;
242 DRM_ERROR("unsupport format[%08x]\n", format
);
247 static bool is_yuv_support(uint32_t format
)
250 case DRM_FORMAT_NV12
:
251 case DRM_FORMAT_NV16
:
252 case DRM_FORMAT_NV24
:
259 static bool is_alpha_support(uint32_t format
)
262 case DRM_FORMAT_ARGB8888
:
263 case DRM_FORMAT_ABGR8888
:
270 static uint16_t scl_vop_cal_scale(enum scale_mode mode
, uint32_t src
,
271 uint32_t dst
, bool is_horizontal
,
272 int vsu_mode
, int *vskiplines
)
274 uint16_t val
= 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT
;
277 if (mode
== SCALE_UP
)
278 val
= GET_SCL_FT_BIC(src
, dst
);
279 else if (mode
== SCALE_DOWN
)
280 val
= GET_SCL_FT_BILI_DN(src
, dst
);
282 if (mode
== SCALE_UP
) {
283 if (vsu_mode
== SCALE_UP_BIL
)
284 val
= GET_SCL_FT_BILI_UP(src
, dst
);
286 val
= GET_SCL_FT_BIC(src
, dst
);
287 } else if (mode
== SCALE_DOWN
) {
289 *vskiplines
= scl_get_vskiplines(src
, dst
);
290 val
= scl_get_bili_dn_vskip(src
, dst
,
293 val
= GET_SCL_FT_BILI_DN(src
, dst
);
301 static void scl_vop_cal_scl_fac(struct vop
*vop
, const struct vop_win_data
*win
,
302 uint32_t src_w
, uint32_t src_h
, uint32_t dst_w
,
303 uint32_t dst_h
, uint32_t pixel_format
)
305 uint16_t yrgb_hor_scl_mode
, yrgb_ver_scl_mode
;
306 uint16_t cbcr_hor_scl_mode
= SCALE_NONE
;
307 uint16_t cbcr_ver_scl_mode
= SCALE_NONE
;
308 int hsub
= drm_format_horz_chroma_subsampling(pixel_format
);
309 int vsub
= drm_format_vert_chroma_subsampling(pixel_format
);
310 bool is_yuv
= is_yuv_support(pixel_format
);
311 uint16_t cbcr_src_w
= src_w
/ hsub
;
312 uint16_t cbcr_src_h
= src_h
/ vsub
;
319 DRM_ERROR("Maximum destination width (3840) exceeded\n");
323 if (!win
->phy
->scl
->ext
) {
324 VOP_SCL_SET(vop
, win
, scale_yrgb_x
,
325 scl_cal_scale2(src_w
, dst_w
));
326 VOP_SCL_SET(vop
, win
, scale_yrgb_y
,
327 scl_cal_scale2(src_h
, dst_h
));
329 VOP_SCL_SET(vop
, win
, scale_cbcr_x
,
330 scl_cal_scale2(cbcr_src_w
, dst_w
));
331 VOP_SCL_SET(vop
, win
, scale_cbcr_y
,
332 scl_cal_scale2(cbcr_src_h
, dst_h
));
337 yrgb_hor_scl_mode
= scl_get_scl_mode(src_w
, dst_w
);
338 yrgb_ver_scl_mode
= scl_get_scl_mode(src_h
, dst_h
);
341 cbcr_hor_scl_mode
= scl_get_scl_mode(cbcr_src_w
, dst_w
);
342 cbcr_ver_scl_mode
= scl_get_scl_mode(cbcr_src_h
, dst_h
);
343 if (cbcr_hor_scl_mode
== SCALE_DOWN
)
344 lb_mode
= scl_vop_cal_lb_mode(dst_w
, true);
346 lb_mode
= scl_vop_cal_lb_mode(cbcr_src_w
, true);
348 if (yrgb_hor_scl_mode
== SCALE_DOWN
)
349 lb_mode
= scl_vop_cal_lb_mode(dst_w
, false);
351 lb_mode
= scl_vop_cal_lb_mode(src_w
, false);
354 VOP_SCL_SET_EXT(vop
, win
, lb_mode
, lb_mode
);
355 if (lb_mode
== LB_RGB_3840X2
) {
356 if (yrgb_ver_scl_mode
!= SCALE_NONE
) {
357 DRM_ERROR("ERROR : not allow yrgb ver scale\n");
360 if (cbcr_ver_scl_mode
!= SCALE_NONE
) {
361 DRM_ERROR("ERROR : not allow cbcr ver scale\n");
364 vsu_mode
= SCALE_UP_BIL
;
365 } else if (lb_mode
== LB_RGB_2560X4
) {
366 vsu_mode
= SCALE_UP_BIL
;
368 vsu_mode
= SCALE_UP_BIC
;
371 val
= scl_vop_cal_scale(yrgb_hor_scl_mode
, src_w
, dst_w
,
373 VOP_SCL_SET(vop
, win
, scale_yrgb_x
, val
);
374 val
= scl_vop_cal_scale(yrgb_ver_scl_mode
, src_h
, dst_h
,
375 false, vsu_mode
, &vskiplines
);
376 VOP_SCL_SET(vop
, win
, scale_yrgb_y
, val
);
378 VOP_SCL_SET_EXT(vop
, win
, vsd_yrgb_gt4
, vskiplines
== 4);
379 VOP_SCL_SET_EXT(vop
, win
, vsd_yrgb_gt2
, vskiplines
== 2);
381 VOP_SCL_SET_EXT(vop
, win
, yrgb_hor_scl_mode
, yrgb_hor_scl_mode
);
382 VOP_SCL_SET_EXT(vop
, win
, yrgb_ver_scl_mode
, yrgb_ver_scl_mode
);
383 VOP_SCL_SET_EXT(vop
, win
, yrgb_hsd_mode
, SCALE_DOWN_BIL
);
384 VOP_SCL_SET_EXT(vop
, win
, yrgb_vsd_mode
, SCALE_DOWN_BIL
);
385 VOP_SCL_SET_EXT(vop
, win
, yrgb_vsu_mode
, vsu_mode
);
387 val
= scl_vop_cal_scale(cbcr_hor_scl_mode
, cbcr_src_w
,
388 dst_w
, true, 0, NULL
);
389 VOP_SCL_SET(vop
, win
, scale_cbcr_x
, val
);
390 val
= scl_vop_cal_scale(cbcr_ver_scl_mode
, cbcr_src_h
,
391 dst_h
, false, vsu_mode
, &vskiplines
);
392 VOP_SCL_SET(vop
, win
, scale_cbcr_y
, val
);
394 VOP_SCL_SET_EXT(vop
, win
, vsd_cbcr_gt4
, vskiplines
== 4);
395 VOP_SCL_SET_EXT(vop
, win
, vsd_cbcr_gt2
, vskiplines
== 2);
396 VOP_SCL_SET_EXT(vop
, win
, cbcr_hor_scl_mode
, cbcr_hor_scl_mode
);
397 VOP_SCL_SET_EXT(vop
, win
, cbcr_ver_scl_mode
, cbcr_ver_scl_mode
);
398 VOP_SCL_SET_EXT(vop
, win
, cbcr_hsd_mode
, SCALE_DOWN_BIL
);
399 VOP_SCL_SET_EXT(vop
, win
, cbcr_vsd_mode
, SCALE_DOWN_BIL
);
400 VOP_SCL_SET_EXT(vop
, win
, cbcr_vsu_mode
, vsu_mode
);
404 static void vop_dsp_hold_valid_irq_enable(struct vop
*vop
)
408 if (WARN_ON(!vop
->is_enabled
))
411 spin_lock_irqsave(&vop
->irq_lock
, flags
);
413 VOP_INTR_SET_TYPE(vop
, enable
, DSP_HOLD_VALID_INTR
, 1);
415 spin_unlock_irqrestore(&vop
->irq_lock
, flags
);
418 static void vop_dsp_hold_valid_irq_disable(struct vop
*vop
)
422 if (WARN_ON(!vop
->is_enabled
))
425 spin_lock_irqsave(&vop
->irq_lock
, flags
);
427 VOP_INTR_SET_TYPE(vop
, enable
, DSP_HOLD_VALID_INTR
, 0);
429 spin_unlock_irqrestore(&vop
->irq_lock
, flags
);
432 static void vop_enable(struct drm_crtc
*crtc
)
434 struct vop
*vop
= to_vop(crtc
);
437 ret
= pm_runtime_get_sync(vop
->dev
);
439 dev_err(vop
->dev
, "failed to get pm runtime: %d\n", ret
);
443 ret
= clk_enable(vop
->hclk
);
445 dev_err(vop
->dev
, "failed to enable hclk - %d\n", ret
);
449 ret
= clk_enable(vop
->dclk
);
451 dev_err(vop
->dev
, "failed to enable dclk - %d\n", ret
);
452 goto err_disable_hclk
;
455 ret
= clk_enable(vop
->aclk
);
457 dev_err(vop
->dev
, "failed to enable aclk - %d\n", ret
);
458 goto err_disable_dclk
;
462 * Slave iommu shares power, irq and clock with vop. It was associated
463 * automatically with this master device via common driver code.
464 * Now that we have enabled the clock we attach it to the shared drm
467 ret
= rockchip_drm_dma_attach_device(vop
->drm_dev
, vop
->dev
);
469 dev_err(vop
->dev
, "failed to attach dma mapping, %d\n", ret
);
470 goto err_disable_aclk
;
473 memcpy(vop
->regs
, vop
->regsbak
, vop
->len
);
475 * At here, vop clock & iommu is enable, R/W vop regs would be safe.
477 vop
->is_enabled
= true;
479 spin_lock(&vop
->reg_lock
);
481 VOP_CTRL_SET(vop
, standby
, 0);
483 spin_unlock(&vop
->reg_lock
);
485 enable_irq(vop
->irq
);
487 drm_crtc_vblank_on(crtc
);
492 clk_disable(vop
->aclk
);
494 clk_disable(vop
->dclk
);
496 clk_disable(vop
->hclk
);
499 static void vop_crtc_disable(struct drm_crtc
*crtc
)
501 struct vop
*vop
= to_vop(crtc
);
507 * We need to make sure that all windows are disabled before we
508 * disable that crtc. Otherwise we might try to scan from a destroyed
511 for (i
= 0; i
< vop
->data
->win_size
; i
++) {
512 struct vop_win
*vop_win
= &vop
->win
[i
];
513 const struct vop_win_data
*win
= vop_win
->data
;
515 spin_lock(&vop
->reg_lock
);
516 VOP_WIN_SET(vop
, win
, enable
, 0);
517 spin_unlock(&vop
->reg_lock
);
520 drm_crtc_vblank_off(crtc
);
523 * Vop standby will take effect at end of current frame,
524 * if dsp hold valid irq happen, it means standby complete.
526 * we must wait standby complete when we want to disable aclk,
527 * if not, memory bus maybe dead.
529 reinit_completion(&vop
->dsp_hold_completion
);
530 vop_dsp_hold_valid_irq_enable(vop
);
532 spin_lock(&vop
->reg_lock
);
534 VOP_CTRL_SET(vop
, standby
, 1);
536 spin_unlock(&vop
->reg_lock
);
538 wait_for_completion(&vop
->dsp_hold_completion
);
540 vop_dsp_hold_valid_irq_disable(vop
);
542 disable_irq(vop
->irq
);
544 vop
->is_enabled
= false;
547 * vop standby complete, so iommu detach is safe.
549 rockchip_drm_dma_detach_device(vop
->drm_dev
, vop
->dev
);
551 clk_disable(vop
->dclk
);
552 clk_disable(vop
->aclk
);
553 clk_disable(vop
->hclk
);
554 pm_runtime_put(vop
->dev
);
556 if (crtc
->state
->event
&& !crtc
->state
->active
) {
557 spin_lock_irq(&crtc
->dev
->event_lock
);
558 drm_crtc_send_vblank_event(crtc
, crtc
->state
->event
);
559 spin_unlock_irq(&crtc
->dev
->event_lock
);
561 crtc
->state
->event
= NULL
;
/* Plane .destroy hook: nothing allocated per-plane, just core cleanup. */
static void vop_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
}
570 static int vop_plane_prepare_fb(struct drm_plane
*plane
,
571 const struct drm_plane_state
*new_state
)
573 if (plane
->state
->fb
)
574 drm_framebuffer_reference(plane
->state
->fb
);
579 static void vop_plane_cleanup_fb(struct drm_plane
*plane
,
580 const struct drm_plane_state
*old_state
)
583 drm_framebuffer_unreference(old_state
->fb
);
586 static int vop_plane_atomic_check(struct drm_plane
*plane
,
587 struct drm_plane_state
*state
)
589 struct drm_crtc
*crtc
= state
->crtc
;
590 struct drm_crtc_state
*crtc_state
;
591 struct drm_framebuffer
*fb
= state
->fb
;
592 struct vop_win
*vop_win
= to_vop_win(plane
);
593 struct vop_plane_state
*vop_plane_state
= to_vop_plane_state(state
);
594 const struct vop_win_data
*win
= vop_win
->data
;
596 struct drm_rect clip
;
597 int min_scale
= win
->phy
->scl
? FRAC_16_16(1, 8) :
598 DRM_PLANE_HELPER_NO_SCALING
;
599 int max_scale
= win
->phy
->scl
? FRAC_16_16(8, 1) :
600 DRM_PLANE_HELPER_NO_SCALING
;
605 crtc_state
= drm_atomic_get_existing_crtc_state(state
->state
, crtc
);
606 if (WARN_ON(!crtc_state
))
611 clip
.x2
= crtc_state
->adjusted_mode
.hdisplay
;
612 clip
.y2
= crtc_state
->adjusted_mode
.vdisplay
;
614 ret
= drm_plane_helper_check_state(state
, &clip
,
615 min_scale
, max_scale
,
623 vop_plane_state
->format
= vop_convert_format(fb
->pixel_format
);
624 if (vop_plane_state
->format
< 0)
625 return vop_plane_state
->format
;
628 * Src.x1 can be odd when do clip, but yuv plane start point
629 * need align with 2 pixel.
631 if (is_yuv_support(fb
->pixel_format
) && ((state
->src
.x1
>> 16) % 2))
634 vop_plane_state
->enable
= true;
639 vop_plane_state
->enable
= false;
643 static void vop_plane_atomic_disable(struct drm_plane
*plane
,
644 struct drm_plane_state
*old_state
)
646 struct vop_plane_state
*vop_plane_state
= to_vop_plane_state(old_state
);
647 struct vop_win
*vop_win
= to_vop_win(plane
);
648 const struct vop_win_data
*win
= vop_win
->data
;
649 struct vop
*vop
= to_vop(old_state
->crtc
);
651 if (!old_state
->crtc
)
654 spin_lock_irq(&plane
->dev
->event_lock
);
655 vop_win
->enable
= false;
656 vop_win
->yrgb_mst
= 0;
657 spin_unlock_irq(&plane
->dev
->event_lock
);
659 spin_lock(&vop
->reg_lock
);
661 VOP_WIN_SET(vop
, win
, enable
, 0);
663 spin_unlock(&vop
->reg_lock
);
665 vop_plane_state
->enable
= false;
668 static void vop_plane_atomic_update(struct drm_plane
*plane
,
669 struct drm_plane_state
*old_state
)
671 struct drm_plane_state
*state
= plane
->state
;
672 struct drm_crtc
*crtc
= state
->crtc
;
673 struct vop_win
*vop_win
= to_vop_win(plane
);
674 struct vop_plane_state
*vop_plane_state
= to_vop_plane_state(state
);
675 const struct vop_win_data
*win
= vop_win
->data
;
676 struct vop
*vop
= to_vop(state
->crtc
);
677 struct drm_framebuffer
*fb
= state
->fb
;
678 unsigned int actual_w
, actual_h
;
679 unsigned int dsp_stx
, dsp_sty
;
680 uint32_t act_info
, dsp_info
, dsp_st
;
681 struct drm_rect
*src
= &state
->src
;
682 struct drm_rect
*dest
= &state
->dst
;
683 struct drm_gem_object
*obj
, *uv_obj
;
684 struct rockchip_gem_object
*rk_obj
, *rk_uv_obj
;
685 unsigned long offset
;
691 * can't update plane when vop is disabled.
696 if (WARN_ON(!vop
->is_enabled
))
699 if (!vop_plane_state
->enable
) {
700 vop_plane_atomic_disable(plane
, old_state
);
704 obj
= rockchip_fb_get_gem_obj(fb
, 0);
705 rk_obj
= to_rockchip_obj(obj
);
707 actual_w
= drm_rect_width(src
) >> 16;
708 actual_h
= drm_rect_height(src
) >> 16;
709 act_info
= (actual_h
- 1) << 16 | ((actual_w
- 1) & 0xffff);
711 dsp_info
= (drm_rect_height(dest
) - 1) << 16;
712 dsp_info
|= (drm_rect_width(dest
) - 1) & 0xffff;
714 dsp_stx
= dest
->x1
+ crtc
->mode
.htotal
- crtc
->mode
.hsync_start
;
715 dsp_sty
= dest
->y1
+ crtc
->mode
.vtotal
- crtc
->mode
.vsync_start
;
716 dsp_st
= dsp_sty
<< 16 | (dsp_stx
& 0xffff);
718 offset
= (src
->x1
>> 16) * drm_format_plane_cpp(fb
->pixel_format
, 0);
719 offset
+= (src
->y1
>> 16) * fb
->pitches
[0];
720 vop_plane_state
->yrgb_mst
= rk_obj
->dma_addr
+ offset
+ fb
->offsets
[0];
722 spin_lock_irq(&plane
->dev
->event_lock
);
723 vop_win
->enable
= true;
724 vop_win
->yrgb_mst
= vop_plane_state
->yrgb_mst
;
725 spin_unlock_irq(&plane
->dev
->event_lock
);
727 spin_lock(&vop
->reg_lock
);
729 VOP_WIN_SET(vop
, win
, format
, vop_plane_state
->format
);
730 VOP_WIN_SET(vop
, win
, yrgb_vir
, fb
->pitches
[0] >> 2);
731 VOP_WIN_SET(vop
, win
, yrgb_mst
, vop_plane_state
->yrgb_mst
);
732 if (is_yuv_support(fb
->pixel_format
)) {
733 int hsub
= drm_format_horz_chroma_subsampling(fb
->pixel_format
);
734 int vsub
= drm_format_vert_chroma_subsampling(fb
->pixel_format
);
735 int bpp
= drm_format_plane_cpp(fb
->pixel_format
, 1);
737 uv_obj
= rockchip_fb_get_gem_obj(fb
, 1);
738 rk_uv_obj
= to_rockchip_obj(uv_obj
);
740 offset
= (src
->x1
>> 16) * bpp
/ hsub
;
741 offset
+= (src
->y1
>> 16) * fb
->pitches
[1] / vsub
;
743 dma_addr
= rk_uv_obj
->dma_addr
+ offset
+ fb
->offsets
[1];
744 VOP_WIN_SET(vop
, win
, uv_vir
, fb
->pitches
[1] >> 2);
745 VOP_WIN_SET(vop
, win
, uv_mst
, dma_addr
);
749 scl_vop_cal_scl_fac(vop
, win
, actual_w
, actual_h
,
750 drm_rect_width(dest
), drm_rect_height(dest
),
753 VOP_WIN_SET(vop
, win
, act_info
, act_info
);
754 VOP_WIN_SET(vop
, win
, dsp_info
, dsp_info
);
755 VOP_WIN_SET(vop
, win
, dsp_st
, dsp_st
);
757 rb_swap
= has_rb_swapped(fb
->pixel_format
);
758 VOP_WIN_SET(vop
, win
, rb_swap
, rb_swap
);
760 if (is_alpha_support(fb
->pixel_format
)) {
761 VOP_WIN_SET(vop
, win
, dst_alpha_ctl
,
762 DST_FACTOR_M0(ALPHA_SRC_INVERSE
));
763 val
= SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL
) |
764 SRC_ALPHA_M0(ALPHA_STRAIGHT
) |
765 SRC_BLEND_M0(ALPHA_PER_PIX
) |
766 SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION
) |
767 SRC_FACTOR_M0(ALPHA_ONE
);
768 VOP_WIN_SET(vop
, win
, src_alpha_ctl
, val
);
770 VOP_WIN_SET(vop
, win
, src_alpha_ctl
, SRC_ALPHA_EN(0));
773 VOP_WIN_SET(vop
, win
, enable
, 1);
774 spin_unlock(&vop
->reg_lock
);
777 static const struct drm_plane_helper_funcs plane_helper_funcs
= {
778 .prepare_fb
= vop_plane_prepare_fb
,
779 .cleanup_fb
= vop_plane_cleanup_fb
,
780 .atomic_check
= vop_plane_atomic_check
,
781 .atomic_update
= vop_plane_atomic_update
,
782 .atomic_disable
= vop_plane_atomic_disable
,
785 static void vop_atomic_plane_reset(struct drm_plane
*plane
)
787 struct vop_plane_state
*vop_plane_state
=
788 to_vop_plane_state(plane
->state
);
790 if (plane
->state
&& plane
->state
->fb
)
791 drm_framebuffer_unreference(plane
->state
->fb
);
793 kfree(vop_plane_state
);
794 vop_plane_state
= kzalloc(sizeof(*vop_plane_state
), GFP_KERNEL
);
795 if (!vop_plane_state
)
798 plane
->state
= &vop_plane_state
->base
;
799 plane
->state
->plane
= plane
;
802 static struct drm_plane_state
*
803 vop_atomic_plane_duplicate_state(struct drm_plane
*plane
)
805 struct vop_plane_state
*old_vop_plane_state
;
806 struct vop_plane_state
*vop_plane_state
;
808 if (WARN_ON(!plane
->state
))
811 old_vop_plane_state
= to_vop_plane_state(plane
->state
);
812 vop_plane_state
= kmemdup(old_vop_plane_state
,
813 sizeof(*vop_plane_state
), GFP_KERNEL
);
814 if (!vop_plane_state
)
817 __drm_atomic_helper_plane_duplicate_state(plane
,
818 &vop_plane_state
->base
);
820 return &vop_plane_state
->base
;
/* Plane .atomic_destroy_state hook: helper teardown, then free our wrapper. */
static void vop_atomic_plane_destroy_state(struct drm_plane *plane,
					   struct drm_plane_state *state)
{
	struct vop_plane_state *vop_state = to_vop_plane_state(state);

	__drm_atomic_helper_plane_destroy_state(state);

	kfree(vop_state);
}
833 static const struct drm_plane_funcs vop_plane_funcs
= {
834 .update_plane
= drm_atomic_helper_update_plane
,
835 .disable_plane
= drm_atomic_helper_disable_plane
,
836 .destroy
= vop_plane_destroy
,
837 .reset
= vop_atomic_plane_reset
,
838 .atomic_duplicate_state
= vop_atomic_plane_duplicate_state
,
839 .atomic_destroy_state
= vop_atomic_plane_destroy_state
,
842 static int vop_crtc_enable_vblank(struct drm_crtc
*crtc
)
844 struct vop
*vop
= to_vop(crtc
);
847 if (WARN_ON(!vop
->is_enabled
))
850 spin_lock_irqsave(&vop
->irq_lock
, flags
);
852 VOP_INTR_SET_TYPE(vop
, enable
, FS_INTR
, 1);
854 spin_unlock_irqrestore(&vop
->irq_lock
, flags
);
859 static void vop_crtc_disable_vblank(struct drm_crtc
*crtc
)
861 struct vop
*vop
= to_vop(crtc
);
864 if (WARN_ON(!vop
->is_enabled
))
867 spin_lock_irqsave(&vop
->irq_lock
, flags
);
869 VOP_INTR_SET_TYPE(vop
, enable
, FS_INTR
, 0);
871 spin_unlock_irqrestore(&vop
->irq_lock
, flags
);
874 static void vop_crtc_wait_for_update(struct drm_crtc
*crtc
)
876 struct vop
*vop
= to_vop(crtc
);
878 reinit_completion(&vop
->wait_update_complete
);
879 WARN_ON(!wait_for_completion_timeout(&vop
->wait_update_complete
, 100));
882 static const struct rockchip_crtc_funcs private_crtc_funcs
= {
883 .enable_vblank
= vop_crtc_enable_vblank
,
884 .disable_vblank
= vop_crtc_disable_vblank
,
885 .wait_for_update
= vop_crtc_wait_for_update
,
888 static bool vop_crtc_mode_fixup(struct drm_crtc
*crtc
,
889 const struct drm_display_mode
*mode
,
890 struct drm_display_mode
*adjusted_mode
)
892 struct vop
*vop
= to_vop(crtc
);
894 adjusted_mode
->clock
=
895 clk_round_rate(vop
->dclk
, mode
->clock
* 1000) / 1000;
900 static void vop_crtc_enable(struct drm_crtc
*crtc
)
902 struct vop
*vop
= to_vop(crtc
);
903 struct rockchip_crtc_state
*s
= to_rockchip_crtc_state(crtc
->state
);
904 struct drm_display_mode
*adjusted_mode
= &crtc
->state
->adjusted_mode
;
905 u16 hsync_len
= adjusted_mode
->hsync_end
- adjusted_mode
->hsync_start
;
906 u16 hdisplay
= adjusted_mode
->hdisplay
;
907 u16 htotal
= adjusted_mode
->htotal
;
908 u16 hact_st
= adjusted_mode
->htotal
- adjusted_mode
->hsync_start
;
909 u16 hact_end
= hact_st
+ hdisplay
;
910 u16 vdisplay
= adjusted_mode
->vdisplay
;
911 u16 vtotal
= adjusted_mode
->vtotal
;
912 u16 vsync_len
= adjusted_mode
->vsync_end
- adjusted_mode
->vsync_start
;
913 u16 vact_st
= adjusted_mode
->vtotal
- adjusted_mode
->vsync_start
;
914 u16 vact_end
= vact_st
+ vdisplay
;
915 uint32_t pin_pol
, val
;
921 * If dclk rate is zero, mean that scanout is stop,
922 * we don't need wait any more.
924 if (clk_get_rate(vop
->dclk
)) {
926 * Rk3288 vop timing register is immediately, when configure
927 * display timing on display time, may cause tearing.
929 * Vop standby will take effect at end of current frame,
930 * if dsp hold valid irq happen, it means standby complete.
933 * standby and wait complete --> |----
937 * configure display timing --> |
942 reinit_completion(&vop
->dsp_hold_completion
);
943 vop_dsp_hold_valid_irq_enable(vop
);
945 spin_lock(&vop
->reg_lock
);
947 VOP_CTRL_SET(vop
, standby
, 1);
949 spin_unlock(&vop
->reg_lock
);
951 wait_for_completion(&vop
->dsp_hold_completion
);
953 vop_dsp_hold_valid_irq_disable(vop
);
957 pin_pol
|= (adjusted_mode
->flags
& DRM_MODE_FLAG_NHSYNC
) ? 0 : 1;
958 pin_pol
|= (adjusted_mode
->flags
& DRM_MODE_FLAG_NVSYNC
) ? 0 : (1 << 1);
959 VOP_CTRL_SET(vop
, pin_pol
, pin_pol
);
961 switch (s
->output_type
) {
962 case DRM_MODE_CONNECTOR_LVDS
:
963 VOP_CTRL_SET(vop
, rgb_en
, 1);
964 VOP_CTRL_SET(vop
, rgb_pin_pol
, pin_pol
);
966 case DRM_MODE_CONNECTOR_eDP
:
967 VOP_CTRL_SET(vop
, edp_pin_pol
, pin_pol
);
968 VOP_CTRL_SET(vop
, edp_en
, 1);
970 case DRM_MODE_CONNECTOR_HDMIA
:
971 VOP_CTRL_SET(vop
, hdmi_pin_pol
, pin_pol
);
972 VOP_CTRL_SET(vop
, hdmi_en
, 1);
974 case DRM_MODE_CONNECTOR_DSI
:
975 VOP_CTRL_SET(vop
, mipi_pin_pol
, pin_pol
);
976 VOP_CTRL_SET(vop
, mipi_en
, 1);
979 DRM_ERROR("unsupport connector_type[%d]\n", s
->output_type
);
981 VOP_CTRL_SET(vop
, out_mode
, s
->output_mode
);
983 VOP_CTRL_SET(vop
, htotal_pw
, (htotal
<< 16) | hsync_len
);
986 VOP_CTRL_SET(vop
, hact_st_end
, val
);
987 VOP_CTRL_SET(vop
, hpost_st_end
, val
);
989 VOP_CTRL_SET(vop
, vtotal_pw
, (vtotal
<< 16) | vsync_len
);
992 VOP_CTRL_SET(vop
, vact_st_end
, val
);
993 VOP_CTRL_SET(vop
, vpost_st_end
, val
);
995 clk_set_rate(vop
->dclk
, adjusted_mode
->clock
* 1000);
997 VOP_CTRL_SET(vop
, standby
, 0);
1000 static void vop_crtc_atomic_flush(struct drm_crtc
*crtc
,
1001 struct drm_crtc_state
*old_crtc_state
)
1003 struct vop
*vop
= to_vop(crtc
);
1005 if (WARN_ON(!vop
->is_enabled
))
1008 spin_lock(&vop
->reg_lock
);
1012 spin_unlock(&vop
->reg_lock
);
1015 static void vop_crtc_atomic_begin(struct drm_crtc
*crtc
,
1016 struct drm_crtc_state
*old_crtc_state
)
1018 struct vop
*vop
= to_vop(crtc
);
1020 spin_lock_irq(&crtc
->dev
->event_lock
);
1021 if (crtc
->state
->event
) {
1022 WARN_ON(drm_crtc_vblank_get(crtc
) != 0);
1023 WARN_ON(vop
->event
);
1025 vop
->event
= crtc
->state
->event
;
1026 crtc
->state
->event
= NULL
;
1028 spin_unlock_irq(&crtc
->dev
->event_lock
);
1031 static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs
= {
1032 .enable
= vop_crtc_enable
,
1033 .disable
= vop_crtc_disable
,
1034 .mode_fixup
= vop_crtc_mode_fixup
,
1035 .atomic_flush
= vop_crtc_atomic_flush
,
1036 .atomic_begin
= vop_crtc_atomic_begin
,
/* CRTC .destroy hook: core cleanup only. */
static void vop_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
}
1044 static void vop_crtc_reset(struct drm_crtc
*crtc
)
1047 __drm_atomic_helper_crtc_destroy_state(crtc
->state
);
1050 crtc
->state
= kzalloc(sizeof(struct rockchip_crtc_state
), GFP_KERNEL
);
1052 crtc
->state
->crtc
= crtc
;
1055 static struct drm_crtc_state
*vop_crtc_duplicate_state(struct drm_crtc
*crtc
)
1057 struct rockchip_crtc_state
*rockchip_state
;
1059 rockchip_state
= kzalloc(sizeof(*rockchip_state
), GFP_KERNEL
);
1060 if (!rockchip_state
)
1063 __drm_atomic_helper_crtc_duplicate_state(crtc
, &rockchip_state
->base
);
1064 return &rockchip_state
->base
;
1067 static void vop_crtc_destroy_state(struct drm_crtc
*crtc
,
1068 struct drm_crtc_state
*state
)
1070 struct rockchip_crtc_state
*s
= to_rockchip_crtc_state(state
);
1072 __drm_atomic_helper_crtc_destroy_state(&s
->base
);
1076 static const struct drm_crtc_funcs vop_crtc_funcs
= {
1077 .set_config
= drm_atomic_helper_set_config
,
1078 .page_flip
= drm_atomic_helper_page_flip
,
1079 .destroy
= vop_crtc_destroy
,
1080 .reset
= vop_crtc_reset
,
1081 .atomic_duplicate_state
= vop_crtc_duplicate_state
,
1082 .atomic_destroy_state
= vop_crtc_destroy_state
,
1085 static bool vop_win_pending_is_complete(struct vop_win
*vop_win
)
1087 dma_addr_t yrgb_mst
;
1089 if (!vop_win
->enable
)
1090 return VOP_WIN_GET(vop_win
->vop
, vop_win
->data
, enable
) == 0;
1092 yrgb_mst
= VOP_WIN_GET_YRGBADDR(vop_win
->vop
, vop_win
->data
);
1094 return yrgb_mst
== vop_win
->yrgb_mst
;
1097 static void vop_handle_vblank(struct vop
*vop
)
1099 struct drm_device
*drm
= vop
->drm_dev
;
1100 struct drm_crtc
*crtc
= &vop
->crtc
;
1101 unsigned long flags
;
1104 for (i
= 0; i
< vop
->data
->win_size
; i
++) {
1105 if (!vop_win_pending_is_complete(&vop
->win
[i
]))
1109 spin_lock_irqsave(&drm
->event_lock
, flags
);
1112 drm_crtc_send_vblank_event(crtc
, vop
->event
);
1113 drm_crtc_vblank_put(crtc
);
1117 spin_unlock_irqrestore(&drm
->event_lock
, flags
);
1119 if (!completion_done(&vop
->wait_update_complete
))
1120 complete(&vop
->wait_update_complete
);
1123 static irqreturn_t
vop_isr(int irq
, void *data
)
1125 struct vop
*vop
= data
;
1126 struct drm_crtc
*crtc
= &vop
->crtc
;
1127 uint32_t active_irqs
;
1128 unsigned long flags
;
1132 * interrupt register has interrupt status, enable and clear bits, we
1133 * must hold irq_lock to avoid a race with enable/disable_vblank().
1135 spin_lock_irqsave(&vop
->irq_lock
, flags
);
1137 active_irqs
= VOP_INTR_GET_TYPE(vop
, status
, INTR_MASK
);
1138 /* Clear all active interrupt sources */
1140 VOP_INTR_SET_TYPE(vop
, clear
, active_irqs
, 1);
1142 spin_unlock_irqrestore(&vop
->irq_lock
, flags
);
1144 /* This is expected for vop iommu irqs, since the irq is shared */
1148 if (active_irqs
& DSP_HOLD_VALID_INTR
) {
1149 complete(&vop
->dsp_hold_completion
);
1150 active_irqs
&= ~DSP_HOLD_VALID_INTR
;
1154 if (active_irqs
& FS_INTR
) {
1155 drm_crtc_handle_vblank(crtc
);
1156 vop_handle_vblank(vop
);
1157 active_irqs
&= ~FS_INTR
;
1161 /* Unhandled irqs are spurious. */
1163 DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs
);
1168 static int vop_create_crtc(struct vop
*vop
)
1170 const struct vop_data
*vop_data
= vop
->data
;
1171 struct device
*dev
= vop
->dev
;
1172 struct drm_device
*drm_dev
= vop
->drm_dev
;
1173 struct drm_plane
*primary
= NULL
, *cursor
= NULL
, *plane
, *tmp
;
1174 struct drm_crtc
*crtc
= &vop
->crtc
;
1175 struct device_node
*port
;
1180 * Create drm_plane for primary and cursor planes first, since we need
1181 * to pass them to drm_crtc_init_with_planes, which sets the
1182 * "possible_crtcs" to the newly initialized crtc.
1184 for (i
= 0; i
< vop_data
->win_size
; i
++) {
1185 struct vop_win
*vop_win
= &vop
->win
[i
];
1186 const struct vop_win_data
*win_data
= vop_win
->data
;
1188 if (win_data
->type
!= DRM_PLANE_TYPE_PRIMARY
&&
1189 win_data
->type
!= DRM_PLANE_TYPE_CURSOR
)
1192 ret
= drm_universal_plane_init(vop
->drm_dev
, &vop_win
->base
,
1193 0, &vop_plane_funcs
,
1194 win_data
->phy
->data_formats
,
1195 win_data
->phy
->nformats
,
1196 win_data
->type
, NULL
);
1198 DRM_ERROR("failed to initialize plane\n");
1199 goto err_cleanup_planes
;
1202 plane
= &vop_win
->base
;
1203 drm_plane_helper_add(plane
, &plane_helper_funcs
);
1204 if (plane
->type
== DRM_PLANE_TYPE_PRIMARY
)
1206 else if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
1210 ret
= drm_crtc_init_with_planes(drm_dev
, crtc
, primary
, cursor
,
1211 &vop_crtc_funcs
, NULL
);
1213 goto err_cleanup_planes
;
1215 drm_crtc_helper_add(crtc
, &vop_crtc_helper_funcs
);
1218 * Create drm_planes for overlay windows with possible_crtcs restricted
1219 * to the newly created crtc.
1221 for (i
= 0; i
< vop_data
->win_size
; i
++) {
1222 struct vop_win
*vop_win
= &vop
->win
[i
];
1223 const struct vop_win_data
*win_data
= vop_win
->data
;
1224 unsigned long possible_crtcs
= 1 << drm_crtc_index(crtc
);
1226 if (win_data
->type
!= DRM_PLANE_TYPE_OVERLAY
)
1229 ret
= drm_universal_plane_init(vop
->drm_dev
, &vop_win
->base
,
1232 win_data
->phy
->data_formats
,
1233 win_data
->phy
->nformats
,
1234 win_data
->type
, NULL
);
1236 DRM_ERROR("failed to initialize overlay plane\n");
1237 goto err_cleanup_crtc
;
1239 drm_plane_helper_add(&vop_win
->base
, &plane_helper_funcs
);
1242 port
= of_get_child_by_name(dev
->of_node
, "port");
1244 DRM_ERROR("no port node found in %s\n",
1245 dev
->of_node
->full_name
);
1247 goto err_cleanup_crtc
;
1250 init_completion(&vop
->dsp_hold_completion
);
1251 init_completion(&vop
->wait_update_complete
);
1253 rockchip_register_crtc_funcs(crtc
, &private_crtc_funcs
);
1258 drm_crtc_cleanup(crtc
);
1260 list_for_each_entry_safe(plane
, tmp
, &drm_dev
->mode_config
.plane_list
,
1262 drm_plane_cleanup(plane
);
1266 static void vop_destroy_crtc(struct vop
*vop
)
1268 struct drm_crtc
*crtc
= &vop
->crtc
;
1269 struct drm_device
*drm_dev
= vop
->drm_dev
;
1270 struct drm_plane
*plane
, *tmp
;
1272 rockchip_unregister_crtc_funcs(crtc
);
1273 of_node_put(crtc
->port
);
1276 * We need to cleanup the planes now. Why?
1278 * The planes are "&vop->win[i].base". That means the memory is
1279 * all part of the big "struct vop" chunk of memory. That memory
1280 * was devm allocated and associated with this component. We need to
1281 * free it ourselves before vop_unbind() finishes.
1283 list_for_each_entry_safe(plane
, tmp
, &drm_dev
->mode_config
.plane_list
,
1285 vop_plane_destroy(plane
);
1288 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
1289 * references the CRTC.
1291 drm_crtc_cleanup(crtc
);
1294 static int vop_initial(struct vop
*vop
)
1296 const struct vop_data
*vop_data
= vop
->data
;
1297 const struct vop_reg_data
*init_table
= vop_data
->init_table
;
1298 struct reset_control
*ahb_rst
;
1301 vop
->hclk
= devm_clk_get(vop
->dev
, "hclk_vop");
1302 if (IS_ERR(vop
->hclk
)) {
1303 dev_err(vop
->dev
, "failed to get hclk source\n");
1304 return PTR_ERR(vop
->hclk
);
1306 vop
->aclk
= devm_clk_get(vop
->dev
, "aclk_vop");
1307 if (IS_ERR(vop
->aclk
)) {
1308 dev_err(vop
->dev
, "failed to get aclk source\n");
1309 return PTR_ERR(vop
->aclk
);
1311 vop
->dclk
= devm_clk_get(vop
->dev
, "dclk_vop");
1312 if (IS_ERR(vop
->dclk
)) {
1313 dev_err(vop
->dev
, "failed to get dclk source\n");
1314 return PTR_ERR(vop
->dclk
);
1317 ret
= clk_prepare(vop
->dclk
);
1319 dev_err(vop
->dev
, "failed to prepare dclk\n");
1323 /* Enable both the hclk and aclk to setup the vop */
1324 ret
= clk_prepare_enable(vop
->hclk
);
1326 dev_err(vop
->dev
, "failed to prepare/enable hclk\n");
1327 goto err_unprepare_dclk
;
1330 ret
= clk_prepare_enable(vop
->aclk
);
1332 dev_err(vop
->dev
, "failed to prepare/enable aclk\n");
1333 goto err_disable_hclk
;
1337 * do hclk_reset, reset all vop registers.
1339 ahb_rst
= devm_reset_control_get(vop
->dev
, "ahb");
1340 if (IS_ERR(ahb_rst
)) {
1341 dev_err(vop
->dev
, "failed to get ahb reset\n");
1342 ret
= PTR_ERR(ahb_rst
);
1343 goto err_disable_aclk
;
1345 reset_control_assert(ahb_rst
);
1346 usleep_range(10, 20);
1347 reset_control_deassert(ahb_rst
);
1349 memcpy(vop
->regsbak
, vop
->regs
, vop
->len
);
1351 for (i
= 0; i
< vop_data
->table_size
; i
++)
1352 vop_writel(vop
, init_table
[i
].offset
, init_table
[i
].value
);
1354 for (i
= 0; i
< vop_data
->win_size
; i
++) {
1355 const struct vop_win_data
*win
= &vop_data
->win
[i
];
1357 VOP_WIN_SET(vop
, win
, enable
, 0);
1363 * do dclk_reset, let all config take affect.
1365 vop
->dclk_rst
= devm_reset_control_get(vop
->dev
, "dclk");
1366 if (IS_ERR(vop
->dclk_rst
)) {
1367 dev_err(vop
->dev
, "failed to get dclk reset\n");
1368 ret
= PTR_ERR(vop
->dclk_rst
);
1369 goto err_disable_aclk
;
1371 reset_control_assert(vop
->dclk_rst
);
1372 usleep_range(10, 20);
1373 reset_control_deassert(vop
->dclk_rst
);
1375 clk_disable(vop
->hclk
);
1376 clk_disable(vop
->aclk
);
1378 vop
->is_enabled
= false;
1383 clk_disable_unprepare(vop
->aclk
);
1385 clk_disable_unprepare(vop
->hclk
);
1387 clk_unprepare(vop
->dclk
);
1392 * Initialize the vop->win array elements.
1394 static void vop_win_init(struct vop
*vop
)
1396 const struct vop_data
*vop_data
= vop
->data
;
1399 for (i
= 0; i
< vop_data
->win_size
; i
++) {
1400 struct vop_win
*vop_win
= &vop
->win
[i
];
1401 const struct vop_win_data
*win_data
= &vop_data
->win
[i
];
1403 vop_win
->data
= win_data
;
1408 static int vop_bind(struct device
*dev
, struct device
*master
, void *data
)
1410 struct platform_device
*pdev
= to_platform_device(dev
);
1411 const struct vop_data
*vop_data
;
1412 struct drm_device
*drm_dev
= data
;
1414 struct resource
*res
;
1418 vop_data
= of_device_get_match_data(dev
);
1422 /* Allocate vop struct and its vop_win array */
1423 alloc_size
= sizeof(*vop
) + sizeof(*vop
->win
) * vop_data
->win_size
;
1424 vop
= devm_kzalloc(dev
, alloc_size
, GFP_KERNEL
);
1429 vop
->data
= vop_data
;
1430 vop
->drm_dev
= drm_dev
;
1431 dev_set_drvdata(dev
, vop
);
1435 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1436 vop
->len
= resource_size(res
);
1437 vop
->regs
= devm_ioremap_resource(dev
, res
);
1438 if (IS_ERR(vop
->regs
))
1439 return PTR_ERR(vop
->regs
);
1441 vop
->regsbak
= devm_kzalloc(dev
, vop
->len
, GFP_KERNEL
);
1445 ret
= vop_initial(vop
);
1447 dev_err(&pdev
->dev
, "cannot initial vop dev - err %d\n", ret
);
1451 irq
= platform_get_irq(pdev
, 0);
1453 dev_err(dev
, "cannot find irq for vop\n");
1456 vop
->irq
= (unsigned int)irq
;
1458 spin_lock_init(&vop
->reg_lock
);
1459 spin_lock_init(&vop
->irq_lock
);
1461 mutex_init(&vop
->vsync_mutex
);
1463 ret
= devm_request_irq(dev
, vop
->irq
, vop_isr
,
1464 IRQF_SHARED
, dev_name(dev
), vop
);
1468 /* IRQ is initially disabled; it gets enabled in power_on */
1469 disable_irq(vop
->irq
);
1471 ret
= vop_create_crtc(vop
);
1475 pm_runtime_enable(&pdev
->dev
);
/*
 * Component unbind callback: undo vop_bind().  Stops runtime PM first,
 * then destroys the CRTC and planes while the devm memory backing them
 * is still alive.
 */
static void vop_unbind(struct device *dev, struct device *master, void *data)
{
	pm_runtime_disable(dev);
	vop_destroy_crtc(dev_get_drvdata(dev));
}
1487 const struct component_ops vop_component_ops
= {
1489 .unbind
= vop_unbind
,
1491 EXPORT_SYMBOL_GPL(vop_component_ops
);