2 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
21 #include <linux/sort.h>
22 #include <drm/drm_mode.h>
24 #include "drm_crtc_helper.h"
25 #include "drm_flip_work.h"
27 #define CURSOR_WIDTH 64
28 #define CURSOR_HEIGHT 64
30 #define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
38 /* layer mixer used for this CRTC (+ its lock): */
39 #define GET_LM_ID(crtc_id) ((crtc_id == 3) ? 5 : crtc_id)
41 spinlock_t lm_lock
; /* protect REG_MDP5_LM_* registers */
43 /* CTL used for this CRTC: */
46 /* if there is a pending flip, these will be non-null: */
47 struct drm_pending_vblank_event
*event
;
49 /* Bits have been flushed at the last commit,
50 * used to decide if a vsync has happened since last commit.
54 #define PENDING_CURSOR 0x1
55 #define PENDING_FLIP 0x2
58 /* for unref'ing cursor bo's after scanout completes: */
59 struct drm_flip_work unref_cursor_work
;
61 struct mdp_irq vblank
;
63 struct mdp_irq pp_done
;
65 struct completion pp_completion
;
70 /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
73 /* current cursor being scanned out: */
74 struct drm_gem_object
*scanout_bo
;
75 uint32_t width
, height
;
79 #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
81 static struct mdp5_kms
*get_kms(struct drm_crtc
*crtc
)
83 struct msm_drm_private
*priv
= crtc
->dev
->dev_private
;
84 return to_mdp5_kms(to_mdp_kms(priv
->kms
));
87 static void request_pending(struct drm_crtc
*crtc
, uint32_t pending
)
89 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
91 atomic_or(pending
, &mdp5_crtc
->pending
);
92 mdp_irq_register(&get_kms(crtc
)->base
, &mdp5_crtc
->vblank
);
95 static void request_pp_done_pending(struct drm_crtc
*crtc
)
97 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
98 reinit_completion(&mdp5_crtc
->pp_completion
);
101 static u32
crtc_flush(struct drm_crtc
*crtc
, u32 flush_mask
)
103 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
105 DBG("%s: flush=%08x", mdp5_crtc
->name
, flush_mask
);
106 return mdp5_ctl_commit(mdp5_crtc
->ctl
, flush_mask
);
110 * flush updates, to make sure hw is updated to new scanout fb,
111 * so that we can safely queue unref to current fb (ie. next
112 * vblank we know hw is done w/ previous scanout_fb).
114 static u32
crtc_flush_all(struct drm_crtc
*crtc
)
116 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
117 struct drm_plane
*plane
;
118 uint32_t flush_mask
= 0;
120 /* this should not happen: */
121 if (WARN_ON(!mdp5_crtc
->ctl
))
124 drm_atomic_crtc_for_each_plane(plane
, crtc
) {
125 flush_mask
|= mdp5_plane_get_flush(plane
);
128 flush_mask
|= mdp_ctl_flush_mask_lm(mdp5_crtc
->lm
);
130 return crtc_flush(crtc
, flush_mask
);
133 /* if file!=NULL, this is preclose potential cancel-flip path */
134 static void complete_flip(struct drm_crtc
*crtc
, struct drm_file
*file
)
136 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
137 struct drm_device
*dev
= crtc
->dev
;
138 struct drm_pending_vblank_event
*event
;
139 struct drm_plane
*plane
;
142 spin_lock_irqsave(&dev
->event_lock
, flags
);
143 event
= mdp5_crtc
->event
;
145 /* if regular vblank case (!file) or if cancel-flip from
146 * preclose on file that requested flip, then send the
149 if (!file
|| (event
->base
.file_priv
== file
)) {
150 mdp5_crtc
->event
= NULL
;
151 DBG("%s: send event: %p", mdp5_crtc
->name
, event
);
152 drm_crtc_send_vblank_event(crtc
, event
);
155 spin_unlock_irqrestore(&dev
->event_lock
, flags
);
157 drm_atomic_crtc_for_each_plane(plane
, crtc
) {
158 mdp5_plane_complete_flip(plane
);
161 if (mdp5_crtc
->ctl
&& !crtc
->state
->enable
) {
162 /* set STAGE_UNUSED for all layers */
163 mdp5_ctl_blend(mdp5_crtc
->ctl
, NULL
, 0, 0);
164 mdp5_crtc
->ctl
= NULL
;
168 static void unref_cursor_worker(struct drm_flip_work
*work
, void *val
)
170 struct mdp5_crtc
*mdp5_crtc
=
171 container_of(work
, struct mdp5_crtc
, unref_cursor_work
);
172 struct mdp5_kms
*mdp5_kms
= get_kms(&mdp5_crtc
->base
);
174 msm_gem_put_iova(val
, mdp5_kms
->id
);
175 drm_gem_object_unreference_unlocked(val
);
178 static void mdp5_crtc_destroy(struct drm_crtc
*crtc
)
180 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
182 drm_crtc_cleanup(crtc
);
183 drm_flip_work_cleanup(&mdp5_crtc
->unref_cursor_work
);
189 * blend_setup() - blend all the planes of a CRTC
191 * If no base layer is available, border will be enabled as the base layer.
192 * Otherwise all layers will be blended based on their stage calculated
193 * in mdp5_crtc_atomic_check.
195 static void blend_setup(struct drm_crtc
*crtc
)
197 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
198 struct mdp5_kms
*mdp5_kms
= get_kms(crtc
);
199 struct drm_plane
*plane
;
200 const struct mdp5_cfg_hw
*hw_cfg
;
201 struct mdp5_plane_state
*pstate
, *pstates
[STAGE_MAX
+ 1] = {NULL
};
202 const struct mdp_format
*format
;
203 uint32_t lm
= mdp5_crtc
->lm
;
204 uint32_t blend_op
, fg_alpha
, bg_alpha
, ctl_blend_flags
= 0;
206 uint8_t stage
[STAGE_MAX
+ 1];
207 int i
, plane_cnt
= 0;
208 #define blender(stage) ((stage) - STAGE0)
210 hw_cfg
= mdp5_cfg_get_hw_config(mdp5_kms
->cfg
);
212 spin_lock_irqsave(&mdp5_crtc
->lm_lock
, flags
);
214 /* ctl could be released already when we are shutting down: */
218 /* Collect all plane information */
219 drm_atomic_crtc_for_each_plane(plane
, crtc
) {
220 pstate
= to_mdp5_plane_state(plane
->state
);
221 pstates
[pstate
->stage
] = pstate
;
222 stage
[pstate
->stage
] = mdp5_plane_pipe(plane
);
227 * If there is no base layer, enable border color.
228 * Although it's not possbile in current blend logic,
229 * put it here as a reminder.
231 if (!pstates
[STAGE_BASE
] && plane_cnt
) {
232 ctl_blend_flags
|= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT
;
233 DBG("Border Color is enabled");
236 /* The reset for blending */
237 for (i
= STAGE0
; i
<= STAGE_MAX
; i
++) {
241 format
= to_mdp_format(
242 msm_framebuffer_format(pstates
[i
]->base
.fb
));
243 plane
= pstates
[i
]->base
.plane
;
244 blend_op
= MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST
) |
245 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST
);
246 fg_alpha
= pstates
[i
]->alpha
;
247 bg_alpha
= 0xFF - pstates
[i
]->alpha
;
248 DBG("Stage %d fg_alpha %x bg_alpha %x", i
, fg_alpha
, bg_alpha
);
250 if (format
->alpha_enable
&& pstates
[i
]->premultiplied
) {
251 blend_op
= MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST
) |
252 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL
);
253 if (fg_alpha
!= 0xff) {
256 MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA
|
257 MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA
;
259 blend_op
|= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA
;
261 } else if (format
->alpha_enable
) {
262 blend_op
= MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL
) |
263 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL
);
264 if (fg_alpha
!= 0xff) {
267 MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA
|
268 MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA
|
269 MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA
|
270 MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA
;
272 blend_op
|= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA
;
276 mdp5_write(mdp5_kms
, REG_MDP5_LM_BLEND_OP_MODE(lm
,
277 blender(i
)), blend_op
);
278 mdp5_write(mdp5_kms
, REG_MDP5_LM_BLEND_FG_ALPHA(lm
,
279 blender(i
)), fg_alpha
);
280 mdp5_write(mdp5_kms
, REG_MDP5_LM_BLEND_BG_ALPHA(lm
,
281 blender(i
)), bg_alpha
);
284 mdp5_ctl_blend(mdp5_crtc
->ctl
, stage
, plane_cnt
, ctl_blend_flags
);
287 spin_unlock_irqrestore(&mdp5_crtc
->lm_lock
, flags
);
290 static void mdp5_crtc_mode_set_nofb(struct drm_crtc
*crtc
)
292 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
293 struct mdp5_kms
*mdp5_kms
= get_kms(crtc
);
295 struct drm_display_mode
*mode
;
297 if (WARN_ON(!crtc
->state
))
300 mode
= &crtc
->state
->adjusted_mode
;
302 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
303 mdp5_crtc
->name
, mode
->base
.id
, mode
->name
,
304 mode
->vrefresh
, mode
->clock
,
305 mode
->hdisplay
, mode
->hsync_start
,
306 mode
->hsync_end
, mode
->htotal
,
307 mode
->vdisplay
, mode
->vsync_start
,
308 mode
->vsync_end
, mode
->vtotal
,
309 mode
->type
, mode
->flags
);
311 spin_lock_irqsave(&mdp5_crtc
->lm_lock
, flags
);
312 mdp5_write(mdp5_kms
, REG_MDP5_LM_OUT_SIZE(mdp5_crtc
->lm
),
313 MDP5_LM_OUT_SIZE_WIDTH(mode
->hdisplay
) |
314 MDP5_LM_OUT_SIZE_HEIGHT(mode
->vdisplay
));
315 spin_unlock_irqrestore(&mdp5_crtc
->lm_lock
, flags
);
318 static void mdp5_crtc_disable(struct drm_crtc
*crtc
)
320 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
321 struct mdp5_kms
*mdp5_kms
= get_kms(crtc
);
323 DBG("%s", mdp5_crtc
->name
);
325 if (WARN_ON(!mdp5_crtc
->enabled
))
328 if (mdp5_crtc
->cmd_mode
)
329 mdp_irq_unregister(&mdp5_kms
->base
, &mdp5_crtc
->pp_done
);
331 mdp_irq_unregister(&mdp5_kms
->base
, &mdp5_crtc
->err
);
332 mdp5_disable(mdp5_kms
);
334 mdp5_crtc
->enabled
= false;
337 static void mdp5_crtc_enable(struct drm_crtc
*crtc
)
339 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
340 struct mdp5_kms
*mdp5_kms
= get_kms(crtc
);
342 DBG("%s", mdp5_crtc
->name
);
344 if (WARN_ON(mdp5_crtc
->enabled
))
347 mdp5_enable(mdp5_kms
);
348 mdp_irq_register(&mdp5_kms
->base
, &mdp5_crtc
->err
);
350 if (mdp5_crtc
->cmd_mode
)
351 mdp_irq_register(&mdp5_kms
->base
, &mdp5_crtc
->pp_done
);
353 mdp5_crtc
->enabled
= true;
/* Pairs a plane with its mdp5 state, for zpos sorting in atomic_check. */
struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};
361 static int pstate_cmp(const void *a
, const void *b
)
363 struct plane_state
*pa
= (struct plane_state
*)a
;
364 struct plane_state
*pb
= (struct plane_state
*)b
;
365 return pa
->state
->zpos
- pb
->state
->zpos
;
368 static int mdp5_crtc_atomic_check(struct drm_crtc
*crtc
,
369 struct drm_crtc_state
*state
)
371 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
372 struct mdp5_kms
*mdp5_kms
= get_kms(crtc
);
373 struct drm_plane
*plane
;
374 struct drm_device
*dev
= crtc
->dev
;
375 struct plane_state pstates
[STAGE_MAX
+ 1];
376 const struct mdp5_cfg_hw
*hw_cfg
;
377 const struct drm_plane_state
*pstate
;
380 DBG("%s: check", mdp5_crtc
->name
);
382 /* verify that there are not too many planes attached to crtc
383 * and that we don't have conflicting mixer stages:
385 hw_cfg
= mdp5_cfg_get_hw_config(mdp5_kms
->cfg
);
386 drm_atomic_crtc_state_for_each_plane_state(plane
, pstate
, state
) {
387 if (cnt
>= (hw_cfg
->lm
.nb_stages
)) {
388 dev_err(dev
->dev
, "too many planes!\n");
393 pstates
[cnt
].plane
= plane
;
394 pstates
[cnt
].state
= to_mdp5_plane_state(pstate
);
399 /* assign a stage based on sorted zpos property */
400 sort(pstates
, cnt
, sizeof(pstates
[0]), pstate_cmp
, NULL
);
402 for (i
= 0; i
< cnt
; i
++) {
403 pstates
[i
].state
->stage
= STAGE_BASE
+ i
;
404 DBG("%s: assign pipe %s on stage=%d", mdp5_crtc
->name
,
405 pipe2name(mdp5_plane_pipe(pstates
[i
].plane
)),
406 pstates
[i
].state
->stage
);
412 static void mdp5_crtc_atomic_begin(struct drm_crtc
*crtc
,
413 struct drm_crtc_state
*old_crtc_state
)
415 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
416 DBG("%s: begin", mdp5_crtc
->name
);
419 static void mdp5_crtc_atomic_flush(struct drm_crtc
*crtc
,
420 struct drm_crtc_state
*old_crtc_state
)
422 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
423 struct drm_device
*dev
= crtc
->dev
;
426 DBG("%s: event: %p", mdp5_crtc
->name
, crtc
->state
->event
);
428 WARN_ON(mdp5_crtc
->event
);
430 spin_lock_irqsave(&dev
->event_lock
, flags
);
431 mdp5_crtc
->event
= crtc
->state
->event
;
432 spin_unlock_irqrestore(&dev
->event_lock
, flags
);
435 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
436 * it means we are trying to flush a CRTC whose state is disabled:
437 * nothing else needs to be done.
439 if (unlikely(!mdp5_crtc
->ctl
))
444 /* PP_DONE irq is only used by command mode for now.
445 * It is better to request pending before FLUSH and START trigger
446 * to make sure no pp_done irq missed.
447 * This is safe because no pp_done will happen before SW trigger
450 if (mdp5_crtc
->cmd_mode
)
451 request_pp_done_pending(crtc
);
453 mdp5_crtc
->flushed_mask
= crtc_flush_all(crtc
);
455 request_pending(crtc
, PENDING_FLIP
);
458 static void get_roi(struct drm_crtc
*crtc
, uint32_t *roi_w
, uint32_t *roi_h
)
460 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
461 uint32_t xres
= crtc
->mode
.hdisplay
;
462 uint32_t yres
= crtc
->mode
.vdisplay
;
465 * Cursor Region Of Interest (ROI) is a plane read from cursor
466 * buffer to render. The ROI region is determined by the visibility of
467 * the cursor point. In the default Cursor image the cursor point will
468 * be at the top left of the cursor image, unless it is specified
469 * otherwise using hotspot feature.
471 * If the cursor point reaches the right (xres - x < cursor.width) or
472 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
473 * width and ROI height need to be evaluated to crop the cursor image
475 * (xres-x) will be new cursor width when x > (xres - cursor.width)
476 * (yres-y) will be new cursor height when y > (yres - cursor.height)
478 *roi_w
= min(mdp5_crtc
->cursor
.width
, xres
-
479 mdp5_crtc
->cursor
.x
);
480 *roi_h
= min(mdp5_crtc
->cursor
.height
, yres
-
481 mdp5_crtc
->cursor
.y
);
484 static int mdp5_crtc_cursor_set(struct drm_crtc
*crtc
,
485 struct drm_file
*file
, uint32_t handle
,
486 uint32_t width
, uint32_t height
)
488 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
489 struct drm_device
*dev
= crtc
->dev
;
490 struct mdp5_kms
*mdp5_kms
= get_kms(crtc
);
491 struct drm_gem_object
*cursor_bo
, *old_bo
= NULL
;
492 uint32_t blendcfg
, cursor_addr
, stride
;
494 enum mdp5_cursor_alpha cur_alpha
= CURSOR_ALPHA_PER_PIXEL
;
495 uint32_t flush_mask
= mdp_ctl_flush_mask_cursor(0);
496 uint32_t roi_w
, roi_h
;
497 bool cursor_enable
= true;
500 if ((width
> CURSOR_WIDTH
) || (height
> CURSOR_HEIGHT
)) {
501 dev_err(dev
->dev
, "bad cursor size: %dx%d\n", width
, height
);
505 if (NULL
== mdp5_crtc
->ctl
)
510 cursor_enable
= false;
514 cursor_bo
= drm_gem_object_lookup(file
, handle
);
518 ret
= msm_gem_get_iova(cursor_bo
, mdp5_kms
->id
, &cursor_addr
);
523 stride
= width
* drm_format_plane_cpp(DRM_FORMAT_ARGB8888
, 0);
525 spin_lock_irqsave(&mdp5_crtc
->cursor
.lock
, flags
);
526 old_bo
= mdp5_crtc
->cursor
.scanout_bo
;
528 mdp5_crtc
->cursor
.scanout_bo
= cursor_bo
;
529 mdp5_crtc
->cursor
.width
= width
;
530 mdp5_crtc
->cursor
.height
= height
;
532 get_roi(crtc
, &roi_w
, &roi_h
);
534 mdp5_write(mdp5_kms
, REG_MDP5_LM_CURSOR_STRIDE(lm
), stride
);
535 mdp5_write(mdp5_kms
, REG_MDP5_LM_CURSOR_FORMAT(lm
),
536 MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888
));
537 mdp5_write(mdp5_kms
, REG_MDP5_LM_CURSOR_IMG_SIZE(lm
),
538 MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height
) |
539 MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width
));
540 mdp5_write(mdp5_kms
, REG_MDP5_LM_CURSOR_SIZE(lm
),
541 MDP5_LM_CURSOR_SIZE_ROI_H(roi_h
) |
542 MDP5_LM_CURSOR_SIZE_ROI_W(roi_w
));
543 mdp5_write(mdp5_kms
, REG_MDP5_LM_CURSOR_BASE_ADDR(lm
), cursor_addr
);
545 blendcfg
= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN
;
546 blendcfg
|= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha
);
547 mdp5_write(mdp5_kms
, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm
), blendcfg
);
549 spin_unlock_irqrestore(&mdp5_crtc
->cursor
.lock
, flags
);
552 ret
= mdp5_ctl_set_cursor(mdp5_crtc
->ctl
, 0, cursor_enable
);
554 dev_err(dev
->dev
, "failed to %sable cursor: %d\n",
555 cursor_enable
? "en" : "dis", ret
);
559 crtc_flush(crtc
, flush_mask
);
563 drm_flip_work_queue(&mdp5_crtc
->unref_cursor_work
, old_bo
);
564 /* enable vblank to complete cursor work: */
565 request_pending(crtc
, PENDING_CURSOR
);
570 static int mdp5_crtc_cursor_move(struct drm_crtc
*crtc
, int x
, int y
)
572 struct mdp5_kms
*mdp5_kms
= get_kms(crtc
);
573 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
574 uint32_t flush_mask
= mdp_ctl_flush_mask_cursor(0);
579 /* In case the CRTC is disabled, just drop the cursor update */
580 if (unlikely(!crtc
->state
->enable
))
583 mdp5_crtc
->cursor
.x
= x
= max(x
, 0);
584 mdp5_crtc
->cursor
.y
= y
= max(y
, 0);
586 get_roi(crtc
, &roi_w
, &roi_h
);
588 spin_lock_irqsave(&mdp5_crtc
->cursor
.lock
, flags
);
589 mdp5_write(mdp5_kms
, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc
->lm
),
590 MDP5_LM_CURSOR_SIZE_ROI_H(roi_h
) |
591 MDP5_LM_CURSOR_SIZE_ROI_W(roi_w
));
592 mdp5_write(mdp5_kms
, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc
->lm
),
593 MDP5_LM_CURSOR_START_XY_Y_START(y
) |
594 MDP5_LM_CURSOR_START_XY_X_START(x
));
595 spin_unlock_irqrestore(&mdp5_crtc
->cursor
.lock
, flags
);
597 crtc_flush(crtc
, flush_mask
);
602 static const struct drm_crtc_funcs mdp5_crtc_funcs
= {
603 .set_config
= drm_atomic_helper_set_config
,
604 .destroy
= mdp5_crtc_destroy
,
605 .page_flip
= drm_atomic_helper_page_flip
,
606 .set_property
= drm_atomic_helper_crtc_set_property
,
607 .reset
= drm_atomic_helper_crtc_reset
,
608 .atomic_duplicate_state
= drm_atomic_helper_crtc_duplicate_state
,
609 .atomic_destroy_state
= drm_atomic_helper_crtc_destroy_state
,
610 .cursor_set
= mdp5_crtc_cursor_set
,
611 .cursor_move
= mdp5_crtc_cursor_move
,
614 static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs
= {
615 .mode_set_nofb
= mdp5_crtc_mode_set_nofb
,
616 .disable
= mdp5_crtc_disable
,
617 .enable
= mdp5_crtc_enable
,
618 .atomic_check
= mdp5_crtc_atomic_check
,
619 .atomic_begin
= mdp5_crtc_atomic_begin
,
620 .atomic_flush
= mdp5_crtc_atomic_flush
,
623 static void mdp5_crtc_vblank_irq(struct mdp_irq
*irq
, uint32_t irqstatus
)
625 struct mdp5_crtc
*mdp5_crtc
= container_of(irq
, struct mdp5_crtc
, vblank
);
626 struct drm_crtc
*crtc
= &mdp5_crtc
->base
;
627 struct msm_drm_private
*priv
= crtc
->dev
->dev_private
;
630 mdp_irq_unregister(&get_kms(crtc
)->base
, &mdp5_crtc
->vblank
);
632 pending
= atomic_xchg(&mdp5_crtc
->pending
, 0);
634 if (pending
& PENDING_FLIP
) {
635 complete_flip(crtc
, NULL
);
638 if (pending
& PENDING_CURSOR
)
639 drm_flip_work_commit(&mdp5_crtc
->unref_cursor_work
, priv
->wq
);
642 static void mdp5_crtc_err_irq(struct mdp_irq
*irq
, uint32_t irqstatus
)
644 struct mdp5_crtc
*mdp5_crtc
= container_of(irq
, struct mdp5_crtc
, err
);
646 DBG("%s: error: %08x", mdp5_crtc
->name
, irqstatus
);
649 static void mdp5_crtc_pp_done_irq(struct mdp_irq
*irq
, uint32_t irqstatus
)
651 struct mdp5_crtc
*mdp5_crtc
= container_of(irq
, struct mdp5_crtc
,
654 complete(&mdp5_crtc
->pp_completion
);
657 static void mdp5_crtc_wait_for_pp_done(struct drm_crtc
*crtc
)
659 struct drm_device
*dev
= crtc
->dev
;
660 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
663 ret
= wait_for_completion_timeout(&mdp5_crtc
->pp_completion
,
664 msecs_to_jiffies(50));
666 dev_warn(dev
->dev
, "pp done time out, lm=%d\n", mdp5_crtc
->lm
);
669 static void mdp5_crtc_wait_for_flush_done(struct drm_crtc
*crtc
)
671 struct drm_device
*dev
= crtc
->dev
;
672 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
675 /* Should not call this function if crtc is disabled. */
679 ret
= drm_crtc_vblank_get(crtc
);
683 ret
= wait_event_timeout(dev
->vblank
[drm_crtc_index(crtc
)].queue
,
684 ((mdp5_ctl_get_commit_status(mdp5_crtc
->ctl
) &
685 mdp5_crtc
->flushed_mask
) == 0),
686 msecs_to_jiffies(50));
688 dev_warn(dev
->dev
, "vblank time out, crtc=%d\n", mdp5_crtc
->id
);
690 mdp5_crtc
->flushed_mask
= 0;
692 drm_crtc_vblank_put(crtc
);
695 uint32_t mdp5_crtc_vblank(struct drm_crtc
*crtc
)
697 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
698 return mdp5_crtc
->vblank
.irqmask
;
701 void mdp5_crtc_set_pipeline(struct drm_crtc
*crtc
,
702 struct mdp5_interface
*intf
, struct mdp5_ctl
*ctl
)
704 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
705 struct mdp5_kms
*mdp5_kms
= get_kms(crtc
);
706 int lm
= mdp5_crtc_get_lm(crtc
);
708 /* now that we know what irq's we want: */
709 mdp5_crtc
->err
.irqmask
= intf2err(intf
->num
);
710 mdp5_crtc
->vblank
.irqmask
= intf2vblank(lm
, intf
);
712 if ((intf
->type
== INTF_DSI
) &&
713 (intf
->mode
== MDP5_INTF_DSI_MODE_COMMAND
)) {
714 mdp5_crtc
->pp_done
.irqmask
= lm2ppdone(lm
);
715 mdp5_crtc
->pp_done
.irq
= mdp5_crtc_pp_done_irq
;
716 mdp5_crtc
->cmd_mode
= true;
718 mdp5_crtc
->pp_done
.irqmask
= 0;
719 mdp5_crtc
->pp_done
.irq
= NULL
;
720 mdp5_crtc
->cmd_mode
= false;
723 mdp_irq_update(&mdp5_kms
->base
);
725 mdp5_crtc
->ctl
= ctl
;
726 mdp5_ctl_set_pipeline(ctl
, intf
, lm
);
729 int mdp5_crtc_get_lm(struct drm_crtc
*crtc
)
731 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
732 return WARN_ON(!crtc
) ? -EINVAL
: mdp5_crtc
->lm
;
735 void mdp5_crtc_wait_for_commit_done(struct drm_crtc
*crtc
)
737 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
739 if (mdp5_crtc
->cmd_mode
)
740 mdp5_crtc_wait_for_pp_done(crtc
);
742 mdp5_crtc_wait_for_flush_done(crtc
);
745 /* initialize crtc */
746 struct drm_crtc
*mdp5_crtc_init(struct drm_device
*dev
,
747 struct drm_plane
*plane
, int id
)
749 struct drm_crtc
*crtc
= NULL
;
750 struct mdp5_crtc
*mdp5_crtc
;
752 mdp5_crtc
= kzalloc(sizeof(*mdp5_crtc
), GFP_KERNEL
);
754 return ERR_PTR(-ENOMEM
);
756 crtc
= &mdp5_crtc
->base
;
759 mdp5_crtc
->lm
= GET_LM_ID(id
);
761 spin_lock_init(&mdp5_crtc
->lm_lock
);
762 spin_lock_init(&mdp5_crtc
->cursor
.lock
);
763 init_completion(&mdp5_crtc
->pp_completion
);
765 mdp5_crtc
->vblank
.irq
= mdp5_crtc_vblank_irq
;
766 mdp5_crtc
->err
.irq
= mdp5_crtc_err_irq
;
768 snprintf(mdp5_crtc
->name
, sizeof(mdp5_crtc
->name
), "%s:%d",
769 pipe2name(mdp5_plane_pipe(plane
)), id
);
771 drm_crtc_init_with_planes(dev
, crtc
, plane
, NULL
, &mdp5_crtc_funcs
,
774 drm_flip_work_init(&mdp5_crtc
->unref_cursor_work
,
775 "unref cursor", unref_cursor_worker
);
777 drm_crtc_helper_add(crtc
, &mdp5_crtc_helper_funcs
);